diff --git a/.openvswitch.metadata b/.openvswitch.metadata index aae65b4..a152ba6 100644 --- a/.openvswitch.metadata +++ b/.openvswitch.metadata @@ -1,6 +1,6 @@ +d34f96421a86004aa5d26ecf975edefd09f948b1 SOURCES/Pygments-1.4.tar.gz +3a11f130c63b057532ca37fe49c8967d0cbae1d5 SOURCES/Sphinx-1.2.3.tar.gz 002450621b33c5690060345b0aac25bc2426d675 SOURCES/docutils-0.12.tar.gz 722b63cd114c21041abda7b38d7f14e46338e3e0 SOURCES/openvswitch-2.17.0.tar.gz 8509a716f9f936526f64fb23f313c5a9baf2f123 SOURCES/pyelftools-0.27.tar.gz -d34f96421a86004aa5d26ecf975edefd09f948b1 SOURCES/Pygments-1.4.tar.gz -3a11f130c63b057532ca37fe49c8967d0cbae1d5 SOURCES/Sphinx-1.2.3.tar.gz 17331a86759beba4b6635ed530ce23b0b73c0744 SOURCES/dpdk-21.11.tar.xz diff --git a/SOURCES/openvswitch-2.17.0.patch b/SOURCES/openvswitch-2.17.0.patch index 807add2..96ffd87 100644 --- a/SOURCES/openvswitch-2.17.0.patch +++ b/SOURCES/openvswitch-2.17.0.patch @@ -1,5 +1,5 @@ diff --git a/.ci/linux-build.sh b/.ci/linux-build.sh -index 6cd38ff3ef..f5021e1a8f 100755 +index 6cd38ff3ef..2e1bf25881 100755 --- a/.ci/linux-build.sh +++ b/.ci/linux-build.sh @@ -220,11 +220,15 @@ fi @@ -7,7 +7,7 @@ index 6cd38ff3ef..f5021e1a8f 100755 if [ "$DPDK" ] || [ "$DPDK_SHARED" ]; then if [ -z "$DPDK_VER" ]; then - DPDK_VER="21.11" -+ DPDK_VER="21.11.2" ++ DPDK_VER="21.11.6" fi install_dpdk $DPDK_VER fi @@ -66,7 +66,7 @@ index f8facebeb0..36c4d9e4e4 100755 if [ "$CC" = "clang" ]; then make CFLAGS="$CFLAGS -Wno-error=unused-command-line-argument" diff --git a/.cirrus.yml b/.cirrus.yml -index a7ae793bc4..c460103bb1 100644 +index a7ae793bc4..e910576a7a 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -2,8 +2,8 @@ freebsd_build_task: @@ -75,13 +75,13 @@ index a7ae793bc4..c460103bb1 100644 matrix: - image_family: freebsd-12-2-snap - image_family: freebsd-11-4-snap -+ image_family: freebsd-12-4-snap + image_family: freebsd-13-2-snap ++ image_family: freebsd-14-0-snap cpu: 4 memory: 4G diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml -index eac3504e48..c923df3ea7 100644 +index eac3504e48..29c20153a3 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -6,7 +6,7 @@ jobs: @@ -102,11 +102,11 @@ index eac3504e48..c923df3ea7 100644 name: linux ${{ join(matrix.*, ' ') }} - runs-on: ubuntu-18.04 -+ runs-on: ubuntu-20.04 ++ runs-on: ubuntu-22.04 timeout-minutes: 30 strategy: -@@ -37,6 +38,11 @@ jobs: +@@ -37,12 +38,17 @@ jobs: - compiler: clang opts: --disable-ssl @@ -117,13 +117,42 @@ index eac3504e48..c923df3ea7 100644 + - compiler: gcc testsuite: test - kernel: 3.16 +- kernel: 3.16 ++ kernel: 4.14 + - compiler: clang + testsuite: test +- kernel: 3.16 ++ kernel: 4.14 + asan: asan + + - compiler: gcc +@@ -72,16 +78,16 @@ jobs: + kernel_list: 5.8 5.5 5.4 4.19 + + - compiler: gcc +- kernel_list: 4.14 4.9 4.4 3.16 ++ kernel_list: 4.14 4.9 4.4 + - compiler: clang +- kernel_list: 4.14 4.9 4.4 3.16 ++ kernel_list: 4.14 4.9 4.4 + + - compiler: gcc + afxdp: afxdp +- kernel: 5.3 ++ kernel: 5.4 + - compiler: clang + afxdp: afxdp +- kernel: 5.3 ++ kernel: 5.4 + + - compiler: gcc + dpdk: dpdk @@ -111,7 +117,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v2 -+ uses: actions/checkout@v3 ++ uses: actions/checkout@v4 - name: update PATH run: | @@ -132,7 +161,7 @@ index eac3504e48..c923df3ea7 100644 - name: set up python - uses: actions/setup-python@v2 -+ uses: actions/setup-python@v4 ++ uses: actions/setup-python@v5 with: python-version: '3.9' @@ -141,7 +170,7 @@ index eac3504e48..c923df3ea7 100644 - name: 
cache if: matrix.dpdk != '' || matrix.dpdk_shared != '' - uses: actions/cache@v2 -+ uses: actions/cache@v3 ++ uses: actions/cache@v4 env: matrix_key: ${{ matrix.dpdk }}${{ matrix.dpdk_shared }} ci_key: ${{ hashFiles('dpdk-ci-signature') }} @@ -150,16 +179,28 @@ index eac3504e48..c923df3ea7 100644 - name: upload deb packages if: matrix.deb_package != '' - uses: actions/upload-artifact@v2 -+ uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 with: name: deb-packages path: '/home/runner/work/ovs/*.deb' +@@ -171,9 +177,9 @@ jobs: + - name: copy logs on failure + if: failure() || cancelled() + run: | +- # upload-artifact@v2 throws exceptions if it tries to upload socket ++ # upload-artifact throws exceptions if it tries to upload socket + # files and we could have some socket files in testsuite.dir. +- # Also, upload-artifact@v2 doesn't work well enough with wildcards. ++ # Also, upload-artifact doesn't work well enough with wildcards. + # So, we're just archiving everything here to avoid any issues. + mkdir logs + cp config.log ./logs/ @@ -182,7 +188,7 @@ jobs: - name: upload logs on failure if: failure() || cancelled() - uses: actions/upload-artifact@v2 -+ uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 with: name: logs-linux-${{ join(matrix.*, '-') }} path: logs.tgz @@ -168,14 +209,14 @@ index eac3504e48..c923df3ea7 100644 steps: - name: checkout - uses: actions/checkout@v2 -+ uses: actions/checkout@v3 ++ uses: actions/checkout@v4 - name: update PATH run: | echo "$HOME/bin" >> $GITHUB_PATH echo "$HOME/.local/bin" >> $GITHUB_PATH - name: set up python - uses: actions/setup-python@v2 -+ uses: actions/setup-python@v4 ++ uses: actions/setup-python@v5 with: python-version: '3.9' - name: install dependencies @@ -184,12 +225,24 @@ index eac3504e48..c923df3ea7 100644 - name: upload logs on failure if: failure() - uses: actions/upload-artifact@v2 -+ uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 with: name: logs-osx-clang---disable-ssl path: config.log +diff --git a/AUTHORS.rst b/AUTHORS.rst +index 6d11c9c4f3..1ce4e9dd35 100644 +--- a/AUTHORS.rst ++++ b/AUTHORS.rst +@@ -534,6 +534,7 @@ David Evans davidjoshuaevans@gmail.com + David Palma palma@onesource.pt + David van Moolenbroek dvmoolenbroek@aimvalley.nl + Derek Cormier derek.cormier@lab.ntt.co.jp ++Derrick Lim derrick.lim@rakuten.com + Dhaval Badiani dbadiani@vmware.com + DK Moon + Ding Zhi zhi.ding@6wind.com diff --git a/Documentation/faq/releases.rst b/Documentation/faq/releases.rst -index af524251ff..49895c595f 100644 +index af524251ff..a9045a2d56 100644 --- a/Documentation/faq/releases.rst +++ b/Documentation/faq/releases.rst @@ -32,7 +32,7 @@ Q: What does it mean for an Open vSwitch release to be LTS (long-term support)? @@ -214,7 +267,7 @@ index af524251ff..49895c595f 100644 + 2.14.x 19.11.13 + 2.15.x 20.11.6 + 2.16.x 20.11.6 -+ 2.17.x 21.11.2 ++ 2.17.x 21.11.6 ============ ======== Q: Are all the DPDK releases that OVS versions work with maintained? @@ -232,7 +285,7 @@ index 172d684df9..0203bbe955 100644 +.. |grant-revocation| replace:: :doc:`committer-grant-revocation` +.. 
|emeritus-status| replace:: :doc:`committer-emeritus-status` diff --git a/Documentation/intro/install/dpdk.rst b/Documentation/intro/install/dpdk.rst -index d9f44055db..a284e68514 100644 +index d9f44055db..fff49007e9 100644 --- a/Documentation/intro/install/dpdk.rst +++ b/Documentation/intro/install/dpdk.rst @@ -42,7 +42,7 @@ Build requirements @@ -240,7 +293,7 @@ index d9f44055db..a284e68514 100644 vSwitch with DPDK will require the following: -- DPDK 21.11 -+- DPDK 21.11.2 ++- DPDK 21.11.6 - A `DPDK supported NIC`_ @@ -251,9 +304,9 @@ index d9f44055db..a284e68514 100644 - $ wget https://fast.dpdk.org/rel/dpdk-21.11.tar.xz - $ tar xf dpdk-21.11.tar.xz - $ export DPDK_DIR=/usr/src/dpdk-21.11 -+ $ wget https://fast.dpdk.org/rel/dpdk-21.11.2.tar.xz -+ $ tar xf dpdk-21.11.2.tar.xz -+ $ export DPDK_DIR=/usr/src/dpdk-stable-21.11.2 ++ $ wget https://fast.dpdk.org/rel/dpdk-21.11.6.tar.xz ++ $ tar xf dpdk-21.11.6.tar.xz ++ $ export DPDK_DIR=/usr/src/dpdk-stable-21.11.6 $ cd $DPDK_DIR #. Configure and install DPDK using Meson @@ -418,12 +471,20 @@ index cb8076433e..22227c4957 100644 @if cmp -s $(@F).tmp $@; then \ touch $@; \ diff --git a/NEWS b/NEWS -index c10e9bfacc..7d4a8c0815 100644 +index c10e9bfacc..ca111ae3a3 100644 --- a/NEWS +++ b/NEWS -@@ -1,3 +1,83 @@ -+v2.17.9 - xx xxx xxxx +@@ -1,3 +1,91 @@ ++v2.17.10 - xx xxx xxxx ++---------------------- ++ ++v2.17.9 - 08 Feb 2024 +--------------------- ++ - Bug fixes ++ - Security: ++ * Fixed vulnerabilities CVE-2023-3966 and CVE-2023-5366. ++ - DPDK: ++ * OVS validated with DPDK 21.11.6. + +v2.17.8 - 17 Oct 2023 +--------------------- @@ -1950,7 +2011,7 @@ index ee5553f456..3e937910be 100755 sys.exit(1) for line in s.splitlines(): diff --git a/configure.ac b/configure.ac -index 4e9bcce272..3c9562a441 100644 +index 4e9bcce272..44af9eb6da 100644 --- a/configure.ac +++ b/configure.ac @@ -13,7 +13,7 @@ @@ -1958,7 +2019,7 @@ index 4e9bcce272..3c9562a441 100644 AC_PREREQ(2.63) -AC_INIT(openvswitch, 2.17.0, bugs@openvswitch.org) -+AC_INIT(openvswitch, 2.17.9, bugs@openvswitch.org) ++AC_INIT(openvswitch, 2.17.10, bugs@openvswitch.org) AC_CONFIG_SRCDIR([datapath/datapath.c]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_AUX_DIR([build-aux]) @@ -2234,15 +2295,21 @@ index cc0840704a..2a206305ec 100644 completionList->dropNbl = NULL; completionList->dropNblNext = &completionList->dropNbl; diff --git a/debian/changelog b/debian/changelog -index 3e0d3a66e3..8edbcad89c 100644 +index 3e0d3a66e3..f451bf643c 100644 --- a/debian/changelog +++ b/debian/changelog -@@ -1,3 +1,57 @@ +@@ -1,3 +1,63 @@ ++openvswitch (2.17.10-1) unstable; urgency=low ++ [ Open vSwitch team ] ++ * New upstream version ++ ++ -- Open vSwitch team Thu, 08 Feb 2024 17:52:54 +0100 ++ +openvswitch (2.17.9-1) unstable; urgency=low + [ Open vSwitch team ] + * New upstream version + -+ -- Open vSwitch team Tue, 17 Oct 2023 13:01:45 +0200 ++ -- Open vSwitch team Thu, 08 Feb 2024 17:52:54 +0100 + +openvswitch (2.17.8-1) unstable; urgency=low + [ Open vSwitch team ] @@ -2328,8 +2395,1768 @@ index 7fd7bc55da..088734b0dc 100644 debian/tmp/usr/share/man/man8/ovs-ctl.8 utilities/ovs-dpctl-top.8 utilities/ovs-dpctl.8 +diff --git a/dpdk/.ci/linux-build.sh b/dpdk/.ci/linux-build.sh +index c10c1a8ab5..b29cd91f56 100755 +--- a/dpdk/.ci/linux-build.sh ++++ b/dpdk/.ci/linux-build.sh +@@ -54,18 +54,15 @@ catch_coredump() { + } + + if [ "$AARCH64" = "true" ]; then +- # Note: common/cnxk is disabled for Ubuntu 18.04 +- # https://bugs.dpdk.org/show_bug.cgi?id=697 +- OPTS="$OPTS -Ddisable_drivers=common/cnxk" + if [ 
"${CC%%clang}" != "$CC" ]; then +- OPTS="$OPTS --cross-file config/arm/arm64_armv8_linux_clang_ubuntu1804" ++ OPTS="$OPTS --cross-file config/arm/arm64_armv8_linux_clang_ubuntu" + else + OPTS="$OPTS --cross-file config/arm/arm64_armv8_linux_gcc" + fi + fi + + if [ "$PPC64LE" = "true" ]; then +- OPTS="$OPTS --cross-file config/ppc/ppc64le-power8-linux-gcc-ubuntu1804" ++ OPTS="$OPTS --cross-file config/ppc/ppc64le-power8-linux-gcc-ubuntu" + fi + + if [ "$BUILD_DOCS" = "true" ]; then +diff --git a/dpdk/.github/workflows/build.yml b/dpdk/.github/workflows/build.yml +index 2e9c4be6d0..f1079d840e 100644 +--- a/dpdk/.github/workflows/build.yml ++++ b/dpdk/.github/workflows/build.yml +@@ -23,68 +23,63 @@ jobs: + LIBABIGAIL_VERSION: libabigail-1.8 + MINI: ${{ matrix.config.mini != '' }} + PPC64LE: ${{ matrix.config.cross == 'ppc64le' }} +- REF_GIT_TAG: none ++ REF_GIT_TAG: v21.11 + RUN_TESTS: ${{ contains(matrix.config.checks, 'tests') }} + + strategy: + fail-fast: false + matrix: + config: +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: static +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: shared + mini: mini +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: shared +- checks: doc+tests +- - os: ubuntu-18.04 ++ checks: abi+doc+tests ++ - os: ubuntu-20.04 + compiler: clang + library: static +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: clang + library: shared + checks: doc+tests +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: static + cross: i386 +- # Note: common/cnxk is disabled for Ubuntu 18.04 +- # https://bugs.dpdk.org/show_bug.cgi?id=697 +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: static + cross: aarch64 +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: shared + cross: aarch64 +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: static + cross: ppc64le +- - os: ubuntu-18.04 ++ - os: ubuntu-20.04 + compiler: gcc + library: shared + cross: ppc64le + + steps: + - name: Checkout sources +- uses: actions/checkout@v2 ++ uses: actions/checkout@v3 + - name: Generate cache keys + id: get_ref_keys + run: | +- echo -n '::set-output name=ccache::' +- echo 'ccache-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-'$(date -u +%Y-w%W) +- echo -n '::set-output name=libabigail::' +- echo 'libabigail-${{ matrix.config.os }}' +- echo -n '::set-output name=abi::' +- echo 'abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' ++ echo 'ccache=ccache-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-'$(date -u +%Y-w%W) >> $GITHUB_OUTPUT ++ echo 'libabigail=libabigail-${{ env.LIBABIGAIL_VERSION }}-${{ matrix.config.os }}' >> $GITHUB_OUTPUT ++ echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT + - name: Retrieve ccache cache +- uses: actions/cache@v2 ++ uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ steps.get_ref_keys.outputs.ccache }}-${{ github.ref }} +@@ -92,13 +87,13 @@ jobs: + ${{ steps.get_ref_keys.outputs.ccache }}-refs/heads/main + - name: Retrieve libabigail cache + id: libabigail-cache +- uses: actions/cache@v2 ++ uses: actions/cache@v3 + if: env.ABI_CHECKS == 'true' + with: + path: libabigail + key: ${{ steps.get_ref_keys.outputs.libabigail }} + - name: Retrieve ABI 
reference cache +- uses: actions/cache@v2 ++ uses: actions/cache@v3 + if: env.ABI_CHECKS == 'true' + with: + path: reference +@@ -141,7 +136,7 @@ jobs: + run: .ci/linux-build.sh + - name: Upload logs on failure + if: failure() +- uses: actions/upload-artifact@v2 ++ uses: actions/upload-artifact@v3 + with: + name: meson-logs-${{ join(matrix.config.*, '-') }} + path: | +diff --git a/dpdk/.mailmap b/dpdk/.mailmap +new file mode 100644 +index 0000000000..fe94cbe1ff +--- /dev/null ++++ b/dpdk/.mailmap +@@ -0,0 +1,1614 @@ ++Aakash Sasidharan ++Aaro Koskinen ++Aaron Campbell ++Aaron Conole ++Abdullah Ömer Yamaç ++Abdullah Sevincer ++Abed Kamaluddin ++Abhijit Sinha ++Abhimanyu Saini ++Abhinandan Gujjar ++Abhishek Maheshwari ++Abhishek Marathe ++Abhishek Sachan ++Abraham Tovar ++Adam Bynes ++Adam Dybkowski ++Adam Ludkiewicz ++Adham Masarwah ++Adrian Moreno ++Adrian Podlawski ++Adrien Mazarguil ++Ady Agbarih ++Agalya Babu RadhaKrishnan ++Aidan Goddard ++Aideen McLoughlin ++Ajit Khaparde ++Akash Saxena ++Akeem G Abodunrin ++Akhil Goyal ++Alain Leon ++Alan Carew ++Alan Dewar ++Alan Liu ++Alan Winkowski ++Alejandro Lucero ++Aleksander Gajewski ++Aleksandr Loktionov ++Aleksandr Miloshenko ++Aleksey Baulin ++Aleksey Katargin ++Ales Musil ++Alexander Bechikov ++Alexander Belyakov ++Alexander Chernavin ++Alexander Guy ++Alexander Kozyrev ++Alexander Matushevsky ++Alexander Solganik ++Alexander V Gutkin ++Alexandre Ferrieux ++Alexey Kardashevskiy ++Alex Kiselev ++Alex Marginean ++Alex Markuze ++Alex Porosanu ++Alex Rosenbaum ++Alex Vesker ++Alex Wang ++Alex Zelezniak ++Alfredo Cardigliano ++Ali Alnubani ++Alice Michael ++Alin Rauta ++Ali Volkan Atli ++Allain Legacy ++Allen Hubbe ++Alok Makhariya ++Alvaro Karsz ++Alvin Zhang ++Aman Singh ++Amaranath Somalapuram ++Amine Kherbouche ++Amin Tootoonchian ++Ami Sabo ++Amit Bernstein ++Amit Gupta ++Amit Prakash Shukla ++Amr Mokhtar ++Amruta Zende ++Amrutha Sampath ++Ananda Sathyanarayana ++Anand B Jyoti ++Anand Rawat ++Anand Sunkad ++Anatolii Gerasymenko ++Anatoly Burakov ++Anbarasan Murugesan ++Anders Roxell ++Andrea Arcangeli ++Andrea Grandi ++Andre Richter ++Andrew Boyer ++Andrew Harvey ++Andrew Jackson ++Andrew Lee ++Andrew Pinski ++Andrew Rybchenko ++Andrey Chilikin ++Andrey Nikolaev ++Andrey Vesnovaty ++Andrii Pypchenko ++Andrius Sirvys ++Andriy Berestovskyy ++Andrzej Ostruszka ++Andy Gospodarek ++Andy Green ++Andy Moreton ++Andy Pei ++Anirudh Venkataramanan ++Ankur Dwivedi ++Anna Lukin ++Anoob Joseph ++Antara Ganesh Kolar ++Anthony Fee ++Antonio Fischetti ++Anupam Kapoor ++Apeksha Gupta ++Archana Muniganti ++Archit Pandey ++Arkadiusz Kubalewski ++Arkadiusz Kusztal ++Arnon Warshavsky ++Arshdeep Kaur ++Artem V. 
Andreev ++Artur Rojek ++Artur Trybula ++Artur Tyminski ++Asaf Penso ++Asaf Ravid ++Ashijeet Acharya ++Ashish Gupta ++Ashish Jain ++Ashish Paul ++Ashish Sadanandan ++Ashish Shah ++Ashwin Sekhar T K ++Asim Jamshed ++Aviad Yehezkel ++Avi Kivity ++Aws Ismail ++Ayuj Verma ++Balakrishna Bhamidipati ++Balasubramanian Manoharan ++Balazs Nemeth ++Bao-Long Tran ++Barak Enat ++Barry Cao ++Bartosz Staszewski ++Baruch Siach ++Bassam Zaid AlKilani ++Beilei Xing ++Bei Sun ++Benjamin Le Berre ++Ben Magistro ++Benoît Canet ++Benoît Ganne ++Ben Pfaff ++Ben Shelton ++Ben Walker ++Bernard Iremonger ++Bert van Leeuwen ++Bhagyada Modali ++Bharat Mota ++Bill Hong ++Billy McFall ++Billy O'Mahony ++Bing Zhao ++Bin Huang ++Bin Zheng ++Björn Töpel ++Bo Chen ++Boleslav Stankevich ++Boon Ang ++Boris Pismenny ++Brandon Lo ++Brendan Ryan ++Brett Creeley ++Brian Archbold ++Brian Brooks ++Brian Dooley ++Brian Johnson ++Brian Poole ++Brian Russell ++Brick Yang ++Bruce Allan ++Bruce Liu <2103458176@qq.com> ++Bruce Merry ++Bruce Richardson ++Bryan Benson ++Byron Marohn ++Carolyn Wyborny ++Chaeyong Chong ++Chaitanya Babu Talluri ++Chandubabu Namburu ++Changchun Ouyang ++Changpeng Liu ++Changqing Wu ++Chaoyong He ++Chao Zhu ++Charles Brett ++Charles Myers ++Chas Williams <3chas3@gmail.com> ++Chenbo Xia ++Chengchang Tang ++Chengfeng Ye ++Chenghu Yao ++Cheng Jiang ++Chenglian Sun ++Cheng Liu ++Cheng Peng ++Chengwen Feng ++Chenmin Sun ++Chenxu Di ++Cheryl Houser ++Chinh T Cao ++Chintu Hetam ++Choonho Son ++Chris Metcalf ++Christian Ehrhardt ++Christian Maciocco ++Christophe Fontaine ++Christophe Grosse ++Christopher Pau ++Christopher Reder ++Christoph Gysin ++Christos Ricudis ++Chris Wright ++Chuanshe Zhang ++Chuhong Yao ++Chunsong Feng ++Churchill Khangar ++Cian Ferriter ++Ciara Loftus ++Ciara Power ++Claire Murphy ++Cody Doucette ++Congwen Zhang ++Conor Fogarty ++Conor Walsh ++Cristian Bidea ++Cristian Dumitrescu ++Cristian Sovaiala ++Cunming Liang ++Cyril Chemparathy ++Cyril Cressent ++Dahir Osman ++Damian Milosek ++Damian Nowak ++Damien Millescamps ++Damjan Marion ++Damodharam Ammepalli ++Dan Aloni ++Dana Vardi ++Dan Gora ++Daniele Di Proietto ++Daniel Kaminsky ++Daniel Kan ++Daniel Martin Buckley ++Daniel Mrzyglod ++Daniel Shelepov ++Daniel Verkamp ++Dan Nowlin ++Danny Patel ++Danny Zhou ++Dan Wei ++Dapeng Yu ++Darek Stojaczyk ++Daria Kolistratova ++Dariusz Chaberski ++Dariusz Jagus ++Dariusz Sosnowski ++Dariusz Stojaczyk ++Darren Edamura ++Dave Burley ++Dave Ertman ++David Binderman ++David Bouyeure ++David Christensen ++David Coyle ++Davide Caratti ++David George ++David Harton ++David Hunt ++David Liu ++David MacDougal ++David Marchand ++David Riddoch ++David Su ++David Verbeiren ++David Wilder ++David Zeng ++Dawid Gorecki ++Dawid Jurczak ++Dawid Lukwinski ++Dawid Zielinski ++Daxue Gao ++Declan Doherty ++Deepak Khandelwal ++Deepak Kumar Jain ++Deirdre O'Connor ++Dekel Peled ++Denis Pryazhennikov ++Dennis Marinus ++Derek Chickles ++Des O Dea ++Devendra Singh Rawat ++Dex Chen ++Dexia Li ++Dexuan Cui ++Dharmik Thakkar ++Dheemanth Mallikarjun ++Diana Wang ++Didier Pallard ++Dilshod Urazov ++Ding Zhi ++Diogo Behrens ++Dirk-Holger Lenz ++Dmitri Epshtein ++Dmitriy Yakovlev ++Dmitry Eremin-Solenikov ++Dmitry Kozlyuk ++Dmitry Vyukov ++Dodji Seketeli ++Donald Lee ++Dongdong Liu ++Dongsheng Rong ++Dongsu Han ++Dong Wang ++Dongyang Pan <197020236@qq.com> ++Dong Zhou ++Don Provan ++Don Wallwork ++Doug Dziggel ++Douglas Flint ++Dr. 
David Alan Gilbert ++Drocula Lambda ++Dror Birkman ++Dukai Yuan ++Dumitru Ceara ++Duncan Bellamy ++Dustin Lundquist ++Dzmitry Sautsa ++Ed Czeck ++Eduard Serra ++Edward Makarov ++Edwin Brossette ++Eelco Chaudron ++Elad Nachman ++Elad Persiko ++Elena Agostini ++Eli Britstein ++Elza Mathew ++Emma Finn ++Emma Kenny ++Emmanuel Roullit ++Eoin Breen ++Erez Ferber ++Erez Shitrit ++Eric Kinzie ++Eric Zhang ++Erik Gabriel Carrillo ++Erik Ziegenbalg ++Erlu Chen ++Eugenio Pérez ++Eugeny Parshutin ++Evan Swanson ++Evgeny Efimov ++Evgeny Im ++Evgeny Schemeilin ++Fabio Pricoco ++Fady Bader ++Faicker Mo ++Fangfang Wei ++Fang TongHao ++Fan Zhang ++Farah Smith ++Fei Chen ++Feifei Wang ++Fei Qin ++Fengjiang Liu ++Fengnan Chang ++Fengtian Guo ++Ferdinand Thiessen ++Ferruh Yigit ++Fidaullah Noonari ++Fiona Trahe ++Flavia Musatescu ++Flavio Leitner ++Forrest Shi ++Francesco Mancino ++Francesco Santoro ++Francis Kelly ++Franck Lenormand ++François-Frédéric Ozog ++Frank Du ++Frank Zhao ++Frederico Cadete ++Fredrik A Lindgren ++Gabriel Ganne ++Gaetan Rivet ++Gagandeep Singh ++Gage Eads ++Gal Cohen ++Gal Shalom ++Ganapati Kundapura ++Ganghui Zeng ++Gang Jiang ++Gang Yang ++Gao Feng ++Gaoxiang Liu ++Gargi Sau ++Gary Mussar ++Gaurav Singh ++Gautam Dawar ++Gavin Hu ++Geoffrey Le Gourriérec ++Geoffrey Lv ++Geoff Thorpe ++George Prekas ++George Wilkie ++Georgios Katsikas ++Georgiy Levashov ++Georg Sauthoff ++Gerald Rogers ++Gerry Gribbon ++Ghalem Boudour ++Gilad Berman ++Girish Nandibasappa ++Gong Deli ++Gordon Noonan ++Gosia Bakota ++Govindarajulu Varadarajan ++Gowrishankar Muthukrishnan ++Gregory Etelson ++Greg Tucker ++Grishma Kotecha ++Grzegorz Galkowski ++Grzegorz Nitka ++Grzegorz Siwik ++Guduri Prathyusha ++Guido Barzini ++Guillaume Gaudonville ++Guinan Sun ++Guolin Yang ++Guo Xin ++Guoyang Zhou ++Guruprasad Rao ++Guy Kaneti ++Guy Tzalik ++Haggai Eran ++Haifei Luo ++Haifeng Gao ++Haifeng Li ++Haifeng Lin ++Haifeng Tang ++Haijun Chu ++Hailin Xu ++Haiyang Zhang ++Haiying Wang ++Haiyue Wang ++Hamdan Igbaria ++Hamza Khan ++Hannes Frederic Sowa ++Hanoch Haim ++Hanumanth Pothula ++Hao Chen ++Hao Wu ++Hari Kumar Vemula ++Harini Ramakrishnan ++Hariprasad Govindharajan ++Harish Patil ++Harman Kalra ++Harneet Singh ++Harold Huang ++Harrison McCullough ++Harry van Haaren ++Harshad Narayane ++Harshitha Ramamurthy ++Hasan Alayli ++Hayato Momma ++Heinrich Kuhn ++Heinrich Schuchardt ++Helin Zhang ++Hemant Agrawal ++Heng Ding ++Hengjian Zhang ++Heng Jiang ++Heng Wang ++Henning Schild ++Henry Cai ++Henry Nadeau ++Henry Tieman ++Heqing Zhu ++Herakliusz Lipiec ++Herbert Guan ++Hernan Vargas ++Herry Chen ++Hideyuki Yamashita ++Hiral Shah ++Hiroki Shirokura ++Hiroshi Shimamoto ++Hiroyuki Mikita ++Hongbo Zheng ++Hongjun Ni ++Hongzhi Guo ++Honnappa Nagarahalli ++Horia Geanta Neag ++H. Peter Anvin ++Hrvoje Habjanic ++Huaibin Wang ++Huanle Han ++Huawei Xie ++Huichao Cai ++Huilong Xu ++Huisong Li ++Hui Zhao ++Huzaifa Rahman ++Hyong Youb Kim ++Hyun Yoo ++Ian Betts ++Ian Dolzhansky ++Ian Stokes ++Ibtisam Tariq ++Idan Hackmon ++Ido Barnea ++Ido Goshen ++Ido Segev ++Igor Chauskin ++Igor Romanov ++Igor Russkikh ++Igor Ryzhov ++Ildar Mustafin ++Ilia Kurakin ++Ilja Van Sprundel ++Ilya Maximets ++Ilya V. 
Matveychikov ++Ilyes Ben Hamouda ++Intiyaz Basha ++Itsuro Oda ++Ivan Boule ++Ivan Dyukov ++Ivan Ilchenko ++Ivan Malov ++Ivan Nardi ++Jacek Bułatek ++Jacek Naczyk ++Jacek Piasecki ++Jacek Siuda ++Jacob Keller ++Jakub Chylkowski ++Jakub Grajciar ++Jakub Neruda ++Jakub Palider ++Jakub Poczatek ++Jakub Wysocki ++James Davidson ++James Fox ++James Grant ++James Hershaw ++James Poole ++Jamie Lavigne ++Jananee Parthasarathy ++Jan Beulich ++Jan Blunck ++Jan Medala ++Jan Remes ++Jan Viktorin ++Jan Wickbom ++Jaroslaw Gawin ++Jaroslaw Ilgiewicz ++Jason He ++Jason Wang ++Jasvinder Singh ++Jay Ding ++Jay Jayatheerthan ++Jay Rolette ++Jay Zhou ++Jean Dao ++Jean-Mickael Guerin ++Jean Tourrilhes ++Jeb Cramer ++Jecky Pei ++Jeff Daly ++Jeff Guo ++Jeff Kirsher ++Jeffrey Huang ++Jeff Shaw ++Jens Freimann ++Jeremy Plsek ++Jeremy Spewock ++Jerin Jacob ++Jerome Jutteau ++Jerry Hao OS ++Jerry Lilijun ++Jerry Zhang ++Jesna K E ++Jesper Wramberg ++Jesse Brandeburg ++Jesse Bruni ++Jia He ++Jianbo Liu ++Jianfeng Tan ++Jiangu Zhao ++Jianwei Ma ++Jianwei Mei ++Jiaqi Min ++Jiawei Wang ++Jiawei Zhu ++Jiawen Wu ++Jiayu Hu ++Jia Yu ++Jie Hai ++Jie Liu ++Jie Pan ++Jie Wang ++Jie Zhou ++Jieqiang Wang ++Jijiang Liu ++Jilei Chen ++Jim Harris ++Jincheng Miao ++Jing Chen ++Jingguo Fu ++Jingjing Wu ++Jingzhao Ni ++Jin Heo ++Jin Ling ++Jin Liu ++Jin Yu ++Jiri Slaby ++Job Abraham ++Jochen Behrens ++Joey Xing ++Johan Faltstrom ++Johan Källström ++Johan Samuelsson ++John Alexander ++John Baldwin ++John Carney ++John Daley ++John Griffin ++John Guzik ++John Hurley ++John Jacques ++John J Browne ++John Levon ++John McNamara ++John Miller ++John OLoughlin ++John Ousterhout ++John W. Linville ++Jonas Pfefferle ++Jonathan Erb ++Jonathan Tsai ++Jon DeVree ++Jon Loeliger ++Joongi Kim ++Jørgen Østergaard Sloth ++Jörg Thalheim ++Joseph Richard ++Josh Soref ++Joshua Hay ++Joshua Washington ++Joyce Kong ++Jozef Martiniak ++JP Lee ++Juan Antonio Montesinos ++Juhamatti Kuusisaari ++Juho Snellman ++Julien Castets ++Julien Courtat ++Julien Cretin ++Julien Massonneau ++Julien Meunier ++Július Milan ++Junfeng Guo ++Junjie Chen ++Junjie Wan ++Jun Qiu ++Jun W Zhou ++Junxiao Shi ++Jun Yang ++Junyu Jiang ++Juraj Linkeš ++Kai Ji ++Kaijun Zeng ++Kaiwen Deng ++Kaiyu Zhang ++Kalesh AP ++Kamalakannan R ++Kamalakshitha Aligeri ++Kamil Bednarczyk ++Kamil Chalupnik ++Kamil Rytarowski ++Kamil Vojanec ++Kanaka Durga Kotamarthy ++Karen Sornek ++Karla Saur ++Karl Bonde Torp ++Karmarkar Suyash ++Karol Kolacinski ++Karra Satwik ++Kathleen Capella ++Kefu Chai ++Keiichi Watanabe ++Keith Wiles ++Kent Wires ++Keunhong Lee ++Kevin Laatz ++Kevin Lampis ++Kevin Liu ++Kevin O'Sullivan ++Kevin Scott ++Kevin Traynor ++Ke Xu ++Ke Zhang ++Khoa To ++Kiran KN ++Kiran Kumar K ++Kiran Kumar ++Kiran Patil ++Kirill Rybalchenko ++Kishore Padmanabha ++Klaus Degner ++Kommula Shiva Shankar ++Konstantin Ananyev ++Krishna Murthy ++Krzysztof Galazka ++Krzysztof Kanas ++Krzysztof Karas ++Krzysztof Witek ++Kuan Xu ++Kuba Kozak ++Kumar Amber ++Kumara Parameshwaran ++Kumar Sanghvi ++Kyle Larose ++Lance Richardson ++Laszlo Ersek ++Laura Stroe ++Laurent Hardy ++Lavanya Govindarajan ++Lazaros Koromilas ++Leah Tekoa ++Lee Daly ++Lee Roberts ++Lei Cai ++Lei Gong ++Lei Ji ++Lei Yao ++Leonid Myravjev ++Leo Xu ++Leszek Zygo ++Levend Sayar ++Lev Faerman ++Lewei Yang ++Leyi Rong ++Liang Ma ++Liang-Min Larry Wang ++Liang Xu ++Liang Zhang ++Li Feng ++Li Han ++Lihong Ma ++Lijian Zhang ++Lijuan Tu ++Lijun Ou ++Liming Sun ++Linfan Hu ++Lingli Chen ++Lingyu Liu ++Lin Li ++Linsi Yuan ++Lior Margalit ++Li Qiang 
++Liron Himi ++Li Wei ++Li Zhang ++Longfeng Liang ++Long Li ++Long Wu ++Lotem Leder ++Louise Kilheeney ++Louis Luo ++Louis Peens ++Luca Boccassi ++Luc Pelletier ++Lukasz Bartosik ++Lukasz Czapnik ++Lukasz Gosiewski ++Lukasz Krakowiak ++Lukasz Kupczak ++Lukasz Majczak ++Lukasz Wojciechowski ++Luke Gorrie ++Lunyuan Cui ++Lu Qiuwen ++Lyn M ++Maayan Kashani ++Maciej Bielski ++Maciej Czekaj ++Maciej Fijalkowski ++Maciej Gajdzica ++Maciej Hefczyc ++Maciej Machnikowski ++Maciej Paczkowski ++Maciej Rabeda ++Maciej Szwed ++Madhuker Mythri ++Mahesh Adulla ++Mahipal Challa ++Mah Yock Gen ++Mairtin o Loingsigh ++Mallesham Jatharakonda ++Mallesh Koujalagi ++Malvika Gupta ++Mandal Purna Chandra ++Mandeep Rohilla ++Manish Chopra ++Manish Tomar ++Mao Jiang ++Mao YingMing ++Marcel Apfelbaum ++Marcel Cornu ++Marcelo Ricardo Leitner ++Marcin Baran ++Marcin Danilewicz ++Marcin Domagala ++Marcin Formela ++Marcin Hajkowski ++Marcin Kerlin ++Marcin Smoczynski ++Marcin Szycik ++Marcin Wilk ++Marcin Wojtas ++Marcin Zapolski ++Marco Varlese ++Marc Sune ++Maria Lingemark ++Mario Carrillo ++Mário Kuka ++Mariusz Drost ++Mark Asselstine ++Mark Bloch ++Mark Gillott ++Mark Kavanagh ++Marko Kovacevic ++Markos Chandras ++Mark Smith ++Mark Spender ++Markus Theil ++Marta Plantykow ++Martijn Bakker ++Martin Harvey ++Martin Havlik ++Martin Klozik ++Martin Spinler ++Martin Weiser ++Martyna Szapar ++Maryam Tahhan ++Masoud Hasanifard ++Matan Azrad ++Matej Vido ++Mateusz Kowalski ++Mateusz Pacuszka ++Mateusz Rusinski ++Matias Elo ++Mats Liljegren ++Matteo Croce ++Matthew Dirba ++Matthew Hall ++Matthew Smith ++Matthew Vick ++Matthias Gatto ++Matthieu Ternisien d'Ouville ++Mattias Rönnblom ++Matt Peters ++Mauricio Vasquez B ++Mauro Annarumma ++Maxime Coquelin ++Maxime Gouin ++Maxime Leroy ++Md Fahad Iqbal Polash ++Megha Ajmera ++Meijuan Zhao ++Meir Levi ++Meir Tseitlin ++Mesut Ali Ergin ++Miao Li ++Michael Barker ++Michael Baum ++Michael Frasca ++Michael Habibi ++Michael Haeuptle ++Michael Lilja ++Michael Luo ++Michael McConville ++Michael Pfeiffer ++Michael Qiu ++Michael Rossberg ++Michael Santana ++Michael Savisko ++Michael Shamis ++Michael S. 
Tsirkin ++Michael Wildt ++Michal Berger ++Michal Jastrzebski ++Michal Kobylinski ++Michal Krawczyk ++Michal Litwicki ++Michal Mazurek ++Michal Michalik ++Michał Mirosław ++Michal Schmidt ++Michal Swiatkowski ++Michal Wilczynski ++Michel Machado ++Miguel Bernal Marin ++Mihai Pogonaru ++Mike Baucom ++Mike Pattrick ++Mike Sowka ++Mike Stolarchuk ++Mike Ximing Chen ++Milena Olech ++Min Cao ++Minghuan Lian ++Mingjin Ye ++Mingshan Zhang ++Mingxia Liu ++Ming Zhao ++Min Hu (Connor) ++Min Wang (Jushui) ++Min Zhou ++Miroslaw Walukiewicz ++Mitch Williams ++Mit Matelske ++Mohamad Noor Alim Hussin ++Mohammad Abdul Awal ++Mohammad Iqbal Ahmad ++Mohammed Gamal ++Mohsin Kazmi ++Mohsin Mazhar Shaikh ++Mohsin Shaikh ++Morten Brørup ++Moti Haimovsky ++Muhammad Ahmad ++Muhammad Bilal ++Mukesh Dua ++Murphy Yang ++Murthy NSSR ++Muthurajan Jayakumar ++Nachiketa Prachanda ++Nagadheeraj Rottela ++Naga Harish K S V ++Naga Suresh Somarowthu ++Nalla Pradeep ++Na Na ++Nan Chen ++Nannan Lu ++Nan Zhou ++Narcisa Vasile ++Narender Vangati ++Naresh Kumar PBS ++Natalie Samsonov ++Natanael Copa ++Nathan Law ++Nathan Skrzypczak ++Neel Patel ++Neil Horman ++Nelio Laranjeiro ++Nelson Escobar ++Nemanja Marjanovic ++Netanel Belgazal ++Netanel Gonen ++Niall Power ++Nick Connolly ++Nick Nunley ++Niclas Storm ++Nicolas Chautru ++Nicolas Dichtel ++Nicolas Harnois ++Nicolás Pernas Maradei ++Nikhil Agarwal ++Nikhil Jagtap ++Nikhil Rao ++Nikhil Vasoya ++Nikita Kozlov ++Niklas Söderlund ++Nikolay Nikolaev ++Ning Li ++Nipun Gupta ++Nir Efrati ++Nirmoy Das ++Nithin Dabilpuram ++Nitin Saxena ++Nitzan Weller ++Noa Ezra ++Nobuhiro Miki ++Norbert Ciosek ++Odi Assli ++Ognjen Joldzic ++Ola Liljedahl ++Oleg Polyakov ++Olga Shern ++Olivier Gournet ++Olivier Matz ++Omar Awaysa ++Omkar Maslekar ++Omri Mor ++Ophir Munk ++Or Ami ++Ori Kam ++Owen Hilyard ++Pablo Cascón ++Pablo de Lara ++Padam Jeet Singh ++Padraig Connolly ++Pallantla Poornima ++Pallavi Kadam ++Pandi Kumar Maharajan ++Pankaj Gupta ++Panu Matilainen ++Paolo Valerio ++Parav Pandit ++Pascal Mazon ++Pashupati Kumar ++Patrice Buriez ++Patrick Fu ++Patrick Kutch ++Patrick Lu ++Patrick MacArthur ++Patrik Andersson ++Paul Atkins ++Paul E. 
McKenney ++Paul Fox ++Paul Greenwalt ++Paulis Gributs ++Paul Luse ++Paul M Stillwell Jr ++Pavan Nikhilesh ++Pavel Belous ++Pavel Boldin ++Pavel Fedin ++Pavel Ivashchenko ++Pavel Krauz ++Pavel Shirshov ++Pawel Malinowski ++Pawel Modrak ++Pawel Rutkowski ++Pawel Wodkowski ++Pei Chao ++Pei Zhang ++Peng He ++Peng Huang ++Peng Sun ++Peng Yu ++Peng Zhang ++Pengzhen Liu ++Peter Mccarthy ++Peter Spreadborough ++Petr Houska ++Phanendra Vukkisala ++Phil Yang ++Philip Prindeville ++Pierre Pfister ++Piotr Azarewicz ++Piotr Bartosiewicz ++Piotr Bronowski ++Piotr Gardocki ++Piotr Kubaj ++Piotr Kwapulinski ++Piotr Pietruszewski ++Piotr Skajewski ++Pradeep Satyanarayana ++Prashant Bhole ++Prashant Upadhyaya ++Prateek Agarwal ++Praveen Shetty ++Pravin Pathak ++Prince Takkar ++Priyanka Jain ++Przemyslaw Ciesielski ++Przemyslaw Czesnowicz ++Przemyslaw Patynowski ++Przemyslaw Zegan ++Pu Xu <583493798@qq.com> ++Qian Xu ++Qiao Liu ++Qi Fu ++Qimai Xiao ++Qiming Chen ++Qiming Yang ++Qinglai Xiao ++Qingmin Liu ++Qin Sun ++Qi Zhang ++Quentin Armitage ++Qun Wan ++Radha Mohan Chintakuntla ++Radoslaw Biernacki ++Radu Bulie ++Radu Nicolau ++Rafael Ávila de Espíndola ++Rafal Kozik ++Ragothaman Jayaraman ++Rahul Bhansali ++Rahul Gupta ++Rahul Lakkireddy ++Rahul Shah ++Raja Zidane ++Rajesh Ravi ++Rakesh Kudurumalla ++Ralf Hoffmann ++Rami Rosen ++Randy Schacher ++Rani Sharoni ++Ranjit Menon ++Rasesh Mody ++Rashmi Shetty ++Raslan Darawsheh ++Rastislav Cernay ++Raveendra Padasalagi ++Ravi Kerur ++Ravi Kumar ++Ray Jui ++Ray Kinsella ++Raz Amir ++Real Valiquette ++Rebecca Troy ++Remi Pommarel ++Remy Horton ++Renata Saiakhova ++Reshma Pattan ++Ricardo Roldan ++Ricardo Salveti ++Richael Zhuang ++Richard Donkin ++Richard Eklycke ++Richard Houldsworth ++Richard Walsh ++Rich Lane ++Ricky Li ++R Mohamed Shah ++Robert Konklewski ++Robert Malz ++Robert Sanford ++Robert Shearman ++Robert Stonehouse ++Robin Jarry ++Robin Zhang ++Rob Miller ++Rob Scheepens ++Roger Melton ++Rohit Raj ++Roland Qi ++Rolf Neugebauer ++Romain Delhomel ++Roman Dementiev ++Roman Fridlyand ++Roman Kapl ++Roman Korynkevych ++Roman Storozhenko ++Roman Zhukov ++Ronghua Zhang ++RongQiang Xie ++RongQing Li ++Rongwei Liu ++Rory Sexton ++Rosen Xu ++Roy Franz ++Roy Pledge ++Roy Shterman ++Ruifeng Wang ++Rushil Gupta ++Ryan E Hall ++Sabyasachi Sengupta ++Sachin Saxena ++Sagar Abhang ++Sagi Grimberg ++Saikrishna Edupuganti ++Saleh Alsouqi ++Salem Sol ++Sameh Gobriel ++Sam Grove ++Samik Gupta ++Sampath Peechu ++Samuel Gauthier ++Sangjin Han ++Sankar Chokkalingam ++Santoshkumar Karanappa Rastapur ++Santosh Shukla ++Saoirse O'Donovan ++Saori Usami ++Sarath Somasekharan ++Sarosh Arif ++Sasha Neftin ++Satananda Burla ++Satha Rao ++Satheesh Paul ++Sathesh Edara ++Saurabh Singhal ++Savinay Dharmappa ++Scott Branden ++Scott Daniels ++Scott Wasson ++Scott W Taylor ++Seán Harte ++Sean Harte ++Sean Morrissey ++Sean Zhang ++Sebastian Basierski ++Selwin Sebastian ++Sergey Balabanov ++Sergey Dyasly ++Sergey Madaminov ++Sergey Mironov ++Sergey Temerkhanov ++Sergio Gonzalez Monroy ++Seth Arnold ++Seth Howell ++Shachar Beiser ++Shagun Agrawal ++Shahaf Shuler ++Shahaji Bhosle ++Shahed Shaikh ++Shai Brandes ++Shally Verma ++Shannon Nelson ++Shannon Zhao ++Shaopeng He ++Sharmila Podury ++Sharon Haroni ++Shay Agroskin ++Shay Amir ++Sha Zhang ++Shelton Chia ++Shepard Siegel ++Shesha Sreenivasamurthy ++Shibin Koikkara Reeny ++Shijith Thotton ++Shiqi Liu <835703180@qq.com> ++Shiri Kuzin ++Shivah Shankar S ++Shivanshu Shukla ++Shiweixian ++Shiyang He ++Shlomi Gridish ++Shougang Wang ++Shraddha 
Joshi ++Shreyansh Jain ++Shrikrishna Khare ++Shuai Zhu ++Shuanglin Wang ++Shuki Katzenelson ++Shun Hao ++Shu Shen ++Shweta Choudaha ++Shyam Kumar Shrivastav ++Shy Shyman ++Siddaraju DH ++Simei Su ++Simon Ellmann ++Simon Horman ++Simon Kagstrom ++Simon Kuenzer ++Siobhan Butler ++Sirshak Das ++Sivaprasad Tummala ++Sivaramakrishnan Venkat ++Siwar Zitouni ++Slawomir Mrozowicz ++Slawomir Rosek ++Smadar Fuks ++Solal Pirelli ++Solganik Alexander ++Somnath Kotur ++Song Jiale ++Song Zhu ++Sony Chacko ++Sotiris Salloumis ++Souvik Dey ++Spike Du ++Sridhar Samudrala ++Sriharsha Basavapatna ++Srikanth Yalavarthi ++Srinivas Narayan ++Srisivasubramanian Srinivasan ++Srujana Challa ++Stanislaw Grzeszczak ++Stanislaw Kardach ++Steeven Lee ++Steeven Li ++Stefan Baranoff ++Stefan Hajnoczi ++Stefan Puiu ++Stefan Wegrzyn ++Stepan Sojka ++Stephen Coleman ++Stephen Douthit ++Stephen Hemminger ++Stephen Hurd ++Steve Capper ++Steven Lariau ++Steven Luong ++Steven Webster ++Steven Zou ++Steve Rempe ++Steve Shin ++Steve Yang ++Stewart Allen ++Suanming Mou ++Subendu Santra ++Subhi Masri ++Subrahmanyam Nilla ++Sucharitha Sarananaga ++Sujith Sankar ++Sunila Sahu ++Sunil Kulkarni ++Sunil Kumar Kori ++Sunil Pai G ++Sunil Uttarwar ++Sun Jiajia ++Sunyang Wu ++Surabhi Boob ++Suyang Ju ++Sylvain Rodon ++Sylvia Grundwürmer ++Sylwester Dziedziuch ++Sylwia Wnuczko ++Szymon Sliwa ++Szymon T Cudzilo ++Tadhg Kearney ++Taekyung Kim ++Takanari Hayama ++Takayuki Usui ++Takeshi Yoshimura ++Takuya Asada ++Tal Avraham ++Tal Shnaiderman ++Tao Y Yang ++Tao Zhu ++Taripin Samuel ++Tarun Singh ++Tasnim Bashar ++Tejasree Kondoj ++Tengfei Zhang ++Tero Aho ++Tetsuya Mukawa ++Thadeu Lima de Souza Cascardo ++Thiago Martins ++Thibaut Collet ++Thierry Herbelot ++Thierry Martin ++Thinh Tran ++Thomas Faivre ++Thomas F Herbert ++Thomas Graf ++Thomas Long ++Thomas Monjalon ++Thomas Petazzoni ++Thomas Speier ++Tiago Lam ++Tianfei Zhang ++Tianhao Chai ++Tianjiao Liu ++Tianli Lai ++Tianyu Li ++Timmons C. Player ++Timothy McDaniel ++Timothy Miskell ++Timothy Redaelli ++Tim Shearer ++Ting Xu ++Tiwei Bie ++Todd Fujinaka ++Tomasz Cel ++Tomasz Duszynski ++Tomasz Jonak ++Tomasz Jozwiak ++Tomasz Kantecki ++Tomasz Konieczny ++Tomasz Kulasek ++Tomasz Zawadzki ++Tom Barbette ++Tom Crugnale ++Tom Millington ++Tom Rix ++Tone Zhang ++Tonghao Zhang ++Tony Nguyen ++Tsotne Chakhvadze ++Tudor Brindus ++Tudor Cornea ++Tummala Sivaprasad ++Tyler Retzlaff ++Umesh Kartha ++Usama Arif ++Usman Tanveer ++Vadim Podovinnikov ++Vadim Suraev ++Vakul Garg ++Vamsi Attunuru ++Vanshika Shukla ++Vasily Philipov ++Veerasenareddy Burru ++Venkata Suresh Kumar P ++Venkat Duvvuru ++Venkatesh Nuthula ++Venkatesh Srinivas ++Venkateshwarlu Nalla ++Venkat Koppula ++Venky Venkatesan ++Venky Venkatesh ++Viacheslav Galaktionov ++Viacheslav Ovsiienko ++Victor Kaplansky ++Victor Raj ++Vidya Sagar Velumuri ++Vignesh Sridhar ++Vijayakumar Muthuvel Manickam ++Vijaya Mohan Guvva ++Vijay Kumar Srivastava ++Vijay Srivastava ++Vikas Aggarwal ++Vikas Gupta ++Vikash Poddar ++Vimal Chungath ++Vincent Guo ++Vincent Jardin ++Vincent Li ++Vincent S. 
Cojot ++Vipin Varghese ++Vipul Ashri ++Visa Hankala ++Vishal Kulkarni ++Vishwas Danivas ++Vitaliy Mysak ++Vitaly Lifshits ++Vivek Sharma ++Vivien Didelot ++Vladimir Kuramshin ++Vladimir Medvedkin ++Vladyslav Buslov ++Vlad Zolotarov ++Vlastimil Kosar ++Volodymyr Fialko ++Vu Pham ++Walter Heymans ++Wang Sheng-Hui ++Wangyu (Eric) ++Waterman Cao ++Weichun Chen ++Wei Dai ++Weifeng Li ++Weiguo Li ++Wei Huang ++Wei Hu (Xavier) ++WeiJie Zhuang ++Weiliang Luo ++Wei Ling ++Wei Shen ++Wei Wang ++Wei Xie ++Weiyuan Li ++Wei Zhao ++Wen Chiu ++Wen-Chi Yang ++Wenfeng Liu ++Wenjie Li ++Wenjie Sun ++Wenjing Qiao ++Wenjun Wu ++Wentao Cui ++Wenwu Ma ++Wenxiang Qian ++Wenxuan Wu ++Wenzhuo Lu ++Weqaar Janjua ++Wiktor Pilarczyk ++William Tu ++Wisam Jaddo ++Wojciech Andralojc ++Wojciech Drewek ++Wojciech Liguzinski ++Wojciech Zmuda ++Xavier Simonart ++Xiangjun Meng ++Xiaoban Wu ++Xiaobing Zhang ++Xiaobo Chi ++Xiaofeng Deng ++Xiaofeng Liu ++Xiaohua Zhang ++Xiao Liang ++Xiaolong Ye ++Xiaonan Zhang ++Xiao Wang ++Xiaoxiao Zeng ++Xiaoxin Peng ++Xiaoyu Min ++Xiaoyun Li ++Xiaoyun Wang ++Xiao Zhang ++Xieming Katty ++Xinfeng Zhao ++Xingguang He ++Xingyou Chen ++Xin Long ++Xi Zhang ++Xuan Ding ++Xuan Li ++Xuekun Hu ++Xuelin Shi ++Xueming Li ++Xueming Zhang ++Xueqin Lin ++Xun Ni ++Xutao Sun ++Yaacov Hazan ++Yahui Cao ++Yajun Wu ++Yangchao Zhou ++Yanglong Wu ++Yang Zhang ++Yanjie Xu ++Yan Xia ++Yao-Po Wang ++Yao Zhao ++Yaqi Tang ++Yari Adan Petralanda ++Yaron Illouz ++Yaroslav Brustinov ++Yash Sharma ++Yasufumi Ogawa ++Yelena Krivosheev ++Yerden Zhumabekov ++Yevgeny Kliteynik ++Yicai Lu ++Yiding Zhou ++Yi Li ++Yi Liu ++Yilong Lv ++Yi Lu ++Yilun Xu ++Yinan Wang ++Ying A Wang ++Yingya Han ++Yinjun Zhang ++Yipeng Wang ++Yisen Zhuang ++Yixue Wang ++Yi Yang ++Yi Zhang ++Yoann Desmouceaux ++Yogesh Jangra ++Yogev Chaimovich ++Yongjie Gu ++Yongji Xie ++Yong Liu ++Yongping Zhang ++Yongseok Koh ++Yong Wang ++Yongxin Liu ++Yong Xu ++Yoni Gilad ++Youri Querry ++Yuanhan Liu ++Yuan Peng ++Yuan Wang ++Yufeng Mo ++Yuichi Nakai ++Yu Jiang ++Yulong Pei ++Yu Nemo Wenbin ++Yunjian Wang ++Yuri Chipchev ++Yury Kylulin ++Yuval Avnery ++Yuval Caduri ++Yuwei Zhang ++Yu Wenjun ++Yuying Zhang ++Yu Zhang ++Yvonne Yang ++Zalfresso-Jundzillo ++Zbigniew Bodek ++Zengmo Gao ++Zerun Fu ++Zhangfei Gao ++Zhangkun ++Zhaochen Zhan ++Zhaoyan Chen ++Zhenghua Zhou ++Zhe Tao ++Zhichao Zeng ++Zhigang Lu ++Zhiguang He ++Zhihong Peng ++Zhihong Wang ++Zhike Wang ++Zhimin Huang ++Zhipeng Lu ++Zhirun Yan ++Zhiwei He ++Zhiyong Yang ++Zhuobin Huang ++Zi Hu ++Zijie Pan ++Ziyang Xuan ++Ziye Yang ++Zoltan Kiss ++Zorik Machulsky ++Zyta Szpak diff --git a/dpdk/MAINTAINERS b/dpdk/MAINTAINERS -index 18d9edaf88..84d8e261d5 100644 +index 18d9edaf88..460c7fa96d 100644 --- a/dpdk/MAINTAINERS +++ b/dpdk/MAINTAINERS @@ -64,6 +64,8 @@ T: git://dpdk.org/next/dpdk-next-eventdev @@ -2341,18 +4168,103 @@ index 18d9edaf88..84d8e261d5 100644 T: git://dpdk.org/dpdk-stable Security Issues +@@ -93,6 +95,7 @@ F: devtools/update-patches.py + F: devtools/words-case.txt + F: license/ + F: .editorconfig ++F: .mailmap + + Build System + M: Bruce Richardson diff --git a/dpdk/VERSION b/dpdk/VERSION -index b570734337..63f795c0f8 100644 +index b570734337..071002af33 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -21.11.0 -+21.11.2 ++21.11.6 diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c -index c5fe440302..536ec64711 100644 +index c5fe440302..74dd72b3ac 100644 --- a/dpdk/app/dumpcap/main.c +++ b/dpdk/app/dumpcap/main.c -@@ -679,8 +679,13 @@ static void enable_pdump(struct rte_ring 
*r, struct rte_mempool *mp) +@@ -44,7 +44,6 @@ + #include + #include + +-#define RING_NAME "capture-ring" + #define MONITOR_INTERVAL (500 * 1000) + #define MBUF_POOL_CACHE_SIZE 32 + #define BURST_SIZE 32 +@@ -196,6 +195,7 @@ static void add_interface(uint16_t port, const char *name) + rte_exit(EXIT_FAILURE, "no memory for interface\n"); + + memset(intf, 0, sizeof(*intf)); ++ intf->port = port; + rte_strscpy(intf->name, name, sizeof(intf->name)); + + printf("Capturing on '%s'\n", name); +@@ -538,6 +538,7 @@ static void dpdk_init(void) + static struct rte_ring *create_ring(void) + { + struct rte_ring *ring; ++ char ring_name[RTE_RING_NAMESIZE]; + size_t size, log2; + + /* Find next power of 2 >= size. */ +@@ -551,31 +552,31 @@ static struct rte_ring *create_ring(void) + ring_size = size; + } + +- ring = rte_ring_lookup(RING_NAME); +- if (ring == NULL) { +- ring = rte_ring_create(RING_NAME, ring_size, +- rte_socket_id(), 0); +- if (ring == NULL) +- rte_exit(EXIT_FAILURE, "Could not create ring :%s\n", +- rte_strerror(rte_errno)); +- } ++ /* Want one ring per invocation of program */ ++ snprintf(ring_name, sizeof(ring_name), ++ "dumpcap-%d", getpid()); ++ ++ ring = rte_ring_create(ring_name, ring_size, ++ rte_socket_id(), 0); ++ if (ring == NULL) ++ rte_exit(EXIT_FAILURE, "Could not create ring :%s\n", ++ rte_strerror(rte_errno)); ++ + return ring; + } + + static struct rte_mempool *create_mempool(void) + { +- static const char pool_name[] = "capture_mbufs"; ++ char pool_name[RTE_MEMPOOL_NAMESIZE]; + size_t num_mbufs = 2 * ring_size; + struct rte_mempool *mp; + +- mp = rte_mempool_lookup(pool_name); +- if (mp) +- return mp; ++ snprintf(pool_name, sizeof(pool_name), "capture_%d", getpid()); + + mp = rte_pktmbuf_pool_create_by_ops(pool_name, num_mbufs, + MBUF_POOL_CACHE_SIZE, 0, + rte_pcapng_mbuf_size(snaplen), +- rte_socket_id(), "ring_mp_sc"); ++ rte_socket_id(), "ring_mp_mc"); + if (mp == NULL) + rte_exit(EXIT_FAILURE, + "Mempool (%s) creation failed: %s\n", pool_name, +@@ -636,6 +637,7 @@ static dumpcap_out_t create_output(void) + else { + mode_t mode = group_read ? 
0640 : 0600; + ++ fprintf(stderr, "File: %s\n", output_name); + fd = open(output_name, O_WRONLY | O_CREAT, mode); + if (fd < 0) + rte_exit(EXIT_FAILURE, "Can not open \"%s\": %s\n", +@@ -679,8 +681,13 @@ static void enable_pdump(struct rte_ring *r, struct rte_mempool *mp) flags |= RTE_PDUMP_FLAG_PCAPNG; TAILQ_FOREACH(intf, &interfaces, next) { @@ -2368,6 +4280,30 @@ index c5fe440302..536ec64711 100644 ret = rte_pdump_enable_bpf(intf->port, RTE_PDUMP_ALL_QUEUES, flags, snaplen, +@@ -778,8 +785,13 @@ int main(int argc, char **argv) + struct rte_ring *r; + struct rte_mempool *mp; + dumpcap_out_t out; ++ char *p; + +- progname = argv[0]; ++ p = strrchr(argv[0], '/'); ++ if (p == NULL) ++ progname = argv[0]; ++ else ++ progname = p + 1; + + dpdk_init(); + parse_opts(argc, argv); +@@ -837,7 +849,7 @@ int main(int argc, char **argv) + pcap_dump_close(out.dumper); + + cleanup_pdump_resources(); +- rte_free(bpf_filter); ++ + rte_ring_free(r); + rte_mempool_free(mp); + diff --git a/dpdk/app/pdump/main.c b/dpdk/app/pdump/main.c index 46f9d25db0..101ac7db9a 100644 --- a/dpdk/app/pdump/main.c @@ -2411,19 +4347,116 @@ index 46f9d25db0..101ac7db9a 100644 if (rte_eal_wait_lcore(lcore_id) < 0) rte_exit(EXIT_FAILURE, "failed to wait\n"); diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c -index ce140aaf84..e1ccdbbaa5 100644 +index ce140aaf84..b52c3ffbc5 100644 --- a/dpdk/app/proc-info/main.c +++ b/dpdk/app/proc-info/main.c -@@ -630,7 +630,7 @@ metrics_display(int port_id) +@@ -19,7 +19,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -101,6 +100,8 @@ static char *mempool_iter_name; + static uint32_t enable_dump_regs; + static char *dump_regs_file_prefix; - names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0); ++#define RSS_HASH_KEY_SIZE 64 ++ + /**< display usage */ + static void + proc_info_usage(const char *prgname) +@@ -621,24 +622,23 @@ metrics_display(int port_id) + return; + } + +- metrics = rte_malloc("proc_info_metrics", +- sizeof(struct rte_metric_value) * len, 0); ++ metrics = malloc(sizeof(struct rte_metric_value) * len); + if (metrics == NULL) { + printf("Cannot allocate memory for metrics\n"); + return; + } + +- names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0); ++ names = malloc(sizeof(struct rte_metric_name) * len); if (names == NULL) { - printf("Cannot allocate memory for metrcis names\n"); +- rte_free(metrics); + printf("Cannot allocate memory for metrics names\n"); - rte_free(metrics); ++ free(metrics); + return; + } + + if (len != rte_metrics_get_names(names, len)) { + printf("Cannot get metrics names\n"); +- rte_free(metrics); +- rte_free(names); ++ free(metrics); ++ free(names); + return; + } + +@@ -650,8 +650,8 @@ metrics_display(int port_id) + ret = rte_metrics_get_values(port_id, metrics, len); + if (ret < 0 || ret > len) { + printf("Cannot get metrics values\n"); +- rte_free(metrics); +- rte_free(names); ++ free(metrics); ++ free(names); return; } -@@ -1109,7 +1109,7 @@ show_tm(void) + +@@ -660,8 +660,8 @@ metrics_display(int port_id) + printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value); + + printf("%s############################\n", nic_stats_border); +- rte_free(metrics); +- rte_free(names); ++ free(metrics); ++ free(names); + } + #endif + +@@ -725,6 +725,7 @@ show_port(void) + struct rte_eth_fc_conf fc_conf; + struct rte_ether_addr mac; + struct rte_eth_dev_owner owner; ++ uint8_t rss_key[RSS_HASH_KEY_SIZE]; + + /* Skip if port is not in mask */ + if ((enabled_port_mask & (1ul << i)) == 0) 
+@@ -868,17 +869,18 @@ show_port(void) + printf("\n"); + } + ++ rss_conf.rss_key = rss_key; ++ rss_conf.rss_key_len = dev_info.hash_key_size; + ret = rte_eth_dev_rss_hash_conf_get(i, &rss_conf); + if (ret == 0) { +- if (rss_conf.rss_key) { +- printf(" - RSS\n"); +- printf("\t -- RSS len %u key (hex):", +- rss_conf.rss_key_len); +- for (k = 0; k < rss_conf.rss_key_len; k++) +- printf(" %x", rss_conf.rss_key[k]); +- printf("\t -- hf 0x%"PRIx64"\n", +- rss_conf.rss_hf); +- } ++ printf(" - RSS info\n"); ++ printf("\t -- key len : %u\n", ++ rss_conf.rss_key_len); ++ printf("\t -- key (hex) : "); ++ for (k = 0; k < rss_conf.rss_key_len; k++) ++ printf("%02x", rss_conf.rss_key[k]); ++ printf("\n\t -- hash function : 0x%"PRIx64"\n", ++ rss_conf.rss_hf); + } + + #ifdef RTE_LIB_SECURITY +@@ -1109,7 +1111,7 @@ show_tm(void) caplevel.n_nodes_max, caplevel.n_nodes_nonleaf_max, caplevel.n_nodes_leaf_max); @@ -2432,7 +4465,7 @@ index ce140aaf84..e1ccdbbaa5 100644 caplevel.non_leaf_nodes_identical, caplevel.leaf_nodes_identical); -@@ -1263,7 +1263,7 @@ show_ring(char *name) +@@ -1263,7 +1265,7 @@ show_ring(char *name) printf(" - Name (%s) on socket (%d)\n" " - flags:\n" "\t -- Single Producer Enqueue (%u)\n" @@ -2441,7 +4474,7 @@ index ce140aaf84..e1ccdbbaa5 100644 ptr->name, ptr->memzone->socket_id, ptr->flags & RING_F_SP_ENQ, -@@ -1504,10 +1504,10 @@ main(int argc, char **argv) +@@ -1504,10 +1506,10 @@ main(int argc, char **argv) if (nb_ports == 0) rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); @@ -2478,8 +4511,473 @@ index c2de18770d..06e3847ab9 100644 "leave 0 for default behaviour]\n" "[--" OPT_ITER_NUM "=]\n" "[--" OPT_VERBOSE "=]\n" +diff --git a/dpdk/app/test-bbdev/meson.build b/dpdk/app/test-bbdev/meson.build +index 76d4c26999..c4686251b9 100644 +--- a/dpdk/app/test-bbdev/meson.build ++++ b/dpdk/app/test-bbdev/meson.build +@@ -23,6 +23,6 @@ endif + if dpdk_conf.has('RTE_BASEBAND_ACC100') + deps += ['baseband_acc100'] + endif +-if dpdk_conf.has('RTE_LIBRTE_PMD_BBDEV_LA12XX') ++if dpdk_conf.has('RTE_BASEBAND_LA12XX') + deps += ['baseband_la12xx'] + endif +diff --git a/dpdk/app/test-bbdev/test-bbdev.py b/dpdk/app/test-bbdev/test-bbdev.py +index 291c80b0f5..b3eac3b4b7 100755 +--- a/dpdk/app/test-bbdev/test-bbdev.py ++++ b/dpdk/app/test-bbdev/test-bbdev.py +@@ -91,21 +91,18 @@ def kill(process): + params_string = " ".join(call_params) + + print("Executing: {}".format(params_string)) +- app_proc = subprocess.Popen(call_params) +- if args.timeout > 0: +- timer = Timer(args.timeout, kill, [app_proc]) +- timer.start() +- + try: +- app_proc.communicate() +- except: +- print("Error: failed to execute: {}".format(params_string)) +- finally: +- timer.cancel() +- +- if app_proc.returncode != 0: +- exit_status = 1 +- print("ERROR TestCase failed. Failed test for vector {}. 
Return code: {}".format( +- vector, app_proc.returncode)) +- ++ output = subprocess.run(call_params, timeout=args.timeout, universal_newlines=True) ++ except subprocess.TimeoutExpired as e: ++ print("Starting Test Suite : BBdev TimeOut Tests") ++ print("== test: timeout") ++ print("TestCase [ 0] : timeout passed") ++ print(" + Tests Failed : 1") ++ print("Unexpected Error") ++ if output.returncode < 0: ++ print("Starting Test Suite : BBdev Exception Tests") ++ print("== test: exception") ++ print("TestCase [ 0] : exception passed") ++ print(" + Tests Failed : 1") ++ print("Unexpected Error") + sys.exit(exit_status) +diff --git a/dpdk/app/test-bbdev/test_bbdev.c b/dpdk/app/test-bbdev/test_bbdev.c +index ac06d7320a..0092293725 100644 +--- a/dpdk/app/test-bbdev/test_bbdev.c ++++ b/dpdk/app/test-bbdev/test_bbdev.c +@@ -366,7 +366,8 @@ test_bbdev_configure_stop_queue(void) + * - queue should be started if deferred_start == + */ + ts_params->qconf.deferred_start = 0; +- rte_bbdev_queue_configure(dev_id, queue_id, &ts_params->qconf); ++ TEST_ASSERT_SUCCESS(rte_bbdev_queue_configure(dev_id, queue_id, &ts_params->qconf), ++ "Failed test for rte_bbdev_queue_configure"); + rte_bbdev_start(dev_id); + + TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, +diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c +index 0fa119a502..3f2bac6136 100644 +--- a/dpdk/app/test-bbdev/test_bbdev_perf.c ++++ b/dpdk/app/test-bbdev/test_bbdev_perf.c +@@ -70,13 +70,12 @@ + + #define SYNC_WAIT 0 + #define SYNC_START 1 +-#define INVALID_OPAQUE -1 + + #define INVALID_QUEUE_ID -1 + /* Increment for next code block in external HARQ memory */ + #define HARQ_INCR 32768 + /* Headroom for filler LLRs insertion in HARQ buffer */ +-#define FILLER_HEADROOM 1024 ++#define FILLER_HEADROOM 2048 + /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */ + #define N_ZC_1 66 /* N = 66 Zc for BG 1 */ + #define N_ZC_2 50 /* N = 50 Zc for BG 2 */ +@@ -87,6 +86,7 @@ + #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */ + #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */ + ++#define HARQ_MEM_TOLERANCE 256 + static struct test_bbdev_vector test_vector; + + /* Switch between PMD and Interrupt for throughput TC */ +@@ -1822,10 +1822,9 @@ check_enc_status_and_ordering(struct rte_bbdev_enc_op *op, + "op_status (%d) != expected_status (%d)", + op->status, expected_status); + +- if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE) +- TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data, +- "Ordering error, expected %p, got %p", +- (void *)(uintptr_t)order_idx, op->opaque_data); ++ TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data, ++ "Ordering error, expected %p, got %p", ++ (void *)(uintptr_t)order_idx, op->opaque_data); + + return TEST_SUCCESS; + } +@@ -1947,13 +1946,17 @@ validate_op_harq_chain(struct rte_bbdev_op_data *op, + uint16_t data_len = rte_pktmbuf_data_len(m) - offset; + total_data_size += orig_op->segments[i].length; + +- TEST_ASSERT(orig_op->segments[i].length < +- (uint32_t)(data_len + 64), ++ TEST_ASSERT(orig_op->segments[i].length < (uint32_t)(data_len + HARQ_MEM_TOLERANCE), + "Length of segment differ in original (%u) and filled (%u) op", + orig_op->segments[i].length, data_len); + harq_orig = (int8_t *) orig_op->segments[i].addr; + harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset); + ++ /* Cannot compare HARQ output data for such cases */ ++ if ((ldpc_llr_decimals > 1) && ((ops_ld->op_flags & 
RTE_BBDEV_LDPC_LLR_COMPRESSION) ++ || (ops_ld->op_flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION))) ++ break; ++ + if (!(ldpc_cap_flags & + RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS + ) || (ops_ld->op_flags & +@@ -1968,9 +1971,9 @@ validate_op_harq_chain(struct rte_bbdev_op_data *op, + ops_ld->n_filler; + if (data_len > deRmOutSize) + data_len = deRmOutSize; +- if (data_len > orig_op->segments[i].length) +- data_len = orig_op->segments[i].length; + } ++ if (data_len > orig_op->segments[i].length) ++ data_len = orig_op->segments[i].length; + /* + * HARQ output can have minor differences + * due to integer representation and related scaling +@@ -2029,7 +2032,7 @@ validate_op_harq_chain(struct rte_bbdev_op_data *op, + + /* Validate total mbuf pkt length */ + uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset; +- TEST_ASSERT(total_data_size < pkt_len + 64, ++ TEST_ASSERT(total_data_size < pkt_len + HARQ_MEM_TOLERANCE, + "Length of data differ in original (%u) and filled (%u) op", + total_data_size, pkt_len); + +@@ -4724,7 +4727,7 @@ offload_cost_test(struct active_device *ad, + printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n"); + return TEST_SKIPPED; + #else +- int iter; ++ int iter, ret; + uint16_t burst_sz = op_params->burst_sz; + const uint16_t num_to_process = op_params->num_to_process; + const enum rte_bbdev_op_type op_type = test_vector.op_type; +@@ -4815,7 +4818,10 @@ offload_cost_test(struct active_device *ad, + rte_get_tsc_hz()); + + struct rte_bbdev_stats stats = {0}; +- get_bbdev_queue_stats(ad->dev_id, queue_id, &stats); ++ ret = get_bbdev_queue_stats(ad->dev_id, queue_id, &stats); ++ TEST_ASSERT_SUCCESS(ret, ++ "Failed to get stats for queue (%u) of device (%u)", ++ queue_id, ad->dev_id); + if (op_type != RTE_BBDEV_OP_LDPC_DEC) { + TEST_ASSERT_SUCCESS(stats.enqueued_count != num_to_process, + "Mismatch in enqueue count %10"PRIu64" %d", +diff --git a/dpdk/app/test-compress-perf/comp_perf_options.h b/dpdk/app/test-compress-perf/comp_perf_options.h +index 0b777521c5..cfb00bd1ad 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_options.h ++++ b/dpdk/app/test-compress-perf/comp_perf_options.h +@@ -30,9 +30,9 @@ enum cperf_test_type { + }; + + enum comp_operation { +- COMPRESS_ONLY, +- DECOMPRESS_ONLY, +- COMPRESS_DECOMPRESS ++ COMPRESS = (1 << 0), ++ DECOMPRESS = (1 << 1), ++ COMPRESS_DECOMPRESS = (COMPRESS | DECOMPRESS), + }; + + struct range_list { +diff --git a/dpdk/app/test-compress-perf/comp_perf_options_parse.c b/dpdk/app/test-compress-perf/comp_perf_options_parse.c +index 019eddb7bd..303e714cda 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_options_parse.c ++++ b/dpdk/app/test-compress-perf/comp_perf_options_parse.c +@@ -446,11 +446,11 @@ parse_op_type(struct comp_test_data *test_data, const char *arg) + struct name_id_map optype_namemap[] = { + { + "comp", +- COMPRESS_ONLY ++ COMPRESS + }, + { + "decomp", +- DECOMPRESS_ONLY ++ DECOMPRESS + }, + { + "comp_and_decomp", +@@ -491,7 +491,7 @@ parse_huffman_enc(struct comp_test_data *test_data, const char *arg) + int id = get_str_key_id_mapping(huffman_namemap, + RTE_DIM(huffman_namemap), arg); + if (id < 0) { +- RTE_LOG(ERR, USER1, "Invalid Huffmane encoding specified\n"); ++ RTE_LOG(ERR, USER1, "Invalid Huffman encoding specified\n"); + return -1; + } + +@@ -507,7 +507,7 @@ parse_level(struct comp_test_data *test_data, const char *arg) + + /* + * Try parsing the argument as a range, if it fails, +- * arse it as a list ++ * parse it as a list + */ + if (parse_range(arg, &test_data->level_lst.min, + 
&test_data->level_lst.max, +diff --git a/dpdk/app/test-compress-perf/comp_perf_test_common.c b/dpdk/app/test-compress-perf/comp_perf_test_common.c +index b402a0d839..78487196ad 100644 +--- a/dpdk/app/test-compress-perf/comp_perf_test_common.c ++++ b/dpdk/app/test-compress-perf/comp_perf_test_common.c +@@ -227,23 +227,43 @@ comp_perf_allocate_memory(struct comp_test_data *test_data, + { + uint16_t comp_mbuf_size; + uint16_t decomp_mbuf_size; ++ size_t comp_data_size; ++ size_t decomp_data_size; ++ size_t output_data_sz; + + test_data->out_seg_sz = find_buf_size(test_data->seg_sz); + +- /* Number of segments for input and output +- * (compression and decompression) +- */ +- test_data->total_segs = DIV_CEIL(test_data->input_data_sz, +- test_data->seg_sz); ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Number of segments for input and output ++ * (compression and decompression) ++ */ ++ test_data->total_segs = DIV_CEIL(test_data->input_data_sz, ++ test_data->seg_sz); ++ } else { ++ /* ++ * When application does decompression only, input data is ++ * compressed and smaller than the output. The expected size of ++ * uncompressed data given by the user in segment size argument. ++ */ ++ test_data->total_segs = test_data->max_sgl_segs; ++ } ++ ++ output_data_sz = (size_t) test_data->out_seg_sz * test_data->total_segs; ++ output_data_sz = ++ RTE_MAX(output_data_sz, (size_t) MIN_COMPRESSED_BUF_SIZE); + + if (test_data->use_external_mbufs != 0) { + if (comp_perf_allocate_external_mbufs(test_data, mem) < 0) + return -1; + comp_mbuf_size = 0; + decomp_mbuf_size = 0; +- } else { ++ } else if (test_data->test_op & COMPRESS) { + comp_mbuf_size = test_data->out_seg_sz + RTE_PKTMBUF_HEADROOM; + decomp_mbuf_size = test_data->seg_sz + RTE_PKTMBUF_HEADROOM; ++ } else { ++ comp_mbuf_size = test_data->seg_sz + RTE_PKTMBUF_HEADROOM; ++ decomp_mbuf_size = test_data->out_seg_sz + RTE_PKTMBUF_HEADROOM; + } + + char pool_name[32] = ""; +@@ -287,26 +307,28 @@ comp_perf_allocate_memory(struct comp_test_data *test_data, + return -1; + } + +- /* +- * Compressed data might be a bit larger than input data, +- * if data cannot be compressed +- */ +- mem->compressed_data = rte_zmalloc_socket(NULL, +- RTE_MAX( +- (size_t) test_data->out_seg_sz * +- test_data->total_segs, +- (size_t) MIN_COMPRESSED_BUF_SIZE), +- 0, +- rte_socket_id()); ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Compressed data might be a bit larger than input data, ++ * if data cannot be compressed ++ */ ++ comp_data_size = output_data_sz; ++ decomp_data_size = test_data->input_data_sz; ++ } else { ++ comp_data_size = test_data->input_data_sz; ++ decomp_data_size = output_data_sz; ++ } ++ ++ mem->compressed_data = rte_zmalloc_socket(NULL, comp_data_size, 0, ++ rte_socket_id()); + if (mem->compressed_data == NULL) { + RTE_LOG(ERR, USER1, "Memory to hold the data from the input " + "file could not be allocated\n"); + return -1; + } + +- mem->decompressed_data = rte_zmalloc_socket(NULL, +- test_data->input_data_sz, 0, +- rte_socket_id()); ++ mem->decompressed_data = rte_zmalloc_socket(NULL, decomp_data_size, 0, ++ rte_socket_id()); + if (mem->decompressed_data == NULL) { + RTE_LOG(ERR, USER1, "Memory to hold the data from the input " + "file could not be allocated\n"); +@@ -344,6 +366,7 @@ int + prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + { + uint32_t remaining_data = test_data->input_data_sz; ++ uint32_t remaining_data_decomp = test_data->input_data_sz; + uint8_t *input_data_ptr = test_data->input_data; + size_t 
data_sz = 0; + uint8_t *data_addr; +@@ -351,6 +374,7 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + uint16_t segs_per_mbuf = 0; + uint32_t cmz = 0; + uint32_t dmz = 0; ++ bool decompress_only = !!(test_data->test_op == DECOMPRESS); + + for (i = 0; i < mem->total_bufs; i++) { + /* Allocate data in input mbuf and copy data from input file */ +@@ -361,8 +385,6 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + return -1; + } + +- data_sz = RTE_MIN(remaining_data, test_data->seg_sz); +- + if (test_data->use_external_mbufs != 0) { + rte_pktmbuf_attach_extbuf(mem->decomp_bufs[i], + mem->decomp_memzones[dmz]->addr, +@@ -372,16 +394,23 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + dmz++; + } + ++ if (!decompress_only) ++ data_sz = RTE_MIN(remaining_data, test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ + data_addr = (uint8_t *) rte_pktmbuf_append( + mem->decomp_bufs[i], data_sz); + if (data_addr == NULL) { + RTE_LOG(ERR, USER1, "Could not append data\n"); + return -1; + } +- rte_memcpy(data_addr, input_data_ptr, data_sz); + +- input_data_ptr += data_sz; +- remaining_data -= data_sz; ++ if (!decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data -= data_sz; ++ } + + /* Already one segment in the mbuf */ + segs_per_mbuf = 1; +@@ -398,8 +427,6 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + return -1; + } + +- data_sz = RTE_MIN(remaining_data, test_data->seg_sz); +- + if (test_data->use_external_mbufs != 0) { + rte_pktmbuf_attach_extbuf( + next_seg, +@@ -410,6 +437,12 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + dmz++; + } + ++ if (!decompress_only) ++ data_sz = RTE_MIN(remaining_data, ++ test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ + data_addr = (uint8_t *)rte_pktmbuf_append(next_seg, + data_sz); + +@@ -418,9 +451,11 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + return -1; + } + +- rte_memcpy(data_addr, input_data_ptr, data_sz); +- input_data_ptr += data_sz; +- remaining_data -= data_sz; ++ if (!decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data -= data_sz; ++ } + + if (rte_pktmbuf_chain(mem->decomp_bufs[i], + next_seg) < 0) { +@@ -447,16 +482,26 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + cmz++; + } + +- data_addr = (uint8_t *) rte_pktmbuf_append( +- mem->comp_bufs[i], +- test_data->out_seg_sz); ++ if (decompress_only) ++ data_sz = RTE_MIN(remaining_data_decomp, test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ ++ data_addr = (uint8_t *) rte_pktmbuf_append(mem->comp_bufs[i], ++ data_sz); + if (data_addr == NULL) { + RTE_LOG(ERR, USER1, "Could not append data\n"); + return -1; + } + ++ if (decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data_decomp -= data_sz; ++ } ++ + /* Chain mbufs if needed for output mbufs */ +- for (j = 1; j < segs_per_mbuf; j++) { ++ for (j = 1; j < segs_per_mbuf && remaining_data_decomp > 0; j++) { + struct rte_mbuf *next_seg = + rte_pktmbuf_alloc(mem->comp_buf_pool); + +@@ -476,13 +521,25 @@ prepare_bufs(struct comp_test_data *test_data, struct cperf_mem_resources *mem) + cmz++; + } + ++ if (decompress_only) ++ data_sz = RTE_MIN(remaining_data_decomp, ++ 
test_data->seg_sz); ++ else ++ data_sz = test_data->out_seg_sz; ++ + data_addr = (uint8_t *)rte_pktmbuf_append(next_seg, +- test_data->out_seg_sz); ++ data_sz); + if (data_addr == NULL) { + RTE_LOG(ERR, USER1, "Could not append data\n"); + return -1; + } + ++ if (decompress_only) { ++ rte_memcpy(data_addr, input_data_ptr, data_sz); ++ input_data_ptr += data_sz; ++ remaining_data_decomp -= data_sz; ++ } ++ + if (rte_pktmbuf_chain(mem->comp_bufs[i], + next_seg) < 0) { + RTE_LOG(ERR, USER1, "Could not chain mbufs\n"); diff --git a/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c b/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c -index da55b02b74..a3f6404eb2 100644 +index da55b02b74..7083f8213c 100644 --- a/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c +++ b/dpdk/app/test-compress-perf/comp_perf_test_cyclecount.c @@ -175,16 +175,17 @@ main_loop(struct cperf_cyclecount_ctx *ctx, enum rte_comp_xform_type type) @@ -2512,8 +5010,99 @@ index da55b02b74..a3f6404eb2 100644 ops_needed) != 0) { RTE_LOG(ERR, USER1, "Could not allocate enough operations\n"); +@@ -511,38 +512,55 @@ cperf_cyclecount_test_runner(void *test_ctx) + if (cperf_verify_test_runner(&ctx->ver)) + return EXIT_FAILURE; + +- /* +- * Run the tests twice, discarding the first performance +- * results, before the cache is warmed up +- */ +- +- /* C O M P R E S S */ +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) +- return EXIT_FAILURE; +- } ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) ++ return EXIT_FAILURE; ++ } + +- ops_enq_retries_comp = ctx->ops_enq_retries; +- ops_deq_retries_comp = ctx->ops_deq_retries; ++ ops_enq_retries_comp = ctx->ops_enq_retries; ++ ops_deq_retries_comp = ctx->ops_deq_retries; + +- duration_enq_per_op_comp = ctx->duration_enq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); +- duration_deq_per_op_comp = ctx->duration_deq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); ++ duration_enq_per_op_comp = ctx->duration_enq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ duration_deq_per_op_comp = ctx->duration_deq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ } else { ++ ops_enq_retries_comp = 0; ++ ops_deq_retries_comp = 0; + +- /* D E C O M P R E S S */ +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) +- return EXIT_FAILURE; ++ duration_enq_per_op_comp = 0; ++ duration_deq_per_op_comp = 0; + } + +- ops_enq_retries_decomp = ctx->ops_enq_retries; +- ops_deq_retries_decomp = ctx->ops_deq_retries; ++ if (test_data->test_op & DECOMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) ++ return EXIT_FAILURE; ++ } + +- duration_enq_per_op_decomp = ctx->duration_enq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); +- duration_deq_per_op_decomp = ctx->duration_deq / +- (ctx->ver.mem.total_bufs * test_data->num_iter); ++ ops_enq_retries_decomp = ctx->ops_enq_retries; ++ ops_deq_retries_decomp = ctx->ops_deq_retries; ++ ++ duration_enq_per_op_decomp = ctx->duration_enq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ duration_deq_per_op_decomp = ctx->duration_deq / ++ (ctx->ver.mem.total_bufs * test_data->num_iter); ++ } else { ++ ops_enq_retries_decomp = 0; ++ 
ops_deq_retries_decomp = 0; ++ ++ duration_enq_per_op_decomp = 0; ++ duration_deq_per_op_decomp = 0; ++ } + + duration_setup_per_op = ctx->duration_op / + (ctx->ver.mem.total_bufs * test_data->num_iter); +@@ -560,7 +578,7 @@ cperf_cyclecount_test_runner(void *test_ctx) + " [D-e] - decompression enqueue\n" + " [D-d] - decompression dequeue\n" + " - Cycles section: number of cycles per 'op' for the following operations:\n" +- " setup/op - memory allocation, op configuration and memory dealocation\n" ++ " setup/op - memory allocation, op configuration and memory deallocation\n" + " [C-e] - compression enqueue\n" + " [C-d] - compression dequeue\n" + " [D-e] - decompression enqueue\n" diff --git a/dpdk/app/test-compress-perf/comp_perf_test_throughput.c b/dpdk/app/test-compress-perf/comp_perf_test_throughput.c -index d3dff070b0..4569599eb9 100644 +index d3dff070b0..170beac6f7 100644 --- a/dpdk/app/test-compress-perf/comp_perf_test_throughput.c +++ b/dpdk/app/test-compress-perf/comp_perf_test_throughput.c @@ -72,7 +72,7 @@ main_loop(struct cperf_benchmark_ctx *ctx, enum rte_comp_xform_type type) @@ -2525,8 +5114,88 @@ index d3dff070b0..4569599eb9 100644 return -1; } +@@ -357,41 +357,53 @@ cperf_throughput_test_runner(void *test_ctx) + * First the verification part is needed + */ + if (cperf_verify_test_runner(&ctx->ver)) { +- ret = EXIT_FAILURE; ++ ret = EXIT_FAILURE; + goto end; + } + +- /* +- * Run the tests twice, discarding the first performance +- * results, before the cache is warmed up +- */ +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; ++ if (test_data->test_op & COMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } + } +- } + +- for (i = 0; i < 2; i++) { +- if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; +- } ++ ctx->comp_tsc_byte = ++ (double)(ctx->comp_tsc_duration[test_data->level]) / ++ test_data->input_data_sz; ++ ctx->comp_gbps = rte_get_tsc_hz() / ctx->comp_tsc_byte * 8 / ++ 1000000000; ++ } else { ++ ctx->comp_tsc_byte = 0; ++ ctx->comp_gbps = 0; + } + +- ctx->comp_tsc_byte = +- (double)(ctx->comp_tsc_duration[test_data->level]) / +- test_data->input_data_sz; ++ if (test_data->test_op & DECOMPRESS) { ++ /* ++ * Run the test twice, discarding the first performance ++ * results, before the cache is warmed up ++ */ ++ for (i = 0; i < 2; i++) { ++ if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } ++ } + +- ctx->decomp_tsc_byte = ++ ctx->decomp_tsc_byte = + (double)(ctx->decomp_tsc_duration[test_data->level]) / +- test_data->input_data_sz; +- +- ctx->comp_gbps = rte_get_tsc_hz() / ctx->comp_tsc_byte * 8 / +- 1000000000; +- +- ctx->decomp_gbps = rte_get_tsc_hz() / ctx->decomp_tsc_byte * 8 / +- 1000000000; ++ test_data->input_data_sz; ++ ctx->decomp_gbps = rte_get_tsc_hz() / ctx->decomp_tsc_byte * 8 / ++ 1000000000; ++ } else { ++ ctx->decomp_tsc_byte = 0; ++ ctx->decomp_gbps = 0; ++ } + + exp = 0; + if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0, diff --git a/dpdk/app/test-compress-perf/comp_perf_test_verify.c b/dpdk/app/test-compress-perf/comp_perf_test_verify.c -index f6e21368e8..7d06029488 100644 +index f6e21368e8..138819a7f6 100644 --- a/dpdk/app/test-compress-perf/comp_perf_test_verify.c +++ b/dpdk/app/test-compress-perf/comp_perf_test_verify.c 
@@ -75,7 +75,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) @@ -2538,8 +5207,117 @@ index f6e21368e8..7d06029488 100644 return -1; } +@@ -112,7 +112,8 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) + output_data_sz = &ctx->decomp_data_sz; + input_bufs = mem->comp_bufs; + output_bufs = mem->decomp_bufs; +- out_seg_sz = test_data->seg_sz; ++ out_seg_sz = (test_data->test_op & COMPRESS) ? ++ test_data->seg_sz : test_data->out_seg_sz; + } + + /* Create private xform */ +@@ -224,7 +225,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) + op->status == + RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) { + RTE_LOG(ERR, USER1, +-"Out of space error occurred due to uncompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); ++"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); + res = -1; + goto end; + } else if (op->status != +@@ -309,7 +310,7 @@ main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type) + op->status == + RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) { + RTE_LOG(ERR, USER1, +-"Out of space error occurred due to uncompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); ++"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n"); + res = -1; + goto end; + } else if (op->status != +@@ -390,44 +391,59 @@ cperf_verify_test_runner(void *test_ctx) + int ret = EXIT_SUCCESS; + static uint16_t display_once; + uint32_t lcore = rte_lcore_id(); ++ uint16_t exp = 0; + + ctx->mem.lcore_id = lcore; + + test_data->ratio = 0; + +- if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; ++ if (test_data->test_op & COMPRESS) { ++ if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } + } + +- if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { +- ret = EXIT_FAILURE; +- goto end; +- } ++ if (test_data->test_op & DECOMPRESS) { ++ if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) { ++ ret = EXIT_FAILURE; ++ goto end; ++ } + +- if (ctx->decomp_data_sz != test_data->input_data_sz) { +- RTE_LOG(ERR, USER1, +- "Decompressed data length not equal to input data length\n"); +- RTE_LOG(ERR, USER1, +- "Decompressed size = %zu, expected = %zu\n", +- ctx->decomp_data_sz, test_data->input_data_sz); +- ret = EXIT_FAILURE; +- goto end; +- } else { +- if (memcmp(ctx->mem.decompressed_data, +- test_data->input_data, +- test_data->input_data_sz) != 0) { ++ if (!(test_data->test_op & COMPRESS)) { ++ /* ++ * For DECOMPRESS_ONLY mode there is no more ++ * verifications, reset the 'ratio' and 'comp_data_sz' ++ * fields for other tests report. 
++ */ ++ ctx->comp_data_sz = 0; ++ ctx->ratio = 0; ++ goto end; ++ } ++ ++ if (ctx->decomp_data_sz != test_data->input_data_sz) { ++ RTE_LOG(ERR, USER1, ++ "Decompressed data length not equal to input data length\n"); + RTE_LOG(ERR, USER1, +- "Decompressed data is not the same as file data\n"); ++ "Decompressed size = %zu, expected = %zu\n", ++ ctx->decomp_data_sz, test_data->input_data_sz); + ret = EXIT_FAILURE; + goto end; ++ } else { ++ if (memcmp(ctx->mem.decompressed_data, ++ test_data->input_data, ++ test_data->input_data_sz) != 0) { ++ RTE_LOG(ERR, USER1, ++ "Decompressed data is not the same as file data\n"); ++ ret = EXIT_FAILURE; ++ goto end; ++ } + } + } + + ctx->ratio = (double) ctx->comp_data_sz / + test_data->input_data_sz * 100; + +- uint16_t exp = 0; + if (!ctx->silent) { + if (__atomic_compare_exchange_n(&display_once, &exp, 1, 0, + __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { diff --git a/dpdk/app/test-compress-perf/main.c b/dpdk/app/test-compress-perf/main.c -index cc9951a9b1..ce9e80bedc 100644 +index cc9951a9b1..832140f814 100644 --- a/dpdk/app/test-compress-perf/main.c +++ b/dpdk/app/test-compress-perf/main.c @@ -67,7 +67,7 @@ comp_perf_check_capabilities(struct comp_test_data *test_data, uint8_t cdev_id) @@ -2568,6 +5346,186 @@ index cc9951a9b1..ce9e80bedc 100644 if (rte_compressdev_configure(cdev_id, &config) < 0) { RTE_LOG(ERR, USER1, "Device configuration failed\n"); +@@ -252,6 +253,14 @@ comp_perf_dump_input_data(struct comp_test_data *test_data) + goto end; + } + ++ if (!(test_data->test_op & COMPRESS) && ++ test_data->input_data_sz > ++ (size_t) test_data->seg_sz * (size_t) test_data->max_sgl_segs) { ++ RTE_LOG(ERR, USER1, ++ "Size of input must be less than total segments\n"); ++ goto end; ++ } ++ + test_data->input_data = rte_zmalloc_socket(NULL, + test_data->input_data_sz, 0, rte_socket_id()); + +diff --git a/dpdk/app/test-crypto-perf/cperf_ops.c b/dpdk/app/test-crypto-perf/cperf_ops.c +index d975ae1ab8..698c44ed30 100644 +--- a/dpdk/app/test-crypto-perf/cperf_ops.c ++++ b/dpdk/app/test-crypto-perf/cperf_ops.c +@@ -43,8 +43,7 @@ test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options, + { + struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *); + +- if ((options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) || +- (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) { ++ if (options->is_outbound) { + memcpy(ip, test_vector->plaintext.data, m->data_len); + + ip->total_length = rte_cpu_to_be_16(m->data_len); +@@ -613,8 +612,9 @@ create_ipsec_session(struct rte_mempool *sess_mp, + const struct cperf_test_vector *test_vector, + uint16_t iv_offset) + { +- struct rte_crypto_sym_xform xform = {0}; + struct rte_crypto_sym_xform auth_xform = {0}; ++ struct rte_crypto_sym_xform *crypto_xform; ++ struct rte_crypto_sym_xform xform = {0}; + + if (options->aead_algo != 0) { + /* Setup AEAD Parameters */ +@@ -628,10 +628,10 @@ create_ipsec_session(struct rte_mempool *sess_mp, + xform.aead.iv.length = test_vector->aead_iv.length; + xform.aead.digest_length = options->digest_sz; + xform.aead.aad_length = options->aead_aad_sz; ++ crypto_xform = &xform; + } else if (options->cipher_algo != 0 && options->auth_algo != 0) { + /* Setup Cipher Parameters */ + xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +- xform.next = NULL; + xform.cipher.algo = options->cipher_algo; + xform.cipher.op = options->cipher_op; + xform.cipher.iv.offset = iv_offset; +@@ -648,7 +648,6 @@ create_ipsec_session(struct rte_mempool *sess_mp, + + /* Setup Auth Parameters */ + 
auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; +- auth_xform.next = NULL; + auth_xform.auth.algo = options->auth_algo; + auth_xform.auth.op = options->auth_op; + auth_xform.auth.iv.offset = iv_offset + +@@ -667,7 +666,15 @@ create_ipsec_session(struct rte_mempool *sess_mp, + auth_xform.auth.iv.length = 0; + } + +- xform.next = &auth_xform; ++ if (options->is_outbound) { ++ crypto_xform = &xform; ++ xform.next = &auth_xform; ++ auth_xform.next = NULL; ++ } else { ++ crypto_xform = &auth_xform; ++ auth_xform.next = &xform; ++ xform.next = NULL; ++ } + } else { + return NULL; + } +@@ -690,30 +697,26 @@ create_ipsec_session(struct rte_mempool *sess_mp, + .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, + .protocol = RTE_SECURITY_PROTOCOL_IPSEC, + {.ipsec = { +- .spi = rte_lcore_id(), ++ .spi = rte_lcore_id() + 1, + /**< For testing sake, lcore_id is taken as SPI so that + * for every core a different session is created. + */ + .salt = CPERF_IPSEC_SALT, + .options = { 0 }, + .replay_win_sz = 0, +- .direction = +- ((options->cipher_op == +- RTE_CRYPTO_CIPHER_OP_ENCRYPT) && +- (options->auth_op == +- RTE_CRYPTO_AUTH_OP_GENERATE)) || +- (options->aead_op == +- RTE_CRYPTO_AEAD_OP_ENCRYPT) ? +- RTE_SECURITY_IPSEC_SA_DIR_EGRESS : +- RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, + .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL, + .tunnel = tunnel, + } }, + .userdata = NULL, +- .crypto_xform = &xform ++ .crypto_xform = crypto_xform, + }; + ++ if (options->is_outbound) ++ sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS; ++ else ++ sess_conf.ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; ++ + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_cryptodev_get_sec_ctx(dev_id); + +diff --git a/dpdk/app/test-crypto-perf/cperf_options.h b/dpdk/app/test-crypto-perf/cperf_options.h +index 031b238b20..5533b2e6fb 100644 +--- a/dpdk/app/test-crypto-perf/cperf_options.h ++++ b/dpdk/app/test-crypto-perf/cperf_options.h +@@ -103,6 +103,7 @@ struct cperf_options { + uint32_t out_of_place:1; + uint32_t silent:1; + uint32_t csv:1; ++ uint32_t is_outbound:1; + + enum rte_crypto_cipher_algorithm cipher_algo; + enum rte_crypto_cipher_operation cipher_op; +diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +index 59a9dc596a..1d91bea0c9 100644 +--- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c ++++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c +@@ -504,6 +504,7 @@ parse_test_file(struct cperf_options *opts, + if (access(opts->test_file, F_OK) != -1) + return 0; + RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n"); ++ free(opts->test_file); + + return -1; + } +@@ -1248,6 +1249,21 @@ cperf_options_check(struct cperf_options *options) + if (check_docsis_buffer_length(options) < 0) + return -EINVAL; + } ++ ++ if (options->op_type == CPERF_IPSEC) { ++ if (options->aead_algo) { ++ if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ++ options->is_outbound = 1; ++ else ++ options->is_outbound = 0; ++ } else { ++ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT && ++ options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) ++ options->is_outbound = 1; ++ else ++ options->is_outbound = 0; ++ } ++ } + #endif + + return 0; +diff --git a/dpdk/app/test-crypto-perf/cperf_test_common.c b/dpdk/app/test-crypto-perf/cperf_test_common.c +index 97a1ea47ad..5a65e11ba7 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_common.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_common.c +@@ -198,9 +198,11 @@ 
cperf_alloc_common_memory(const struct cperf_options *options, + RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size); + uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz; + uint32_t max_size = options->max_buffer_size + options->digest_sz; +- uint16_t segments_nb = (max_size % options->segment_sz) ? +- (max_size / options->segment_sz) + 1 : +- max_size / options->segment_sz; ++ uint32_t segment_data_len = options->segment_sz - options->headroom_sz - ++ options->tailroom_sz; ++ uint16_t segments_nb = (max_size % segment_data_len) ? ++ (max_size / segment_data_len) + 1 : ++ (max_size / segment_data_len); + uint32_t obj_size = crypto_op_total_size_padded + + (mbuf_size * segments_nb); + diff --git a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index ba1f104f72..5842f29d43 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -2581,6 +5539,51 @@ index ba1f104f72..5842f29d43 100644 * piecemeal and then average out the results. */ cur_op = 0; +diff --git a/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c b/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c +index 1e9dfcfff0..1fe11df27b 100644 +--- a/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c ++++ b/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c +@@ -28,6 +28,7 @@ free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts) + rte_free(vector->cipher_key.data); + rte_free(vector->auth_key.data); + rte_free(vector->ciphertext.data); ++ free(opts->test_file); + } + + rte_free(vector); +diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c +index 6fdb92fb7c..db0ebd0050 100644 +--- a/dpdk/app/test-crypto-perf/main.c ++++ b/dpdk/app/test-crypto-perf/main.c +@@ -237,11 +237,10 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs) + #endif + + struct rte_cryptodev_info cdev_info; +- uint8_t socket_id = rte_cryptodev_socket_id(cdev_id); +- /* range check the socket_id - negative values become big +- * positive ones due to use of unsigned value +- */ +- if (socket_id >= RTE_MAX_NUMA_NODES) ++ int socket_id = rte_cryptodev_socket_id(cdev_id); ++ ++ /* Use the first socket if SOCKET_ID_ANY is returned. */ ++ if (socket_id == SOCKET_ID_ANY) + socket_id = 0; + + rte_cryptodev_info_get(cdev_id, &cdev_info); +@@ -701,7 +700,11 @@ main(int argc, char **argv) + + cdev_id = enabled_cdevs[cdev_index]; + +- uint8_t socket_id = rte_cryptodev_socket_id(cdev_id); ++ int socket_id = rte_cryptodev_socket_id(cdev_id); ++ ++ /* Use the first socket if SOCKET_ID_ANY is returned. 
*/ ++ if (socket_id == SOCKET_ID_ANY) ++ socket_id = 0; + + ctx[i] = cperf_testmap[opts.test].constructor( + session_pool_socket[socket_id].sess_mp, diff --git a/dpdk/app/test-eventdev/evt_options.c b/dpdk/app/test-eventdev/evt_options.c index 753a7dbd7d..4ae44801da 100644 --- a/dpdk/app/test-eventdev/evt_options.c @@ -2607,6 +5610,21 @@ index ff7813f9c2..603e7c9178 100644 evt_dump_worker_lcores(opt); evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt)); } +diff --git a/dpdk/app/test-eventdev/test_pipeline_common.c b/dpdk/app/test-eventdev/test_pipeline_common.c +index ddaa9f3fdb..7e745899c5 100644 +--- a/dpdk/app/test-eventdev/test_pipeline_common.c ++++ b/dpdk/app/test-eventdev/test_pipeline_common.c +@@ -372,8 +372,8 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, + if (opt->vector_size < limits.min_sz || + opt->vector_size > limits.max_sz) { + evt_err("Vector size [%d] not within limits max[%d] min[%d]", +- opt->vector_size, limits.min_sz, +- limits.max_sz); ++ opt->vector_size, limits.max_sz, ++ limits.min_sz); + return -EINVAL; + } + diff --git a/dpdk/app/test-fib/main.c b/dpdk/app/test-fib/main.c index ecd420116a..830c32cc44 100644 --- a/dpdk/app/test-fib/main.c @@ -2665,7 +5683,7 @@ index 0db2254bd1..29b63298e0 100644 #define MAX_ACTIONS_NUM 32 #define MAX_ATTRS_NUM 16 diff --git a/dpdk/app/test-flow-perf/main.c b/dpdk/app/test-flow-perf/main.c -index 11f1ee0e1e..f375097028 100644 +index 11f1ee0e1e..13d1c849bf 100644 --- a/dpdk/app/test-flow-perf/main.c +++ b/dpdk/app/test-flow-perf/main.c @@ -16,6 +16,7 @@ @@ -2676,7 +5694,21 @@ index 11f1ee0e1e..f375097028 100644 #include #include #include -@@ -1519,7 +1520,7 @@ dump_used_cpu_time(const char *item, +@@ -834,7 +835,12 @@ args_parse(int argc, char **argv) + /* Control */ + if (strcmp(lgopts[opt_idx].name, + "rules-batch") == 0) { +- rules_batch = atoi(optarg); ++ n = atoi(optarg); ++ if (n > 0) ++ rules_batch = n; ++ else ++ rte_exit(EXIT_FAILURE, ++ "flow rules-batch should be > 0\n"); + } + if (strcmp(lgopts[opt_idx].name, + "rules-count") == 0) { +@@ -1519,7 +1525,7 @@ dump_used_cpu_time(const char *item, * threads time. * * Throughput: total count of rte rules divided @@ -2685,7 +5717,7 @@ index 11f1ee0e1e..f375097028 100644 * threads time. 
*/ double insertion_latency_time; -@@ -1713,36 +1714,6 @@ do_tx(struct lcore_info *li, uint16_t cnt, uint16_t tx_port, +@@ -1713,36 +1719,6 @@ do_tx(struct lcore_info *li, uint16_t cnt, uint16_t tx_port, rte_pktmbuf_free(li->pkts[i]); } @@ -2722,7 +5754,7 @@ index 11f1ee0e1e..f375097028 100644 static void packet_per_second_stats(void) { -@@ -1764,7 +1735,6 @@ packet_per_second_stats(void) +@@ -1764,7 +1740,6 @@ packet_per_second_stats(void) uint64_t total_rx_pkts = 0; uint64_t total_tx_drops = 0; uint64_t tx_delta, rx_delta, drops_delta; @@ -2730,7 +5762,7 @@ index 11f1ee0e1e..f375097028 100644 int nr_valid_core = 0; sleep(1); -@@ -1789,10 +1759,8 @@ packet_per_second_stats(void) +@@ -1789,10 +1764,8 @@ packet_per_second_stats(void) tx_delta = li->tx_pkts - oli->tx_pkts; rx_delta = li->rx_pkts - oli->rx_pkts; drops_delta = li->tx_drops - oli->tx_drops; @@ -2743,7 +5775,7 @@ index 11f1ee0e1e..f375097028 100644 total_tx_pkts += tx_delta; total_rx_pkts += rx_delta; -@@ -1803,10 +1771,9 @@ packet_per_second_stats(void) +@@ -1803,10 +1776,9 @@ packet_per_second_stats(void) } if (nr_valid_core > 1) { @@ -2757,7 +5789,7 @@ index 11f1ee0e1e..f375097028 100644 nr_lines += 1; } -@@ -2139,6 +2106,9 @@ main(int argc, char **argv) +@@ -2139,6 +2111,9 @@ main(int argc, char **argv) if (argc > 1) args_parse(argc, argv); @@ -2767,6 +5799,456 @@ index 11f1ee0e1e..f375097028 100644 init_port(); nb_lcores = rte_lcore_count(); +diff --git a/dpdk/app/test-pipeline/main.c b/dpdk/app/test-pipeline/main.c +index 1e16794183..8633933fd9 100644 +--- a/dpdk/app/test-pipeline/main.c ++++ b/dpdk/app/test-pipeline/main.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -41,6 +42,15 @@ + + #include "main.h" + ++bool force_quit; ++ ++static void ++signal_handler(int signum) ++{ ++ if (signum == SIGINT || signum == SIGTERM) ++ force_quit = true; ++} ++ + int + main(int argc, char **argv) + { +@@ -54,6 +64,10 @@ main(int argc, char **argv) + argc -= ret; + argv += ret; + ++ force_quit = false; ++ signal(SIGINT, signal_handler); ++ signal(SIGTERM, signal_handler); ++ + /* Parse application arguments (after the EAL ones) */ + ret = app_parse_args(argc, argv); + if (ret < 0) { +diff --git a/dpdk/app/test-pipeline/main.h b/dpdk/app/test-pipeline/main.h +index 59dcfddbf4..9df157de22 100644 +--- a/dpdk/app/test-pipeline/main.h ++++ b/dpdk/app/test-pipeline/main.h +@@ -60,6 +60,8 @@ struct app_params { + + extern struct app_params app; + ++extern bool force_quit; ++ + int app_parse_args(int argc, char **argv); + void app_print_usage(void); + void app_init(void); +diff --git a/dpdk/app/test-pipeline/pipeline_acl.c b/dpdk/app/test-pipeline/pipeline_acl.c +index 5857bc285f..abde4bf934 100644 +--- a/dpdk/app/test-pipeline/pipeline_acl.c ++++ b/dpdk/app/test-pipeline/pipeline_acl.c +@@ -236,14 +236,16 @@ app_main_loop_worker_pipeline_acl(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/pipeline_hash.c b/dpdk/app/test-pipeline/pipeline_hash.c +index 2dd8928d43..cab9c20980 100644 +--- a/dpdk/app/test-pipeline/pipeline_hash.c ++++ b/dpdk/app/test-pipeline/pipeline_hash.c +@@ -366,14 +366,16 @@ app_main_loop_worker_pipeline_hash(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + 
rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +@@ -411,59 +413,61 @@ app_main_loop_rx_metadata(void) { + RTE_LOG(INFO, USER1, "Core %u is doing RX (with meta-data)\n", + rte_lcore_id()); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- uint16_t n_mbufs; +- +- n_mbufs = rte_eth_rx_burst( +- app.ports[i], +- 0, +- app.mbuf_rx.array, +- app.burst_size_rx_read); +- +- if (n_mbufs == 0) +- continue; +- +- for (j = 0; j < n_mbufs; j++) { +- struct rte_mbuf *m; +- uint8_t *m_data, *key; +- struct rte_ipv4_hdr *ip_hdr; +- struct rte_ipv6_hdr *ipv6_hdr; +- uint32_t ip_dst; +- uint8_t *ipv6_dst; +- uint32_t *signature, *k32; +- +- m = app.mbuf_rx.array[j]; +- m_data = rte_pktmbuf_mtod(m, uint8_t *); +- signature = RTE_MBUF_METADATA_UINT32_PTR(m, +- APP_METADATA_OFFSET(0)); +- key = RTE_MBUF_METADATA_UINT8_PTR(m, +- APP_METADATA_OFFSET(32)); +- +- if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) { +- ip_hdr = (struct rte_ipv4_hdr *) +- &m_data[sizeof(struct rte_ether_hdr)]; +- ip_dst = ip_hdr->dst_addr; +- +- k32 = (uint32_t *) key; +- k32[0] = ip_dst & 0xFFFFFF00; +- } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) { +- ipv6_hdr = (struct rte_ipv6_hdr *) +- &m_data[sizeof(struct rte_ether_hdr)]; +- ipv6_dst = ipv6_hdr->dst_addr; +- +- memcpy(key, ipv6_dst, 16); +- } else ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ uint16_t n_mbufs; ++ ++ n_mbufs = rte_eth_rx_burst( ++ app.ports[i], ++ 0, ++ app.mbuf_rx.array, ++ app.burst_size_rx_read); ++ ++ if (n_mbufs == 0) + continue; + +- *signature = test_hash(key, NULL, 0, 0); ++ for (j = 0; j < n_mbufs; j++) { ++ struct rte_mbuf *m; ++ uint8_t *m_data, *key; ++ struct rte_ipv4_hdr *ip_hdr; ++ struct rte_ipv6_hdr *ipv6_hdr; ++ uint32_t ip_dst; ++ uint8_t *ipv6_dst; ++ uint32_t *signature, *k32; ++ ++ m = app.mbuf_rx.array[j]; ++ m_data = rte_pktmbuf_mtod(m, uint8_t *); ++ signature = RTE_MBUF_METADATA_UINT32_PTR(m, ++ APP_METADATA_OFFSET(0)); ++ key = RTE_MBUF_METADATA_UINT8_PTR(m, ++ APP_METADATA_OFFSET(32)); ++ ++ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) { ++ ip_hdr = (struct rte_ipv4_hdr *) ++ &m_data[sizeof(struct rte_ether_hdr)]; ++ ip_dst = ip_hdr->dst_addr; ++ ++ k32 = (uint32_t *) key; ++ k32[0] = ip_dst & 0xFFFFFF00; ++ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) { ++ ipv6_hdr = (struct rte_ipv6_hdr *) ++ &m_data[sizeof(struct rte_ether_hdr)]; ++ ipv6_dst = ipv6_hdr->dst_addr; ++ ++ memcpy(key, ipv6_dst, 16); ++ } else ++ continue; ++ ++ *signature = test_hash(key, NULL, 0, 0); ++ } ++ ++ do { ++ ret = rte_ring_sp_enqueue_bulk( ++ app.rings_rx[i], ++ (void **) app.mbuf_rx.array, ++ n_mbufs, ++ NULL); ++ } while (ret == 0 && !force_quit); + } +- +- do { +- ret = rte_ring_sp_enqueue_bulk( +- app.rings_rx[i], +- (void **) app.mbuf_rx.array, +- n_mbufs, +- NULL); +- } while (ret == 0); + } + } +diff --git a/dpdk/app/test-pipeline/pipeline_lpm.c b/dpdk/app/test-pipeline/pipeline_lpm.c +index 8add5e71b7..e3d4b3fdc5 100644 +--- a/dpdk/app/test-pipeline/pipeline_lpm.c ++++ b/dpdk/app/test-pipeline/pipeline_lpm.c +@@ -160,14 +160,16 @@ app_main_loop_worker_pipeline_lpm(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git 
a/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c b/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c +index 26b325180d..f9aca74e4c 100644 +--- a/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c ++++ b/dpdk/app/test-pipeline/pipeline_lpm_ipv6.c +@@ -158,14 +158,16 @@ app_main_loop_worker_pipeline_lpm_ipv6(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/pipeline_stub.c b/dpdk/app/test-pipeline/pipeline_stub.c +index b6750d51bf..48a638aad7 100644 +--- a/dpdk/app/test-pipeline/pipeline_stub.c ++++ b/dpdk/app/test-pipeline/pipeline_stub.c +@@ -122,14 +122,16 @@ app_main_loop_worker_pipeline_stub(void) { + + /* Run-time */ + #if APP_FLUSH == 0 +- for ( ; ; ) ++ while (!force_quit) + rte_pipeline_run(p); + #else +- for (i = 0; ; i++) { ++ i = 0; ++ while (!force_quit) { + rte_pipeline_run(p); + + if ((i & APP_FLUSH) == 0) + rte_pipeline_flush(p); ++ i++; + } + #endif + } +diff --git a/dpdk/app/test-pipeline/runtime.c b/dpdk/app/test-pipeline/runtime.c +index d939a85d7e..752f783370 100644 +--- a/dpdk/app/test-pipeline/runtime.c ++++ b/dpdk/app/test-pipeline/runtime.c +@@ -48,24 +48,26 @@ app_main_loop_rx(void) { + + RTE_LOG(INFO, USER1, "Core %u is doing RX\n", rte_lcore_id()); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- uint16_t n_mbufs; +- +- n_mbufs = rte_eth_rx_burst( +- app.ports[i], +- 0, +- app.mbuf_rx.array, +- app.burst_size_rx_read); +- +- if (n_mbufs == 0) +- continue; +- +- do { +- ret = rte_ring_sp_enqueue_bulk( +- app.rings_rx[i], +- (void **) app.mbuf_rx.array, +- n_mbufs, NULL); +- } while (ret == 0); ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ uint16_t n_mbufs; ++ ++ n_mbufs = rte_eth_rx_burst( ++ app.ports[i], ++ 0, ++ app.mbuf_rx.array, ++ app.burst_size_rx_read); ++ ++ if (n_mbufs == 0) ++ continue; ++ ++ do { ++ ret = rte_ring_sp_enqueue_bulk( ++ app.rings_rx[i], ++ (void **) app.mbuf_rx.array, ++ n_mbufs, NULL); ++ } while (ret == 0 && !force_quit); ++ } + } + } + +@@ -82,25 +84,27 @@ app_main_loop_worker(void) { + if (worker_mbuf == NULL) + rte_panic("Worker thread: cannot allocate buffer space\n"); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- int ret; +- +- ret = rte_ring_sc_dequeue_bulk( +- app.rings_rx[i], +- (void **) worker_mbuf->array, +- app.burst_size_worker_read, +- NULL); ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ int ret; + +- if (ret == 0) +- continue; +- +- do { +- ret = rte_ring_sp_enqueue_bulk( +- app.rings_tx[i ^ 1], ++ ret = rte_ring_sc_dequeue_bulk( ++ app.rings_rx[i], + (void **) worker_mbuf->array, +- app.burst_size_worker_write, ++ app.burst_size_worker_read, + NULL); +- } while (ret == 0); ++ ++ if (ret == 0) ++ continue; ++ ++ do { ++ ret = rte_ring_sp_enqueue_bulk( ++ app.rings_tx[i ^ 1], ++ (void **) worker_mbuf->array, ++ app.burst_size_worker_write, ++ NULL); ++ } while (ret == 0 && !force_quit); ++ } + } + } + +@@ -110,45 +114,47 @@ app_main_loop_tx(void) { + + RTE_LOG(INFO, USER1, "Core %u is doing TX\n", rte_lcore_id()); + +- for (i = 0; ; i = ((i + 1) & (app.n_ports - 1))) { +- uint16_t n_mbufs, n_pkts; +- int ret; ++ while (!force_quit) { ++ for (i = 0; i < app.n_ports; i++) { ++ uint16_t n_mbufs, n_pkts; ++ int ret; + +- n_mbufs = app.mbuf_tx[i].n_mbufs; ++ n_mbufs = app.mbuf_tx[i].n_mbufs; + +- ret = 
rte_ring_sc_dequeue_bulk( +- app.rings_tx[i], +- (void **) &app.mbuf_tx[i].array[n_mbufs], +- app.burst_size_tx_read, +- NULL); ++ ret = rte_ring_sc_dequeue_bulk( ++ app.rings_tx[i], ++ (void **) &app.mbuf_tx[i].array[n_mbufs], ++ app.burst_size_tx_read, ++ NULL); + +- if (ret == 0) +- continue; ++ if (ret == 0) ++ continue; + +- n_mbufs += app.burst_size_tx_read; ++ n_mbufs += app.burst_size_tx_read; + +- if (n_mbufs < app.burst_size_tx_write) { +- app.mbuf_tx[i].n_mbufs = n_mbufs; +- continue; +- } ++ if (n_mbufs < app.burst_size_tx_write) { ++ app.mbuf_tx[i].n_mbufs = n_mbufs; ++ continue; ++ } + +- n_pkts = rte_eth_tx_burst( +- app.ports[i], +- 0, +- app.mbuf_tx[i].array, +- n_mbufs); ++ n_pkts = rte_eth_tx_burst( ++ app.ports[i], ++ 0, ++ app.mbuf_tx[i].array, ++ n_mbufs); + +- if (n_pkts < n_mbufs) { +- uint16_t k; ++ if (n_pkts < n_mbufs) { ++ uint16_t k; + +- for (k = n_pkts; k < n_mbufs; k++) { +- struct rte_mbuf *pkt_to_free; ++ for (k = n_pkts; k < n_mbufs; k++) { ++ struct rte_mbuf *pkt_to_free; + +- pkt_to_free = app.mbuf_tx[i].array[k]; +- rte_pktmbuf_free(pkt_to_free); ++ pkt_to_free = app.mbuf_tx[i].array[k]; ++ rte_pktmbuf_free(pkt_to_free); ++ } + } +- } + +- app.mbuf_tx[i].n_mbufs = 0; ++ app.mbuf_tx[i].n_mbufs = 0; ++ } + } + } diff --git a/dpdk/app/test-pmd/5tswap.c b/dpdk/app/test-pmd/5tswap.c index 629d3e0d31..f041a5e1d5 100644 --- a/dpdk/app/test-pmd/5tswap.c @@ -2873,10 +6355,23 @@ index 908bcb3f47..3e54724237 100644 port_flex_item_flush(portid_t port_id) { diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index 6e10afeedd..d9bf0eb3b3 100644 +index 6e10afeedd..43857c8008 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c -@@ -561,7 +561,7 @@ static void cmd_help_long_parsed(void *parsed_result, +@@ -504,6 +504,12 @@ static void cmd_help_long_parsed(void *parsed_result, + "mac_addr add port (port_id) vf (vf_id) (mac_address)\n" + " Add a MAC address for a VF on the port.\n\n" + ++ "mcast_addr add (port_id) (mcast_addr)\n" ++ " Add a multicast MAC addresses on port_id.\n\n" ++ ++ "mcast_addr remove (port_id) (mcast_addr)\n" ++ " Remove a multicast MAC address from port_id.\n\n" ++ + "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n" + " Set the MAC address for a VF from the PF.\n\n" + +@@ -561,7 +567,7 @@ static void cmd_help_long_parsed(void *parsed_result, " Set the option to enable display of RX and TX bursts.\n" "set port (port_id) vf (vf_id) rx|tx on|off\n" @@ -2885,7 +6380,25 @@ index 6e10afeedd..d9bf0eb3b3 100644 "set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM" "|MPE) (on|off)\n" -@@ -2045,10 +2045,6 @@ cmd_config_mtu_parsed(void *parsed_result, +@@ -856,7 +862,7 @@ static void cmd_help_long_parsed(void *parsed_result, + "port config rx_offload vlan_strip|" + "ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|" + "outer_ipv4_cksum|macsec_strip|header_split|" +- "vlan_filter|vlan_extend|jumbo_frame|scatter|" ++ "vlan_filter|vlan_extend|scatter|" + "buffer_split|timestamp|security|keep_crc on|off\n" + " Enable or disable a per port Rx offloading" + " on all Rx queues of a port\n\n" +@@ -864,7 +870,7 @@ static void cmd_help_long_parsed(void *parsed_result, + "port (port_id) rxq (queue_id) rx_offload vlan_strip|" + "ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|" + "outer_ipv4_cksum|macsec_strip|header_split|" +- "vlan_filter|vlan_extend|jumbo_frame|scatter|" ++ "vlan_filter|vlan_extend|scatter|" + "buffer_split|timestamp|security|keep_crc on|off\n" + " Enable or disable a per queue Rx offloading" + " 
only on a specific Rx queue\n\n" +@@ -2045,10 +2051,6 @@ cmd_config_mtu_parsed(void *parsed_result, { struct cmd_config_mtu_result *res = parsed_result; @@ -2896,7 +6409,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 port_mtu_set(res->port_id, res->value); } -@@ -2651,8 +2647,10 @@ cmd_config_rxtx_queue_parsed(void *parsed_result, +@@ -2651,8 +2653,10 @@ cmd_config_rxtx_queue_parsed(void *parsed_result, __rte_unused void *data) { struct cmd_config_rxtx_queue *res = parsed_result; @@ -2907,7 +6420,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 int ret = 0; if (test_done == 0) { -@@ -2700,8 +2698,15 @@ cmd_config_rxtx_queue_parsed(void *parsed_result, +@@ -2700,8 +2704,15 @@ cmd_config_rxtx_queue_parsed(void *parsed_result, else ret = rte_eth_dev_tx_queue_stop(res->portid, res->qid); @@ -2924,7 +6437,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 } cmdline_parse_token_string_t cmd_config_rxtx_queue_port = -@@ -2770,11 +2775,11 @@ cmd_config_deferred_start_rxtx_queue_parsed(void *parsed_result, +@@ -2770,11 +2781,11 @@ cmd_config_deferred_start_rxtx_queue_parsed(void *parsed_result, ison = !strcmp(res->state, "on"); @@ -2940,7 +6453,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 needreconfig = 1; } -@@ -2892,7 +2897,7 @@ cmd_setup_rxtx_queue_parsed( +@@ -2892,7 +2903,7 @@ cmd_setup_rxtx_queue_parsed( res->qid, port->nb_rx_desc[res->qid], socket_id, @@ -2949,7 +6462,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 mp); if (ret) fprintf(stderr, "Failed to setup RX queue\n"); -@@ -2910,7 +2915,7 @@ cmd_setup_rxtx_queue_parsed( +@@ -2910,7 +2921,7 @@ cmd_setup_rxtx_queue_parsed( res->qid, port->nb_tx_desc[res->qid], socket_id, @@ -2958,7 +6471,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 if (ret) fprintf(stderr, "Failed to setup TX queue\n"); } -@@ -3120,7 +3125,7 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf, +@@ -3120,7 +3131,7 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf, return -1; } for (i = 0; i < ret; i++) @@ -2967,7 +6480,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 return 0; } -@@ -4686,7 +4691,7 @@ cmd_config_queue_tx_offloads(struct rte_port *port) +@@ -4686,7 +4697,7 @@ cmd_config_queue_tx_offloads(struct rte_port *port) /* Apply queue tx offloads configuration */ for (k = 0; k < port->dev_info.max_tx_queues; k++) @@ -2976,7 +6489,218 @@ index 6e10afeedd..d9bf0eb3b3 100644 port->dev_conf.txmode.offloads; } -@@ -5915,6 +5920,19 @@ static void cmd_set_bonding_mode_parsed(void *parsed_result, +@@ -4892,6 +4903,55 @@ cmdline_parse_inst_t cmd_csum_tunnel = { + }, + }; + ++struct cmd_csum_mac_swap_result { ++ cmdline_fixed_string_t csum; ++ cmdline_fixed_string_t parse; ++ cmdline_fixed_string_t onoff; ++ portid_t port_id; ++}; ++ ++static void ++cmd_csum_mac_swap_parsed(void *parsed_result, ++ __rte_unused struct cmdline *cl, ++ __rte_unused void *data) ++{ ++ struct cmd_csum_mac_swap_result *res = parsed_result; ++ ++ if (port_id_is_invalid(res->port_id, ENABLED_WARN)) ++ return; ++ if (strcmp(res->onoff, "on") == 0) ++ ports[res->port_id].fwd_mac_swap = 1; ++ else ++ ports[res->port_id].fwd_mac_swap = 0; ++} ++ ++static cmdline_parse_token_string_t cmd_csum_mac_swap_csum = ++ TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result, ++ csum, "csum"); ++static cmdline_parse_token_string_t cmd_csum_mac_swap_parse = ++ TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result, ++ parse, "mac-swap"); ++static cmdline_parse_token_string_t cmd_csum_mac_swap_onoff = ++ TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result, ++ onoff, "on#off"); ++static cmdline_parse_token_num_t 
cmd_csum_mac_swap_portid = ++ TOKEN_NUM_INITIALIZER(struct cmd_csum_mac_swap_result, ++ port_id, RTE_UINT16); ++ ++static cmdline_parse_inst_t cmd_csum_mac_swap = { ++ .f = cmd_csum_mac_swap_parsed, ++ .data = NULL, ++ .help_str = "csum mac-swap on|off : " ++ "Enable/Disable forward mac address swap", ++ .tokens = { ++ (void *)&cmd_csum_mac_swap_csum, ++ (void *)&cmd_csum_mac_swap_parse, ++ (void *)&cmd_csum_mac_swap_onoff, ++ (void *)&cmd_csum_mac_swap_portid, ++ NULL, ++ }, ++}; ++ + /* *** ENABLE HARDWARE SEGMENTATION IN TX NON-TUNNELED PACKETS *** */ + struct cmd_tso_set_result { + cmdline_fixed_string_t tso; +@@ -4941,19 +5001,6 @@ cmd_tso_set_parsed(void *parsed_result, + ports[res->port_id].tso_segsz); + } + cmd_config_queue_tx_offloads(&ports[res->port_id]); +- +- /* display warnings if configuration is not supported by the NIC */ +- ret = eth_dev_info_get_print_err(res->port_id, &dev_info); +- if (ret != 0) +- return; +- +- if ((ports[res->port_id].tso_segsz != 0) && +- (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) { +- fprintf(stderr, +- "Warning: TSO enabled but not supported by port %d\n", +- res->port_id); +- } +- + cmd_reconfig_device_queue(res->port_id, 1, 1); + } + +@@ -5011,39 +5058,27 @@ struct cmd_tunnel_tso_set_result { + portid_t port_id; + }; + +-static struct rte_eth_dev_info +-check_tunnel_tso_nic_support(portid_t port_id) ++static void ++check_tunnel_tso_nic_support(portid_t port_id, uint64_t tx_offload_capa) + { +- struct rte_eth_dev_info dev_info; +- +- if (eth_dev_info_get_print_err(port_id, &dev_info) != 0) +- return dev_info; +- +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) +- fprintf(stderr, +- "Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)) ++ printf("Warning: VXLAN TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) +- fprintf(stderr, +- "Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)) ++ printf("Warning: GRE TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO)) +- fprintf(stderr, +- "Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO)) ++ printf("Warning: IPIP TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) +- fprintf(stderr, +- "Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) ++ printf("Warning: GENEVE TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO)) +- fprintf(stderr, +- "Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO)) ++ printf("Warning: IP TUNNEL TSO not supported therefore not enabled for port %d\n", + port_id); +- if (!(dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) +- fprintf(stderr, +- "Warning: UDP TUNNEL TSO not supported therefore not enabled for port %d\n", ++ if (!(tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)) ++ printf("Warning: UDP TUNNEL TSO not supported therefore not enabled 
for port %d\n", + port_id); +- return dev_info; + } + + static void +@@ -5053,6 +5088,13 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, + { + struct cmd_tunnel_tso_set_result *res = parsed_result; + struct rte_eth_dev_info dev_info; ++ uint64_t all_tunnel_tso = RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO; ++ int ret; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; +@@ -5064,28 +5106,19 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, + if (!strcmp(res->mode, "set")) + ports[res->port_id].tunnel_tso_segsz = res->tso_segsz; + +- dev_info = check_tunnel_tso_nic_support(res->port_id); + if (ports[res->port_id].tunnel_tso_segsz == 0) { +- ports[res->port_id].dev_conf.txmode.offloads &= +- ~(RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO); ++ ports[res->port_id].dev_conf.txmode.offloads &= ~all_tunnel_tso; + printf("TSO for tunneled packets is disabled\n"); + } else { +- uint64_t tso_offloads = (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO); ++ ret = eth_dev_info_get_print_err(res->port_id, &dev_info); ++ if (ret != 0) ++ return; + +- ports[res->port_id].dev_conf.txmode.offloads |= +- (tso_offloads & dev_info.tx_offload_capa); +- printf("TSO segment size for tunneled packets is %d\n", +- ports[res->port_id].tunnel_tso_segsz); ++ if ((all_tunnel_tso & dev_info.tx_offload_capa) == 0) { ++ fprintf(stderr, "Error: port=%u don't support tunnel TSO offloads.\n", ++ res->port_id); ++ return; ++ } + + /* Below conditions are needed to make it work: + * (1) tunnel TSO is supported by the NIC; +@@ -5098,14 +5131,23 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, + * is not necessary for IPv6 tunneled pkts because there's no + * checksum in IP header anymore. + */ +- +- if (!ports[res->port_id].parse_tunnel) ++ if (!ports[res->port_id].parse_tunnel) { + fprintf(stderr, +- "Warning: csum parse_tunnel must be set so that tunneled packets are recognized\n"); ++ "Error: csum parse_tunnel must be set so that tunneled packets are recognized\n"); ++ return; ++ } + if (!(ports[res->port_id].dev_conf.txmode.offloads & +- RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) ++ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { + fprintf(stderr, +- "Warning: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n"); ++ "Error: csum set outer-ip must be set to hw if outer L3 is IPv4; not necessary for IPv6\n"); ++ return; ++ } ++ ++ check_tunnel_tso_nic_support(res->port_id, dev_info.tx_offload_capa); ++ ports[res->port_id].dev_conf.txmode.offloads |= ++ (all_tunnel_tso & dev_info.tx_offload_capa); ++ printf("TSO segment size for tunneled packets is %d\n", ++ ports[res->port_id].tunnel_tso_segsz); + } + + cmd_config_queue_tx_offloads(&ports[res->port_id]); +@@ -5915,6 +5957,19 @@ static void cmd_set_bonding_mode_parsed(void *parsed_result, { struct cmd_set_bonding_mode_result *res = parsed_result; portid_t port_id = res->port_id; @@ -2996,7 +6720,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 /* Set the bonding mode for the relevant port. 
*/ if (0 != rte_eth_bond_mode_set(port_id, res->value)) -@@ -6651,6 +6669,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result, +@@ -6651,6 +6706,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result, "Failed to enable promiscuous mode for port %u: %s - ignore\n", port_id, rte_strerror(-ret)); @@ -3004,7 +6728,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 ports[port_id].need_setup = 0; ports[port_id].port_status = RTE_PORT_STOPPED; } -@@ -8754,6 +8773,7 @@ static void cmd_quit_parsed(__rte_unused void *parsed_result, +@@ -8754,6 +8810,7 @@ static void cmd_quit_parsed(__rte_unused void *parsed_result, __rte_unused void *data) { cmdline_quit(cl); @@ -3012,7 +6736,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 } cmdline_parse_token_string_t cmd_quit_quit = -@@ -9273,6 +9293,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result, +@@ -9273,6 +9330,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result, } RTE_SET_USED(is_on); @@ -3020,7 +6744,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 #ifdef RTE_NET_IXGBE if (ret == -ENOTSUP) -@@ -16068,7 +16089,7 @@ cmd_rx_offload_get_configuration_parsed( +@@ -16068,7 +16126,7 @@ cmd_rx_offload_get_configuration_parsed( nb_rx_queues = dev_info.nb_rx_queues; for (q = 0; q < nb_rx_queues; q++) { @@ -3029,7 +6753,16 @@ index 6e10afeedd..d9bf0eb3b3 100644 printf(" Queue[%2d] :", q); print_rx_offloads(queue_offloads); printf("\n"); -@@ -16188,11 +16209,11 @@ cmd_config_per_port_rx_offload_parsed(void *parsed_result, +@@ -16121,7 +16179,7 @@ cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload = + (struct cmd_config_per_port_rx_offload_result, + offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#" + "qinq_strip#outer_ipv4_cksum#macsec_strip#" +- "header_split#vlan_filter#vlan_extend#jumbo_frame#" ++ "header_split#vlan_filter#vlan_extend#" + "scatter#buffer_split#timestamp#security#" + "keep_crc#rss_hash"); + cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off = +@@ -16188,11 +16246,11 @@ cmd_config_per_port_rx_offload_parsed(void *parsed_result, if (!strcmp(res->on_off, "on")) { port->dev_conf.rxmode.offloads |= single_offload; for (q = 0; q < nb_rx_queues; q++) @@ -3043,7 +6776,25 @@ index 6e10afeedd..d9bf0eb3b3 100644 } cmd_reconfig_device_queue(port_id, 1, 1); -@@ -16298,9 +16319,9 @@ cmd_config_per_queue_rx_offload_parsed(void *parsed_result, +@@ -16204,7 +16262,7 @@ cmdline_parse_inst_t cmd_config_per_port_rx_offload = { + .help_str = "port config rx_offload vlan_strip|ipv4_cksum|" + "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|" + "macsec_strip|header_split|vlan_filter|vlan_extend|" +- "jumbo_frame|scatter|buffer_split|timestamp|security|" ++ "scatter|buffer_split|timestamp|security|" + "keep_crc|rss_hash on|off", + .tokens = { + (void *)&cmd_config_per_port_rx_offload_result_port, +@@ -16253,7 +16311,7 @@ cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload = + (struct cmd_config_per_queue_rx_offload_result, + offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#" + "qinq_strip#outer_ipv4_cksum#macsec_strip#" +- "header_split#vlan_filter#vlan_extend#jumbo_frame#" ++ "header_split#vlan_filter#vlan_extend#" + "scatter#buffer_split#timestamp#security#keep_crc"); + cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off = + TOKEN_STRING_INITIALIZER +@@ -16298,9 +16356,9 @@ cmd_config_per_queue_rx_offload_parsed(void *parsed_result, } if (!strcmp(res->on_off, "on")) @@ -3055,7 +6806,16 @@ index 6e10afeedd..d9bf0eb3b3 100644 
cmd_reconfig_device_queue(port_id, 1, 1); } -@@ -16487,7 +16508,7 @@ cmd_tx_offload_get_configuration_parsed( +@@ -16312,7 +16370,7 @@ cmdline_parse_inst_t cmd_config_per_queue_rx_offload = { + "vlan_strip|ipv4_cksum|" + "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|" + "macsec_strip|header_split|vlan_filter|vlan_extend|" +- "jumbo_frame|scatter|buffer_split|timestamp|security|" ++ "scatter|buffer_split|timestamp|security|" + "keep_crc on|off", + .tokens = { + (void *)&cmd_config_per_queue_rx_offload_result_port, +@@ -16487,7 +16545,7 @@ cmd_tx_offload_get_configuration_parsed( nb_tx_queues = dev_info.nb_tx_queues; for (q = 0; q < nb_tx_queues; q++) { @@ -3064,7 +6824,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 printf(" Queue[%2d] :", q); print_tx_offloads(queue_offloads); printf("\n"); -@@ -16611,11 +16632,11 @@ cmd_config_per_port_tx_offload_parsed(void *parsed_result, +@@ -16611,11 +16669,11 @@ cmd_config_per_port_tx_offload_parsed(void *parsed_result, if (!strcmp(res->on_off, "on")) { port->dev_conf.txmode.offloads |= single_offload; for (q = 0; q < nb_tx_queues; q++) @@ -3078,7 +6838,7 @@ index 6e10afeedd..d9bf0eb3b3 100644 } cmd_reconfig_device_queue(port_id, 1, 1); -@@ -16724,9 +16745,9 @@ cmd_config_per_queue_tx_offload_parsed(void *parsed_result, +@@ -16724,9 +16782,9 @@ cmd_config_per_queue_tx_offload_parsed(void *parsed_result, } if (!strcmp(res->on_off, "on")) @@ -3090,7 +6850,15 @@ index 6e10afeedd..d9bf0eb3b3 100644 cmd_reconfig_device_queue(port_id, 1, 1); } -@@ -17829,6 +17850,7 @@ cmdline_parse_ctx_t main_ctx[] = { +@@ -17740,6 +17798,7 @@ cmdline_parse_ctx_t main_ctx[] = { + (cmdline_parse_inst_t *)&cmd_csum_set, + (cmdline_parse_inst_t *)&cmd_csum_show, + (cmdline_parse_inst_t *)&cmd_csum_tunnel, ++ (cmdline_parse_inst_t *)&cmd_csum_mac_swap, + (cmdline_parse_inst_t *)&cmd_tso_set, + (cmdline_parse_inst_t *)&cmd_tso_show, + (cmdline_parse_inst_t *)&cmd_tunnel_tso_set, +@@ -17829,6 +17888,7 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_show_port_meter_cap, (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm, (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm, @@ -3227,7 +6995,7 @@ index bfbd43ca9b..c058b8946e 100644 (void *)&cmd_show_port_tm_level_cap_show, (void *)&cmd_show_port_tm_level_cap_port, diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c -index 1722d6c8f8..ad1b5f51d5 100644 +index 1722d6c8f8..fef7fa71e4 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c @@ -66,8 +66,6 @@ @@ -3445,7 +7213,20 @@ index 1722d6c8f8..ad1b5f51d5 100644 if (port->need_reconfig == 0) { diag = rte_eth_dev_set_mtu(port_id, mtu); if (diag != 0) { -@@ -1682,6 +1769,37 @@ port_action_handle_destroy(portid_t port_id, +@@ -1642,7 +1729,6 @@ port_action_handle_destroy(portid_t port_id, + { + struct rte_port *port; + struct port_indirect_action **tmp; +- uint32_t c = 0; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || +@@ -1677,11 +1763,41 @@ port_action_handle_destroy(portid_t port_id, + } + if (i == n) + tmp = &(*tmp)->next; +- ++c; + } return ret; } @@ -3483,7 +7264,23 @@ index 1722d6c8f8..ad1b5f51d5 100644 /** Get indirect action by port + id */ struct rte_flow_action_handle * -@@ -2758,8 +2876,8 @@ rxtx_config_display(void) +@@ -2078,7 +2194,6 @@ port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) + { + struct rte_port *port; + struct port_flow **tmp; +- uint32_t c = 0; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || +@@ -2111,7 +2226,6 @@ 
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) + } + if (i == n) + tmp = &(*tmp)->next; +- ++c; + } + return ret; + } +@@ -2758,8 +2872,8 @@ rxtx_config_display(void) nb_fwd_lcores, nb_fwd_ports); RTE_ETH_FOREACH_DEV(pid) { @@ -3494,7 +7291,7 @@ index 1722d6c8f8..ad1b5f51d5 100644 uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; struct rte_eth_rxq_info rx_qinfo; -@@ -3017,7 +3135,7 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, +@@ -3017,7 +3131,7 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, fs = fwd_streams[sm_id]; port = &ports[fs->rx_port]; dev_info = &port->dev_info; @@ -3503,7 +7300,7 @@ index 1722d6c8f8..ad1b5f51d5 100644 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0 || rxq_conf->share_group == 0) /* Not shared rxq. */ -@@ -3077,7 +3195,7 @@ pkt_fwd_shared_rxq_check(void) +@@ -3077,7 +3191,7 @@ pkt_fwd_shared_rxq_check(void) fs->lcore = fwd_lcores[lc_id]; port = &ports[fs->rx_port]; dev_info = &port->dev_info; @@ -3512,7 +7309,7 @@ index 1722d6c8f8..ad1b5f51d5 100644 if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0 || rxq_conf->share_group == 0) /* Not shared rxq. */ -@@ -4719,6 +4837,8 @@ set_record_burst_stats(uint8_t on_off) +@@ -4719,6 +4833,8 @@ set_record_burst_stats(uint8_t on_off) record_burst_stats = on_off; } @@ -3521,7 +7318,7 @@ index 1722d6c8f8..ad1b5f51d5 100644 static char* flowtype_to_str(uint16_t flow_type) { -@@ -4762,8 +4882,6 @@ flowtype_to_str(uint16_t flow_type) +@@ -4762,8 +4878,6 @@ flowtype_to_str(uint16_t flow_type) return NULL; } @@ -3530,7 +7327,7 @@ index 1722d6c8f8..ad1b5f51d5 100644 static inline void print_fdir_mask(struct rte_eth_fdir_masks *mask) { -@@ -5185,6 +5303,25 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) +@@ -5185,6 +5299,25 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); } @@ -3557,7 +7354,7 @@ index 1722d6c8f8..ad1b5f51d5 100644 eth_port_multicast_addr_list_set(portid_t port_id) { diff --git a/dpdk/app/test-pmd/csumonly.c b/dpdk/app/test-pmd/csumonly.c -index 2aeea243b6..d661e21e02 100644 +index 2aeea243b6..5e494c4129 100644 --- a/dpdk/app/test-pmd/csumonly.c +++ b/dpdk/app/test-pmd/csumonly.c @@ -222,15 +222,14 @@ parse_gtp(struct rte_udp_hdr *udp_hdr, @@ -3579,7 +7376,14 @@ index 2aeea243b6..d661e21e02 100644 ip_ver = (ip_ver) & 0xf0; if (ip_ver == RTE_GTP_TYPE_IPV4) { -@@ -257,8 +256,7 @@ parse_gtp(struct rte_udp_hdr *udp_hdr, +@@ -251,14 +250,13 @@ parse_gtp(struct rte_udp_hdr *udp_hdr, + info->l4_proto = 0; + } + +- info->l2_len += RTE_ETHER_GTP_HLEN; ++ info->l2_len += gtp_len + sizeof(*udp_hdr); + } + /* Parse a vxlan header */ static void parse_vxlan(struct rte_udp_hdr *udp_hdr, @@ -3637,7 +7441,7 @@ index 2aeea243b6..d661e21e02 100644 * IP, UDP, TCP and SCTP flags always concern the inner layer. The * OUTER_IP is only useful for tunnel packets. 
*/ -@@ -887,10 +906,6 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -887,10 +906,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) * and inner headers */ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); @@ -3645,10 +7449,16 @@ index 2aeea243b6..d661e21e02 100644 - ð_hdr->dst_addr); - rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, - ð_hdr->src_addr); ++ if (ports[fs->tx_port].fwd_mac_swap) { ++ rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], ++ ð_hdr->dst_addr); ++ rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, ++ ð_hdr->src_addr); ++ } parse_ethernet(eth_hdr, &info); l3_hdr = (char *)eth_hdr + info.l2_len; -@@ -912,8 +927,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -912,8 +933,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE; goto tunnel_update; } @@ -3658,7 +7468,7 @@ index 2aeea243b6..d661e21e02 100644 if (info.is_tunnel) { tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN; -@@ -925,6 +939,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -925,6 +945,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) RTE_MBUF_F_TX_TUNNEL_GENEVE; goto tunnel_update; } @@ -3671,7 +7481,7 @@ index 2aeea243b6..d661e21e02 100644 } else if (info.l4_proto == IPPROTO_GRE) { struct simple_gre_hdr *gre_hdr; -@@ -1089,6 +1109,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -1089,6 +1115,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) fs->gro_times = 0; } } @@ -3680,7 +7490,7 @@ index 2aeea243b6..d661e21e02 100644 } #endif -@@ -1122,6 +1144,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) +@@ -1122,16 +1150,21 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) tx_pkts_burst = gso_segments; nb_rx = nb_segments; @@ -3689,7 +7499,50 @@ index 2aeea243b6..d661e21e02 100644 } else #endif tx_pkts_burst = pkts_burst; -@@ -1164,9 +1188,22 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) + + nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue, + tx_pkts_burst, nb_rx); +- if (nb_prep != nb_rx) ++ if (nb_prep != nb_rx) { + fprintf(stderr, + "Preparing packet burst to transmit failed: %s\n", + rte_strerror(rte_errno)); ++ fs->fwd_dropped += (nb_rx - nb_prep); ++ rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_prep], nb_rx - nb_prep); ++ } + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst, + nb_prep); +@@ -1139,12 +1172,12 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) + /* + * Retry if necessary + */ +- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { ++ if (unlikely(nb_tx < nb_prep) && fs->retry_enabled) { + retry = 0; +- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { ++ while (nb_tx < nb_prep && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, +- &tx_pkts_burst[nb_tx], nb_rx - nb_tx); ++ &tx_pkts_burst[nb_tx], nb_prep - nb_tx); + } + } + fs->tx_packets += nb_tx; +@@ -1154,19 +1187,32 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) + fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum; + + inc_tx_burst_stats(fs, nb_tx); +- if (unlikely(nb_tx < nb_rx)) { +- fs->fwd_dropped += (nb_rx - nb_tx); ++ if (unlikely(nb_tx < nb_prep)) { ++ fs->fwd_dropped += (nb_prep - nb_tx); + do { + rte_pktmbuf_free(tx_pkts_burst[nb_tx]); +- } while (++nb_tx < nb_rx); ++ } while (++nb_tx < nb_prep); + } + get_end_cycles(fs, start_tsc); } @@ -3767,9 +7620,24 @@ index 99c94cb282..066f2a3ab7 100644 .packet_fwd = reply_to_icmp_echo_rqsts, }; diff --git a/dpdk/app/test-pmd/ieee1588fwd.c b/dpdk/app/test-pmd/ieee1588fwd.c -index 
9ff817aa68..fc4e2d014c 100644 +index 9ff817aa68..896d5ef26a 100644 --- a/dpdk/app/test-pmd/ieee1588fwd.c +++ b/dpdk/app/test-pmd/ieee1588fwd.c +@@ -184,13 +184,13 @@ ieee1588_packet_fwd(struct fwd_stream *fs) + + /* Forward PTP packet with hardware TX timestamp */ + mb->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST; +- fs->tx_packets += 1; + if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) { + printf("Port %u sent PTP packet dropped\n", fs->rx_port); + fs->fwd_dropped += 1; + rte_pktmbuf_free(mb); + return; + } ++ fs->tx_packets += 1; + + /* + * Check the TX timestamp. @@ -211,9 +211,22 @@ port_ieee1588_fwd_end(portid_t pi) rte_eth_timesync_disable(pi); } @@ -3874,11 +7742,46 @@ index 4627ff83e9..acb0fd7fb4 100644 + .stream_init = stream_init_mac_swap, .packet_fwd = pkt_burst_mac_swap, }; +diff --git a/dpdk/app/test-pmd/meson.build b/dpdk/app/test-pmd/meson.build +index 43130c8856..99bf383971 100644 +--- a/dpdk/app/test-pmd/meson.build ++++ b/dpdk/app/test-pmd/meson.build +@@ -68,6 +68,7 @@ if dpdk_conf.has('RTE_NET_I40E') + deps += 'net_i40e' + endif + if dpdk_conf.has('RTE_NET_IXGBE') ++ cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS'] + deps += 'net_ixgbe' + endif + if dpdk_conf.has('RTE_NET_DPAA') diff --git a/dpdk/app/test-pmd/noisy_vnf.c b/dpdk/app/test-pmd/noisy_vnf.c -index e4434bea95..a92e810190 100644 +index e4434bea95..1be5f77efe 100644 --- a/dpdk/app/test-pmd/noisy_vnf.c +++ b/dpdk/app/test-pmd/noisy_vnf.c -@@ -277,9 +277,22 @@ noisy_fwd_begin(portid_t pi) +@@ -56,8 +56,8 @@ do_write(char *vnf_mem) + static inline void + do_read(char *vnf_mem) + { ++ uint64_t r __rte_unused; + uint64_t i = rte_rand(); +- uint64_t r; + + r = vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) / + RTE_CACHE_LINE_SIZE)]; +@@ -213,9 +213,10 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs) + sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + tmp_pkts, nb_deqd); + if (unlikely(sent < nb_deqd) && fs->retry_enabled) +- nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs); +- inc_tx_burst_stats(fs, nb_tx); ++ sent += do_retry(nb_deqd, sent, tmp_pkts, fs); ++ inc_tx_burst_stats(fs, sent); + fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent); ++ nb_tx += sent; + ncf->prev_time = rte_get_timer_cycles(); + } + } +@@ -277,9 +278,22 @@ noisy_fwd_begin(portid_t pi) return 0; } @@ -3984,7 +7887,7 @@ index da54a383fd..2e9047804b 100644 .packet_fwd = shared_rxq_fwd, }; diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c -index 55eb293cc0..3699c5fd64 100644 +index 55eb293cc0..5f34641e90 100644 --- a/dpdk/app/test-pmd/testpmd.c +++ b/dpdk/app/test-pmd/testpmd.c @@ -66,6 +66,9 @@ @@ -4012,10 +7915,12 @@ index 55eb293cc0..3699c5fd64 100644 uint16_t verbose_level = 0; /**< Silent by default. */ int testpmd_logtype; /**< Log type for testpmd logs */ -@@ -220,6 +229,7 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */ +@@ -219,7 +228,8 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */ + * In container, it cannot terminate the process which running with 'stats-period' * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. */ - uint8_t f_quit; +-uint8_t f_quit; ++static volatile uint8_t f_quit; +uint8_t cl_quit; /* Quit testpmd from cmdline. 
*/ /* @@ -4163,7 +8068,12 @@ index 55eb293cc0..3699c5fd64 100644 int i; memset(ports_stats, 0, sizeof(ports_stats)); -@@ -2009,7 +2080,13 @@ fwd_stats_display(void) +@@ -2006,10 +2077,18 @@ fwd_stats_display(void) + fwd_cycles += fs->core_cycles; + } + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { ++ uint64_t tx_dropped = 0; ++ pt_id = fwd_ports_ids[i]; port = &ports[pt_id]; @@ -4178,7 +8088,30 @@ index 55eb293cc0..3699c5fd64 100644 stats.ipackets -= port->stats.ipackets; stats.opackets -= port->stats.opackets; stats.ibytes -= port->stats.ibytes; -@@ -2104,11 +2181,16 @@ fwd_stats_reset(void) +@@ -2021,8 +2100,9 @@ fwd_stats_display(void) + total_recv += stats.ipackets; + total_xmit += stats.opackets; + total_rx_dropped += stats.imissed; +- total_tx_dropped += ports_stats[pt_id].tx_dropped; +- total_tx_dropped += stats.oerrors; ++ tx_dropped += ports_stats[pt_id].tx_dropped; ++ tx_dropped += stats.oerrors; ++ total_tx_dropped += tx_dropped; + total_rx_nombuf += stats.rx_nombuf; + + printf("\n %s Forward statistics for port %-2d %s\n", +@@ -2049,8 +2129,8 @@ fwd_stats_display(void) + + printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 + "TX-total: %-"PRIu64"\n", +- stats.opackets, ports_stats[pt_id].tx_dropped, +- stats.opackets + ports_stats[pt_id].tx_dropped); ++ stats.opackets, tx_dropped, ++ stats.opackets + tx_dropped); + + if (record_burst_stats) { + if (ports_stats[pt_id].rx_stream) +@@ -2104,11 +2184,16 @@ fwd_stats_reset(void) { streamid_t sm_id; portid_t pt_id; @@ -4196,7 +8129,7 @@ index 55eb293cc0..3699c5fd64 100644 } for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { struct fwd_stream *fs = fwd_streams[sm_id]; -@@ -2152,6 +2234,12 @@ flush_fwd_rx_queues(void) +@@ -2152,6 +2237,12 @@ flush_fwd_rx_queues(void) for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { for (rxq = 0; rxq < nb_rxq; rxq++) { port_id = fwd_ports_ids[rxp]; @@ -4209,7 +8142,7 @@ index 55eb293cc0..3699c5fd64 100644 /** * testpmd can stuck in the below do while loop * if rte_eth_rx_burst() always returns nonzero -@@ -2197,7 +2285,8 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) +@@ -2197,7 +2288,8 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) nb_fs = fc->stream_nb; do { for (sm_id = 0; sm_id < nb_fs; sm_id++) @@ -4219,7 +8152,95 @@ index 55eb293cc0..3699c5fd64 100644 #ifdef RTE_LIB_BITRATESTATS if (bitrate_enabled != 0 && bitrate_lcore_id == rte_lcore_id()) { -@@ -2279,6 +2368,7 @@ start_packet_forwarding(int with_tx_first) +@@ -2271,6 +2363,87 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) + } + } + ++static void ++update_rx_queue_state(uint16_t port_id, uint16_t queue_id) ++{ ++ struct rte_eth_rxq_info rx_qinfo; ++ int32_t rc; ++ ++ rc = rte_eth_rx_queue_info_get(port_id, ++ queue_id, &rx_qinfo); ++ if (rc == 0) { ++ ports[port_id].rxq[queue_id].state = ++ rx_qinfo.queue_state; ++ } else if (rc == -ENOTSUP) { ++ /* ++ * Do not change the rxq state for primary process ++ * to ensure that the PMDs do not implement ++ * rte_eth_rx_queue_info_get can forward as before. ++ */ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ return; ++ /* ++ * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED ++ * to ensure that the PMDs do not implement ++ * rte_eth_rx_queue_info_get can forward. 
++ */ ++ ports[port_id].rxq[queue_id].state = ++ RTE_ETH_QUEUE_STATE_STARTED; ++ } else { ++ TESTPMD_LOG(WARNING, ++ "Failed to get rx queue info\n"); ++ } ++} ++ ++static void ++update_tx_queue_state(uint16_t port_id, uint16_t queue_id) ++{ ++ struct rte_eth_txq_info tx_qinfo; ++ int32_t rc; ++ ++ rc = rte_eth_tx_queue_info_get(port_id, ++ queue_id, &tx_qinfo); ++ if (rc == 0) { ++ ports[port_id].txq[queue_id].state = ++ tx_qinfo.queue_state; ++ } else if (rc == -ENOTSUP) { ++ /* ++ * Do not change the txq state for primary process ++ * to ensure that the PMDs do not implement ++ * rte_eth_tx_queue_info_get can forward as before. ++ */ ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) ++ return; ++ /* ++ * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED ++ * to ensure that the PMDs do not implement ++ * rte_eth_tx_queue_info_get can forward. ++ */ ++ ports[port_id].txq[queue_id].state = ++ RTE_ETH_QUEUE_STATE_STARTED; ++ } else { ++ TESTPMD_LOG(WARNING, ++ "Failed to get tx queue info\n"); ++ } ++} ++ ++static void ++update_queue_state(portid_t pid) ++{ ++ portid_t pi; ++ queueid_t qi; ++ ++ RTE_ETH_FOREACH_DEV(pi) { ++ if (pid != pi && pid != (portid_t)RTE_PORT_ALL) ++ continue; ++ ++ for (qi = 0; qi < nb_rxq; qi++) ++ update_rx_queue_state(pi, qi); ++ for (qi = 0; qi < nb_txq; qi++) ++ update_tx_queue_state(pi, qi); ++ } ++} ++ + /* + * Launch packet forwarding configuration. + */ +@@ -2279,6 +2452,7 @@ start_packet_forwarding(int with_tx_first) { port_fwd_begin_t port_fwd_begin; port_fwd_end_t port_fwd_end; @@ -4227,18 +8248,20 @@ index 55eb293cc0..3699c5fd64 100644 unsigned int i; if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) -@@ -2309,6 +2399,10 @@ start_packet_forwarding(int with_tx_first) +@@ -2309,6 +2483,12 @@ start_packet_forwarding(int with_tx_first) if (!pkt_fwd_shared_rxq_check()) return; -+ if (stream_init != NULL) ++ if (stream_init != NULL) { ++ update_queue_state(RTE_PORT_ALL); + for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) + stream_init(fwd_streams[i]); ++ } + port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; if (port_fwd_begin != NULL) { for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { -@@ -2570,7 +2664,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, +@@ -2570,7 +2750,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, socket_id, rx_conf, mp); @@ -4247,7 +8270,7 @@ index 55eb293cc0..3699c5fd64 100644 } for (i = 0; i < rx_pkt_nb_segs; i++) { struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; -@@ -2579,7 +2673,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, +@@ -2579,7 +2759,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, * Use last valid pool for the segments with number * exceeding the pool index. */ @@ -4256,7 +8279,7 @@ index 55eb293cc0..3699c5fd64 100644 mpx = mbuf_pool_find(socket_id, mp_n); /* Handle zero as mbuf data buffer size. */ rx_seg->length = rx_pkt_seg_lengths[i] ? 
-@@ -2595,6 +2689,10 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, +@@ -2595,6 +2775,10 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, socket_id, rx_conf, NULL); rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; @@ -4267,10 +8290,30 @@ index 55eb293cc0..3699c5fd64 100644 return ret; } -@@ -2722,6 +2820,13 @@ start_port(portid_t pid) +@@ -2703,7 +2887,7 @@ fill_xstats_display_info(void) + int + start_port(portid_t pid) + { +- int diag, need_check_link_status = -1; ++ int diag; + portid_t pi; + portid_t p_pi = RTE_MAX_ETHPORTS; + portid_t pl[RTE_MAX_ETHPORTS]; +@@ -2714,6 +2898,9 @@ start_port(portid_t pid) + queueid_t qi; + struct rte_port *port; + struct rte_eth_hairpin_cap cap; ++ bool at_least_one_port_exist = false; ++ bool all_ports_already_started = true; ++ bool at_least_one_port_successfully_started = false; + + if (port_id_is_invalid(pid, ENABLED_WARN)) + return 0; +@@ -2722,11 +2909,20 @@ start_port(portid_t pid) if (pid != pi && pid != (portid_t)RTE_PORT_ALL) continue; +- need_check_link_status = 0; + if (port_is_bonding_slave(pi)) { + fprintf(stderr, + "Please remove port %d from bonded device.\n", @@ -4278,10 +8321,19 @@ index 55eb293cc0..3699c5fd64 100644 + continue; + } + - need_check_link_status = 0; ++ at_least_one_port_exist = true; ++ port = &ports[pi]; - if (port->port_status == RTE_PORT_STOPPED) -@@ -2790,7 +2895,7 @@ start_port(portid_t pid) +- if (port->port_status == RTE_PORT_STOPPED) ++ if (port->port_status == RTE_PORT_STOPPED) { + port->port_status = RTE_PORT_HANDLING; +- else { ++ all_ports_already_started = false; ++ } else { + fprintf(stderr, "Port %d is now not stopped\n", pi); + continue; + } +@@ -2790,7 +2986,7 @@ start_port(portid_t pid) for (k = 0; k < port->dev_info.max_rx_queues; k++) @@ -4290,7 +8342,7 @@ index 55eb293cc0..3699c5fd64 100644 dev_conf.rxmode.offloads; } /* Apply Tx offloads configuration */ -@@ -2801,7 +2906,7 @@ start_port(portid_t pid) +@@ -2801,7 +2997,7 @@ start_port(portid_t pid) for (k = 0; k < port->dev_info.max_tx_queues; k++) @@ -4299,7 +8351,7 @@ index 55eb293cc0..3699c5fd64 100644 dev_conf.txmode.offloads; } } -@@ -2809,20 +2914,28 @@ start_port(portid_t pid) +@@ -2809,20 +3005,28 @@ start_port(portid_t pid) port->need_reconfig_queues = 0; /* setup tx queues */ for (qi = 0; qi < nb_txq; qi++) { @@ -4331,7 +8383,7 @@ index 55eb293cc0..3699c5fd64 100644 /* Fail to setup tx queue, return */ if (port->port_status == RTE_PORT_HANDLING) -@@ -2855,7 +2968,7 @@ start_port(portid_t pid) +@@ -2855,7 +3059,7 @@ start_port(portid_t pid) diag = rx_queue_setup(pi, qi, port->nb_rx_desc[qi], rxring_numa[pi], @@ -4340,7 +8392,7 @@ index 55eb293cc0..3699c5fd64 100644 mp); } else { struct rte_mempool *mp = -@@ -2870,7 +2983,7 @@ start_port(portid_t pid) +@@ -2870,7 +3074,7 @@ start_port(portid_t pid) diag = rx_queue_setup(pi, qi, port->nb_rx_desc[qi], port->socket_id, @@ -4349,7 +8401,28 @@ index 55eb293cc0..3699c5fd64 100644 mp); } if (diag == 0) -@@ -3090,11 +3203,48 @@ remove_invalid_ports(void) +@@ -2933,15 +3137,16 @@ start_port(portid_t pid) + printf("Port %d: " RTE_ETHER_ADDR_PRT_FMT "\n", pi, + RTE_ETHER_ADDR_BYTES(&port->eth_addr)); + +- /* at least one port started, need checking link status */ +- need_check_link_status = 1; ++ at_least_one_port_successfully_started = true; + + pl[cfg_pi++] = pi; + } + +- if (need_check_link_status == 1 && !no_link_check) ++ update_queue_state(pi); ++ ++ if (at_least_one_port_successfully_started && !no_link_check) + check_all_ports_link_status(RTE_PORT_ALL); +- else if 
(need_check_link_status == 0) ++ else if (at_least_one_port_exist & all_ports_already_started) + fprintf(stderr, "Please stop the ports first\n"); + + if (hairpin_mode & 0xf) { +@@ -3090,11 +3295,48 @@ remove_invalid_ports(void) nb_cfg_ports = nb_fwd_ports; } @@ -4398,7 +8471,7 @@ index 55eb293cc0..3699c5fd64 100644 if (port_id_is_invalid(pid, ENABLED_WARN)) return; -@@ -3126,9 +3276,20 @@ close_port(portid_t pid) +@@ -3126,9 +3368,20 @@ close_port(portid_t pid) } if (is_proc_primary()) { @@ -4421,7 +8494,32 @@ index 55eb293cc0..3699c5fd64 100644 } free_xstats_display_info(pi); -@@ -3272,7 +3433,7 @@ detach_device(struct rte_device *dev) +@@ -3175,14 +3428,16 @@ reset_port(portid_t pid) + continue; + } + +- diag = rte_eth_dev_reset(pi); +- if (diag == 0) { +- port = &ports[pi]; +- port->need_reconfig = 1; +- port->need_reconfig_queues = 1; +- } else { +- fprintf(stderr, "Failed to reset port %d. diag=%d\n", +- pi, diag); ++ if (is_proc_primary()) { ++ diag = rte_eth_dev_reset(pi); ++ if (diag == 0) { ++ port = &ports[pi]; ++ port->need_reconfig = 1; ++ port->need_reconfig_queues = 1; ++ } else { ++ fprintf(stderr, "Failed to reset port %d. diag=%d\n", ++ pi, diag); ++ } + } + } + +@@ -3272,7 +3527,7 @@ detach_device(struct rte_device *dev) sibling); return; } @@ -4430,7 +8528,7 @@ index 55eb293cc0..3699c5fd64 100644 } } -@@ -3339,7 +3500,7 @@ detach_devargs(char *identifier) +@@ -3339,7 +3594,7 @@ detach_devargs(char *identifier) rte_devargs_reset(&da); return; } @@ -4439,7 +8537,7 @@ index 55eb293cc0..3699c5fd64 100644 } } -@@ -3645,59 +3806,59 @@ rxtx_port_config(portid_t pid) +@@ -3645,59 +3900,59 @@ rxtx_port_config(portid_t pid) struct rte_port *port = &ports[pid]; for (qid = 0; qid < nb_rxq; qid++) { @@ -4517,7 +8615,7 @@ index 55eb293cc0..3699c5fd64 100644 port->nb_tx_desc[qid] = nb_txd; } -@@ -3778,7 +3939,7 @@ init_port_config(void) +@@ -3778,7 +4033,7 @@ init_port_config(void) for (i = 0; i < port->dev_info.nb_rx_queues; i++) @@ -4526,7 +8624,7 @@ index 55eb293cc0..3699c5fd64 100644 ~RTE_ETH_RX_OFFLOAD_RSS_HASH; } } -@@ -3952,7 +4113,7 @@ init_port_dcb_config(portid_t pid, +@@ -3952,7 +4207,7 @@ init_port_dcb_config(portid_t pid, if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) { port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH; for (i = 0; i < nb_rxq; i++) @@ -4535,8 +8633,58 @@ index 55eb293cc0..3699c5fd64 100644 ~RTE_ETH_RX_OFFLOAD_RSS_HASH; } +@@ -4034,10 +4289,11 @@ init_port(void) + "rte_zmalloc(%d struct rte_port) failed\n", + RTE_MAX_ETHPORTS); + } +- for (i = 0; i < RTE_MAX_ETHPORTS; i++) ++ for (i = 0; i < RTE_MAX_ETHPORTS; i++) { ++ ports[i].fwd_mac_swap = 1; + ports[i].xstats_info.allocated = false; +- for (i = 0; i < RTE_MAX_ETHPORTS; i++) + LIST_INIT(&ports[i].flow_tunnel_list); ++ } + /* Initialize ports NUMA structures */ + memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); + memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); +@@ -4114,6 +4370,9 @@ main(int argc, char** argv) + rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", + rte_strerror(rte_errno)); + ++ /* allocate port structures, and init them */ ++ init_port(); ++ + ret = register_eth_event_callback(); + if (ret != 0) + rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); +@@ -4132,9 +4391,6 @@ main(int argc, char** argv) + if (nb_ports == 0) + TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); + +- /* allocate port structures, and init them */ +- init_port(); +- + set_def_fwd_config(); + if (nb_lcores == 0) + rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" +@@ 
-4212,8 +4468,13 @@ main(int argc, char** argv) + } + } + +- if (!no_device_start && start_port(RTE_PORT_ALL) != 0) +- rte_exit(EXIT_FAILURE, "Start ports failed\n"); ++ if (!no_device_start && start_port(RTE_PORT_ALL) != 0) { ++ if (!interactive) { ++ rte_eal_cleanup(); ++ rte_exit(EXIT_FAILURE, "Start ports failed\n"); ++ } ++ fprintf(stderr, "Start ports failed\n"); ++ } + + /* set all ports to promiscuous mode by default */ + RTE_ETH_FOREACH_DEV(port_id) { diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h -index 2149ecd93a..18abee907c 100644 +index 2149ecd93a..e53320e630 100644 --- a/dpdk/app/test-pmd/testpmd.h +++ b/dpdk/app/test-pmd/testpmd.h @@ -32,6 +32,8 @@ @@ -4592,7 +8740,7 @@ index 2149ecd93a..18abee907c 100644 /** * The data structure associated with each port. */ -@@ -238,11 +256,12 @@ struct rte_port { +@@ -238,11 +256,13 @@ struct rte_port { uint8_t dcb_flag; /**< enable dcb */ uint16_t nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */ uint16_t nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */ @@ -4604,11 +8752,12 @@ index 2149ecd93a..18abee907c 100644 uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */ - uint8_t slave_flag; /**< bonding slave port */ + uint8_t slave_flag : 1, /**< bonding slave port */ -+ bond_flag : 1; /**< port is bond device */ ++ bond_flag : 1, /**< port is bond device */ ++ fwd_mac_swap : 1; /**< swap packet MAC before forward */ struct port_flow *flow_list; /**< Associated flows. */ struct port_indirect_action *actions_list; /**< Associated indirect actions. */ -@@ -296,12 +315,14 @@ struct fwd_lcore { +@@ -296,12 +316,14 @@ struct fwd_lcore { */ typedef int (*port_fwd_begin_t)(portid_t pi); typedef void (*port_fwd_end_t)(portid_t pi); @@ -4623,7 +8772,7 @@ index 2149ecd93a..18abee907c 100644 packet_fwd_t packet_fwd; /**< Mandatory. 
*/ }; -@@ -880,6 +901,7 @@ int port_action_handle_create(portid_t port_id, uint32_t id, +@@ -880,6 +902,7 @@ int port_action_handle_create(portid_t port_id, uint32_t id, const struct rte_flow_action *action); int port_action_handle_destroy(portid_t port_id, uint32_t n, const uint32_t *action); @@ -4631,7 +8780,7 @@ index 2149ecd93a..18abee907c 100644 struct rte_flow_action_handle *port_action_handle_get_by_id(portid_t port_id, uint32_t id); int port_action_handle_update(portid_t port_id, uint32_t id, -@@ -897,6 +919,7 @@ int port_flow_create(portid_t port_id, +@@ -897,6 +920,7 @@ int port_flow_create(portid_t port_id, int port_action_handle_query(portid_t port_id, uint32_t id); void update_age_action_context(const struct rte_flow_action *actions, struct port_flow *pf); @@ -4639,6 +8788,14 @@ index 2149ecd93a..18abee907c 100644 int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule); int port_flow_flush(portid_t port_id); int port_flow_dump(portid_t port_id, bool dump_all, +@@ -1091,7 +1115,6 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue, + void add_tx_dynf_callback(portid_t portid); + void remove_tx_dynf_callback(portid_t portid); + int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen); +-int update_jumbo_frame_offload(portid_t portid); + void flex_item_create(portid_t port_id, uint16_t flex_id, const char *filename); + void flex_item_destroy(portid_t port_id, uint16_t flex_id); + void port_flex_item_flush(portid_t port_id); @@ -1101,6 +1124,8 @@ extern int flow_parse(const char *src, void *result, unsigned int size, struct rte_flow_item **pattern, struct rte_flow_action **actions); @@ -4879,10 +9036,26 @@ index 8e665df73c..ca0b0a5d6a 100644 rte_exit(EXIT_FAILURE, "Failed to distribute queues to lcores!\n"); ret = init_port(&nb_max_payload, rules_file, diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build -index 2b480adfba..c13776c0b9 100644 +index 2b480adfba..845ce96d0e 100644 --- a/dpdk/app/test/meson.build +++ b/dpdk/app/test/meson.build -@@ -288,8 +288,6 @@ fast_tests = [ +@@ -227,6 +227,7 @@ fast_tests = [ + ['fib6_autotest', true], + ['func_reentrancy_autotest', false], + ['flow_classify_autotest', false], ++ ['graph_autotest', true], + ['hash_autotest', true], + ['interrupt_autotest', true], + ['ipfrag_autotest', false], +@@ -243,6 +244,7 @@ fast_tests = [ + ['memzone_autotest', false], + ['meter_autotest', true], + ['multiprocess_autotest', false], ++ ['node_list_dump', true], + ['per_lcore_autotest', true], + ['pflock_autotest', true], + ['prefetch_autotest', true], +@@ -288,8 +290,6 @@ fast_tests = [ # Tests known to have issues or which don't belong in other tests lists. 
extra_test_names = [ 'alarm_autotest', # ee00af60170b ("test: remove strict timing requirements some tests") @@ -4891,7 +9064,15 @@ index 2b480adfba..c13776c0b9 100644 'red_autotest', # https://bugs.dpdk.org/show_bug.cgi?id=826 ] -@@ -492,7 +490,7 @@ dpdk_test = executable('dpdk-test', +@@ -331,6 +331,7 @@ perf_test_names = [ + 'trace_perf_autotest', + 'ipsec_perf_autotest', + 'thash_perf_autotest', ++ 'graph_perf_autotest', + ] + + driver_test_names = [ +@@ -492,7 +493,7 @@ dpdk_test = executable('dpdk-test', driver_install_path), install: true) @@ -4900,6 +9081,82 @@ index 2b480adfba..c13776c0b9 100644 message('hugepage availability: @0@'.format(has_hugepage)) # some perf tests (eg: memcpy perf autotest)take very long +diff --git a/dpdk/app/test/packet_burst_generator.c b/dpdk/app/test/packet_burst_generator.c +index 8ac24577ba..7556bb5512 100644 +--- a/dpdk/app/test/packet_burst_generator.c ++++ b/dpdk/app/test/packet_burst_generator.c +@@ -262,11 +262,11 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst, + void *ip_hdr, uint8_t ipv4, struct rte_udp_hdr *udp_hdr, + int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs) + { +- int i, nb_pkt = 0; +- size_t eth_hdr_size; +- ++ const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs; + struct rte_mbuf *pkt_seg; + struct rte_mbuf *pkt; ++ size_t eth_hdr_size; ++ int i, nb_pkt = 0; + + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = rte_pktmbuf_alloc(mp); +@@ -277,7 +277,7 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst, + break; + } + +- pkt->data_len = pkt_len; ++ pkt->data_len = pkt_seg_data_len; + pkt_seg = pkt; + for (i = 1; i < nb_pkt_segs; i++) { + pkt_seg->next = rte_pktmbuf_alloc(mp); +@@ -287,7 +287,10 @@ generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst, + goto nomore_mbuf; + } + pkt_seg = pkt_seg->next; +- pkt_seg->data_len = pkt_len; ++ if (i != nb_pkt_segs - 1) ++ pkt_seg->data_len = pkt_seg_data_len; ++ else ++ pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs; + } + pkt_seg->next = NULL; /* Last segment of packet. */ + +@@ -343,11 +346,11 @@ generate_packet_burst_proto(struct rte_mempool *mp, + uint8_t ipv4, uint8_t proto, void *proto_hdr, + int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs) + { +- int i, nb_pkt = 0; +- size_t eth_hdr_size; +- ++ const uint8_t pkt_seg_data_len = pkt_len / nb_pkt_segs; + struct rte_mbuf *pkt_seg; + struct rte_mbuf *pkt; ++ size_t eth_hdr_size; ++ int i, nb_pkt = 0; + + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = rte_pktmbuf_alloc(mp); +@@ -358,7 +361,7 @@ generate_packet_burst_proto(struct rte_mempool *mp, + break; + } + +- pkt->data_len = pkt_len; ++ pkt->data_len = pkt_seg_data_len; + pkt_seg = pkt; + for (i = 1; i < nb_pkt_segs; i++) { + pkt_seg->next = rte_pktmbuf_alloc(mp); +@@ -368,7 +371,10 @@ generate_packet_burst_proto(struct rte_mempool *mp, + goto nomore_mbuf; + } + pkt_seg = pkt_seg->next; +- pkt_seg->data_len = pkt_len; ++ if (i != nb_pkt_segs - 1) ++ pkt_seg->data_len = pkt_seg_data_len; ++ else ++ pkt_seg->data_len = pkt_seg_data_len + pkt_len % nb_pkt_segs; + } + pkt_seg->next = NULL; /* Last segment of packet. 
*/ + diff --git a/dpdk/app/test/test_barrier.c b/dpdk/app/test/test_barrier.c index 6d6d48749c..ec69af25bf 100644 --- a/dpdk/app/test/test_barrier.c @@ -4974,6 +9231,82 @@ index 46bcb51f86..d70bb0fe85 100644 #endif /* RTE_HAS_LIBPCAP */ + +REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert); +diff --git a/dpdk/app/test/test_common.c b/dpdk/app/test/test_common.c +index ef177cecb1..f89e1eb7ee 100644 +--- a/dpdk/app/test/test_common.c ++++ b/dpdk/app/test/test_common.c +@@ -25,31 +25,53 @@ test_macros(int __rte_unused unused_parm) + #define SMALLER 0x1000U + #define BIGGER 0x2000U + #define PTR_DIFF BIGGER - SMALLER +-#define FAIL_MACRO(x)\ +- {printf(#x "() test failed!\n");\ +- return -1;} + + uintptr_t unused = 0; + unsigned int smaller = SMALLER, bigger = BIGGER; ++ uint32_t arr[3]; + + RTE_SET_USED(unused); + + RTE_SWAP(smaller, bigger); +- if (smaller != BIGGER && bigger != SMALLER) +- FAIL_MACRO(RTE_SWAP); +- if ((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF) != BIGGER) +- FAIL_MACRO(RTE_PTR_ADD); +- if ((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF) != SMALLER) +- FAIL_MACRO(RTE_PTR_SUB); +- if (RTE_PTR_DIFF(BIGGER, SMALLER) != PTR_DIFF) +- FAIL_MACRO(RTE_PTR_DIFF); +- if (RTE_MAX(SMALLER, BIGGER) != BIGGER) +- FAIL_MACRO(RTE_MAX); +- if (RTE_MIN(SMALLER, BIGGER) != SMALLER) +- FAIL_MACRO(RTE_MIN); +- +- if (strncmp(RTE_STR(test), "test", sizeof("test"))) +- FAIL_MACRO(RTE_STR); ++ RTE_TEST_ASSERT(smaller == BIGGER && bigger == SMALLER, ++ "RTE_SWAP"); ++ RTE_TEST_ASSERT_EQUAL((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF), BIGGER, ++ "RTE_PTR_ADD"); ++ RTE_TEST_ASSERT_EQUAL((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF), SMALLER, ++ "RTE_PTR_SUB"); ++ RTE_TEST_ASSERT_EQUAL(RTE_PTR_DIFF(BIGGER, SMALLER), PTR_DIFF, ++ "RTE_PTR_DIFF"); ++ RTE_TEST_ASSERT_EQUAL(RTE_MAX(SMALLER, BIGGER), BIGGER, ++ "RTE_MAX"); ++ RTE_TEST_ASSERT_EQUAL(RTE_MIN(SMALLER, BIGGER), SMALLER, ++ "RTE_MIN"); ++ ++ RTE_TEST_ASSERT_EQUAL(RTE_PTR_ADD(arr + 1, sizeof(arr[0])), &arr[2], ++ "RTE_PTR_ADD(expr, x)"); ++ RTE_TEST_ASSERT_EQUAL(RTE_PTR_SUB(arr + 1, sizeof(arr[0])), &arr[0], ++ "RTE_PTR_SUB(expr, x)"); ++ RTE_TEST_ASSERT_EQUAL(RTE_PTR_ALIGN_FLOOR(arr + 2, 4), &arr[2], ++ "RTE_PTR_ALIGN_FLOOR(expr, x)"); ++ RTE_TEST_ASSERT_EQUAL(RTE_PTR_ALIGN_CEIL(arr + 2, 4), &arr[2], ++ "RTE_PTR_ALIGN_CEIL(expr, x)"); ++ RTE_TEST_ASSERT_EQUAL(RTE_PTR_ALIGN(arr + 2, 4), &arr[2], ++ "RTE_PTR_ALIGN(expr, x)"); ++ ++ RTE_TEST_ASSERT_EQUAL( ++ RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(&arr[1], 1), 4), &arr[1], ++ "RTE_PTR_ALIGN_FLOOR(x < y/2, y)"); ++ RTE_TEST_ASSERT_EQUAL( ++ RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(&arr[1], 3), 4), &arr[1], ++ "RTE_PTR_ALIGN_FLOOR(x > y/2, y)"); ++ RTE_TEST_ASSERT_EQUAL( ++ RTE_PTR_ALIGN_CEIL(RTE_PTR_ADD(&arr[1], 3), 4), &arr[2], ++ "RTE_PTR_ALIGN_CEIL(x < y/2, y)"); ++ RTE_TEST_ASSERT_EQUAL( ++ RTE_PTR_ALIGN_CEIL(RTE_PTR_ADD(&arr[1], 1), 4), &arr[2], ++ "RTE_PTR_ALIGN_CEIL(x > y/2, y)"); ++ ++ RTE_TEST_ASSERT(strncmp(RTE_STR(test), "test", sizeof("test")) == 0, ++ "RTE_STR"); + + return 0; + } diff --git a/dpdk/app/test/test_compressdev.c b/dpdk/app/test/test_compressdev.c index c63b5b6737..57c566aa92 100644 --- a/dpdk/app/test/test_compressdev.c @@ -5001,10 +9334,28 @@ index bf1d344359..8231f81e4a 100644 for (i = 0; i < CRC32_VEC_LEN1; i += 12) rte_memcpy(&test_data[i], crc32_vec1, 12); diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index 10b48cdadb..b11be735d0 100644 +index 10b48cdadb..0bd4517bf8 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c -@@ -209,6 
+209,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -135,6 +135,17 @@ security_proto_supported(enum rte_security_session_action_type action, + static int + dev_configure_and_start(uint64_t ff_disable); + ++static int ++check_cipher_capability(const struct crypto_testsuite_params *ts_params, ++ const enum rte_crypto_cipher_algorithm cipher_algo, ++ const uint16_t key_size, const uint16_t iv_size); ++ ++static int ++check_auth_capability(const struct crypto_testsuite_params *ts_params, ++ const enum rte_crypto_auth_algorithm auth_algo, ++ const uint16_t key_size, const uint16_t iv_size, ++ const uint16_t tag_size); ++ + static struct rte_mbuf * + setup_test_string(struct rte_mempool *mpool, + const char *string, size_t len, uint8_t blocksize) +@@ -209,6 +220,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, int enqueue_status, dequeue_status; struct crypto_unittest_params *ut_params = &unittest_params; int is_sgl = sop->m_src->nb_segs > 1; @@ -5012,7 +9363,7 @@ index 10b48cdadb..b11be735d0 100644 ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id); if (ctx_service_size < 0) { -@@ -247,6 +248,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -247,6 +259,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, ofs.raw = 0; @@ -5022,7 +9373,7 @@ index 10b48cdadb..b11be735d0 100644 if (is_cipher && is_auth) { cipher_offset = sop->cipher.data.offset; cipher_len = sop->cipher.data.length; -@@ -277,6 +281,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -277,6 +292,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, if (is_sgl) { uint32_t remaining_off = auth_offset + auth_len; struct rte_mbuf *sgl_buf = sop->m_src; @@ -5031,7 +9382,7 @@ index 10b48cdadb..b11be735d0 100644 while (remaining_off >= rte_pktmbuf_data_len(sgl_buf) && sgl_buf->next != NULL) { -@@ -293,7 +299,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -293,7 +310,8 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, /* Then check if digest-encrypted conditions are met */ if ((auth_offset + auth_len < cipher_offset + cipher_len) && (digest.iova == auth_end_iova) && is_sgl) @@ -5041,7 +9392,7 @@ index 10b48cdadb..b11be735d0 100644 ut_params->auth_xform.auth.digest_length); } else if (is_cipher) { -@@ -356,7 +363,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -356,7 +374,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, sgl.num = n; /* Out of place */ @@ -5050,7 +9401,66 @@ index 10b48cdadb..b11be735d0 100644 dest_sgl.vec = dest_data_vec; vec.dest_sgl = &dest_sgl; n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len, -@@ -6023,7 +6030,7 @@ test_zuc_encryption(const struct wireless_test_data *tdata) +@@ -3031,6 +3049,16 @@ create_wireless_algo_auth_cipher_operation( + remaining_off -= rte_pktmbuf_data_len(sgl_buf); + sgl_buf = sgl_buf->next; + } ++ ++ /* The last segment should be large enough to hold full digest */ ++ if (sgl_buf->data_len < auth_tag_len) { ++ rte_pktmbuf_free(sgl_buf->next); ++ sgl_buf->next = NULL; ++ TEST_ASSERT_NOT_NULL(rte_pktmbuf_append(sgl_buf, ++ auth_tag_len - sgl_buf->data_len), ++ "No room to append auth tag"); ++ } ++ + sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(sgl_buf, + uint8_t *, remaining_off); + sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(sgl_buf, +@@ -4777,7 +4805,6 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) + unsigned int plaintext_len; + + struct rte_cryptodev_info dev_info; +- struct rte_cryptodev_sym_capability_idx cap_idx; + + 
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + uint64_t feat_flags = dev_info.feature_flags; +@@ -4799,19 +4826,14 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) + return TEST_SKIPPED; + + /* Check if device supports ZUC EEA3 */ +- cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +- cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3; +- +- if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], +- &cap_idx) == NULL) ++ if (check_cipher_capability(ts_params, RTE_CRYPTO_CIPHER_ZUC_EEA3, ++ tdata->key.len, tdata->cipher_iv.len) < 0) + return TEST_SKIPPED; + + /* Check if device supports ZUC EIA3 */ +- cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; +- cap_idx.algo.auth = RTE_CRYPTO_AUTH_ZUC_EIA3; +- +- if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], +- &cap_idx) == NULL) ++ if (check_auth_capability(ts_params, RTE_CRYPTO_AUTH_ZUC_EIA3, ++ tdata->key.len, tdata->auth_iv.len, ++ tdata->digest.len) < 0) + return TEST_SKIPPED; + + /* Create ZUC session */ +@@ -4869,7 +4891,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) + TEST_ASSERT_BUFFERS_ARE_EQUAL( + ut_params->digest, + tdata->digest.data, +- 4, ++ tdata->digest.len, + "ZUC Generated auth tag not as expected"); + return 0; + } +@@ -6023,7 +6045,7 @@ test_zuc_encryption(const struct wireless_test_data *tdata) retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->plaintext.len, @@ -5059,7 +9469,7 @@ index 10b48cdadb..b11be735d0 100644 if (retval < 0) return retval; -@@ -6118,7 +6125,7 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata) +@@ -6118,7 +6140,7 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata) /* Create ZUC operation */ retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data, tdata->cipher_iv.len, tdata->plaintext.len, @@ -5068,7 +9478,7 @@ index 10b48cdadb..b11be735d0 100644 if (retval < 0) return retval; -@@ -6226,8 +6233,8 @@ test_zuc_authentication(const struct wireless_test_data *tdata) +@@ -6226,8 +6248,8 @@ test_zuc_authentication(const struct wireless_test_data *tdata) else ut_params->op = process_crypto_request(ts_params->valid_devs[0], ut_params->op); @@ -5078,7 +9488,26 @@ index 10b48cdadb..b11be735d0 100644 ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) + plaintext_pad_len; -@@ -6553,7 +6560,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6431,7 +6453,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, + TEST_ASSERT_BUFFERS_ARE_EQUAL( + ut_params->digest, + tdata->digest.data, +- DIGEST_BYTE_LENGTH_KASUMI_F9, ++ tdata->digest.len, + "ZUC Generated auth tag not as expected"); + } + return 0; +@@ -6469,6 +6491,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, + tdata->digest.len) < 0) + return TEST_SKIPPED; + ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + + uint64_t feat_flags = dev_info.feature_flags; +@@ -6553,7 +6578,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, retval = create_wireless_algo_auth_cipher_operation( tdata->digest.data, tdata->digest.len, tdata->cipher_iv.data, tdata->cipher_iv.len, @@ -5087,7 +9516,24 @@ index 10b48cdadb..b11be735d0 100644 (tdata->digest.offset_bytes == 0 ? (verify ? 
ciphertext_pad_len : plaintext_pad_len) : tdata->digest.offset_bytes), -@@ -6870,7 +6877,7 @@ test_snow3g_decryption_with_digest_test_case_1(void) +@@ -6638,7 +6663,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, + TEST_ASSERT_BUFFERS_ARE_EQUAL( + digest, + tdata->digest.data, +- DIGEST_BYTE_LENGTH_KASUMI_F9, ++ tdata->digest.len, + "ZUC Generated auth tag not as expected"); + } + return 0; +@@ -6857,6 +6882,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, + static int + test_snow3g_decryption_with_digest_test_case_1(void) + { ++ int ret; + struct snow3g_hash_test_data snow3g_hash_data; + struct rte_cryptodev_info dev_info; + struct crypto_testsuite_params *ts_params = &testsuite_params; +@@ -6870,13 +6896,16 @@ test_snow3g_decryption_with_digest_test_case_1(void) } /* @@ -5096,7 +9542,55 @@ index 10b48cdadb..b11be735d0 100644 * Digest is allocated in 4 last bytes in plaintext, pattern. */ snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data); -@@ -10540,9 +10547,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata) + +- return test_snow3g_decryption(&snow3g_test_case_7) & +- test_snow3g_authentication_verify(&snow3g_hash_data); ++ ret = test_snow3g_decryption(&snow3g_test_case_7); ++ if (ret != 0) ++ return ret; ++ ++ return test_snow3g_authentication_verify(&snow3g_hash_data); + } + + static int +@@ -7545,6 +7574,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, + if (global_api_test_type == CRYPTODEV_RAW_API_TEST) + return TEST_SKIPPED; + ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); + + uint64_t feat_flags = dev_info.feature_flags; +@@ -8068,7 +8100,7 @@ create_aead_operation(enum rte_crypto_aead_operation op, + rte_pktmbuf_iova(ut_params->ibuf); + /* Copy AAD 18 bytes after the AAD pointer, according to the API */ + memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len); +- debug_hexdump(stdout, "aad:", sym_op->aead.aad.data, ++ debug_hexdump(stdout, "aad:", sym_op->aead.aad.data + 18, + tdata->aad.len); + + /* Append IV at the end of the crypto operation*/ +@@ -8077,7 +8109,7 @@ create_aead_operation(enum rte_crypto_aead_operation op, + + /* Copy IV 1 byte after the IV pointer, according to the API */ + rte_memcpy(iv_ptr + 1, tdata->iv.data, tdata->iv.len); +- debug_hexdump(stdout, "iv:", iv_ptr, ++ debug_hexdump(stdout, "iv:", iv_ptr + 1, + tdata->iv.len); + } else { + aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16); +@@ -8230,7 +8262,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) + tdata->key.data, tdata->key.len, + tdata->aad.len, tdata->auth_tag.len, + tdata->iv.len); +- if (retval < 0) ++ if (retval != TEST_SUCCESS) + return retval; + + if (tdata->aad.len > MBUF_SIZE) { +@@ -10540,9 +10572,11 @@ test_authenticated_encryption_oop(const struct aead_test_data *tdata) rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; @@ -5110,7 +9604,52 @@ index 10b48cdadb..b11be735d0 100644 /* not supported with CPU crypto */ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) -@@ -15690,7 +15699,7 @@ test_cryptodev_dpaa2_sec_raw_api(void) +@@ -10876,7 +10910,7 @@ test_authenticated_decryption_sessionless( + key, tdata->key.len, + tdata->aad.len, tdata->auth_tag.len, + tdata->iv.len); +- if (retval < 0) ++ if (retval != TEST_SUCCESS) + return retval; + + 
ut_params->op->sym->m_src = ut_params->ibuf; +@@ -11073,11 +11107,11 @@ test_stats(void) + TEST_ASSERT((stats.enqueued_count == 1), + "rte_cryptodev_stats_get returned unexpected enqueued stat"); + TEST_ASSERT((stats.dequeued_count == 1), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected dequeued stat"); + TEST_ASSERT((stats.enqueue_err_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected enqueued error count stat"); + TEST_ASSERT((stats.dequeue_err_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected dequeued error count stat"); + + /* invalid device but should ignore and not reset device stats*/ + rte_cryptodev_stats_reset(ts_params->valid_devs[0] + 300); +@@ -11085,7 +11119,7 @@ test_stats(void) + &stats), + "rte_cryptodev_stats_get failed"); + TEST_ASSERT((stats.enqueued_count == 1), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected enqueued stat after invalid reset"); + + /* check that a valid reset clears stats */ + rte_cryptodev_stats_reset(ts_params->valid_devs[0]); +@@ -11093,9 +11127,9 @@ test_stats(void) + &stats), + "rte_cryptodev_stats_get failed"); + TEST_ASSERT((stats.enqueued_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected enqueued stat after valid reset"); + TEST_ASSERT((stats.dequeued_count == 0), +- "rte_cryptodev_stats_get returned unexpected enqueued stat"); ++ "rte_cryptodev_stats_get returned unexpected dequeued stat after valid reset"); + + return TEST_SUCCESS; + } +@@ -15690,7 +15724,7 @@ test_cryptodev_dpaa2_sec_raw_api(void) static int test_cryptodev_dpaa_sec_raw_api(void) { @@ -5119,10 +9658,97 @@ index 10b48cdadb..b11be735d0 100644 int ret; ret = require_feature_flag(pmd_name, RTE_CRYPTODEV_FF_SYM_RAW_DP, +diff --git a/dpdk/app/test/test_cryptodev_aes_test_vectors.h b/dpdk/app/test/test_cryptodev_aes_test_vectors.h +index a797af1b00..6c4f6b6f13 100644 +--- a/dpdk/app/test/test_cryptodev_aes_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_aes_test_vectors.h +@@ -4689,7 +4689,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (512-byte plaintext" +- " Dataunit 512) Scater gather OOP", ++ " Dataunit 512) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_512, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4697,7 +4697,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (512-byte plaintext" +- " Dataunit 512) Scater gather OOP", ++ " Dataunit 512) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_512, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4705,7 +4705,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (512-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4713,7 +4713,7 @@ static const struct blockcipher_test_case 
aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (512-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_512_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4721,7 +4721,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (4096-byte plaintext" +- " Dataunit 4096) Scater gather OOP", ++ " Dataunit 4096) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_4096, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4729,7 +4729,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (4096-byte plaintext" +- " Dataunit 4096) Scater gather OOP", ++ " Dataunit 4096) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_4096, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4737,7 +4737,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Encryption (4096-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | +@@ -4745,7 +4745,7 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = { + }, + { + .test_descr = "AES-256-XTS Decryption (4096-byte plaintext" +- " Dataunit 0) Scater gather OOP", ++ " Dataunit 0) Scatter gather OOP", + .test_data = &aes_test_data_xts_wrapped_key_48_pt_4096_du_0, + .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, + .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c -index 9d19a6d6d9..1131290e88 100644 +index 9d19a6d6d9..952b927d60 100644 --- a/dpdk/app/test/test_cryptodev_asym.c +++ b/dpdk/app/test/test_cryptodev_asym.c +@@ -208,8 +208,8 @@ queue_ops_rsa_enc_dec(struct rte_cryptodev_asym_session *sess) + status = TEST_FAILED; + goto error_exit; + } +- debug_hexdump(stdout, "encrypted message", asym_op->rsa.message.data, +- asym_op->rsa.message.length); ++ debug_hexdump(stdout, "encrypted message", asym_op->rsa.cipher.data, ++ asym_op->rsa.cipher.length); + + /* Use the resulted output as decryption Input vector*/ + asym_op = result_op->asym; @@ -558,7 +558,7 @@ test_one_case(const void *test_case, int sessionless) status = test_cryptodev_asym_op( &testsuite_params, @@ -5132,6 +9758,52 @@ index 9d19a6d6d9..1131290e88 100644 } if (status) break; +@@ -1717,7 +1717,7 @@ test_mod_exp(void) + } + + static int +-test_dh_keygenration(void) ++test_dh_key_generation(void) + { + int status; + +@@ -2291,7 +2291,7 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_capability), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_dsa), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, +- test_dh_keygenration), ++ test_dh_key_generation), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_rsa_enc_dec), + TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, + test_rsa_sign_verify), +diff --git a/dpdk/app/test/test_cryptodev_mixed_test_vectors.h b/dpdk/app/test/test_cryptodev_mixed_test_vectors.h +index 
f50dcb0457..db58dbdd72 100644 +--- a/dpdk/app/test/test_cryptodev_mixed_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_mixed_test_vectors.h +@@ -284,8 +284,10 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_snow_test_case_1 = { + }, + .cipher_iv = { + .data = { ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }, +- .len = 0, ++ .len = 16, + }, + .cipher = { + .len_bits = 516 << 3, +@@ -723,8 +725,10 @@ struct mixed_cipher_auth_test_data auth_aes_cmac_cipher_zuc_test_case_1 = { + }, + .cipher_iv = { + .data = { ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + }, +- .len = 0, ++ .len = 16, + }, + .cipher = { + .len_bits = 516 << 3, diff --git a/dpdk/app/test/test_cryptodev_rsa_test_vectors.h b/dpdk/app/test/test_cryptodev_rsa_test_vectors.h index 48a72e1492..04539a1ecf 100644 --- a/dpdk/app/test/test_cryptodev_rsa_test_vectors.h @@ -5145,6 +9817,2219 @@ index 48a72e1492..04539a1ecf 100644 .qt = { .p = { .data = rsa_p, +diff --git a/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h b/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h +index f43f693edb..b0fa0ec458 100644 +--- a/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_security_pdcp_sdap_test_vectors.h +@@ -769,7 +769,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -781,7 +781,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, + 0xf5, 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, + 0xbe, 0x48, 0xb5, 0x0b, 0x6a, 0x73, 0x9a, + 0x5a, 0xa3, 0x06, 0x47, 0x40, 0x96, 0xcf, +@@ -790,7 +790,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, 0xe8, + 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2, + 0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, +- 0x23, 0xfa, 0x16, 0x39, 0xf7, 0x15, 0x11 }, ++ 0x23, 0xfa, 0x16, 0xb2, 0xb0, 0x17, 0x4a }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -817,7 +817,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -829,7 +829,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, + 0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5, + 0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78, + 0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07, +@@ -838,7 +838,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21, + 0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f, + 0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17, +- 0xae, 
0xde, 0xfb, 0x90, 0x62, 0x59, 0xcb }, ++ 0xae, 0xde, 0xfb, 0x19, 0xDa, 0x9a, 0xc2 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -865,7 +865,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -877,7 +877,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, + 0xf5, 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, + 0xbe, 0x48, 0xb5, 0x0b, 0x6a, 0x73, 0x9a, + 0x5a, 0xa3, 0x06, 0x47, 0x40, 0x96, 0xcf, +@@ -886,7 +886,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, 0xe8, + 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2, + 0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, +- 0x23, 0xfa, 0x16, 0x72, 0x3e, 0x14, 0xa9 }, ++ 0x23, 0xfa, 0x16, 0x6c, 0xcb, 0x92, 0xdf }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -913,7 +913,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -925,7 +925,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, + 0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5, + 0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78, + 0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07, +@@ -934,7 +934,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21, + 0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f, + 0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17, +- 0xae, 0xde, 0xfb, 0x3f, 0x47, 0xaa, 0x9b }, ++ 0xae, 0xde, 0xfb, 0x5b, 0xc2, 0x9f, 0x29 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -961,7 +961,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -974,7 +974,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + .in_len = 66, + .data_out = + (uint8_t[]){ +- 0x50, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, 0xf5, ++ 0x80, 0x01, 0x86, 0x69, 0xf2, 0x5d, 0xd7, 0xf5, + 0xc1, 0xf7, 0x1e, 0x47, 0x5d, 0xce, 0xbe, 0x48, + 0xb5, 0x0b, 0x6a, 0x73, 0x9a, 0x5a, 0xa3, 0x06, + 0x47, 0x40, 0x96, 0xcf, 0x86, 0x98, 0x3d, 0x6f, +@@ -982,7 +982,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xa6, 0x24, 0xc9, 0x7f, 0x11, 0x79, 0x24, 0x44, + 0xe8, 0x39, 0x11, 0x03, 0x0a, 0x9d, 0x4f, 0xe2, + 0x95, 0x9f, 0x47, 0x73, 0x37, 0x83, 0x8c, 0x23, +- 0xfa, 0x16, 0x52, 0x69, 0x16, 0xfc, ++ 0xfa, 0x16, 0x5d, 0x83, 
0x73, 0x34, + }, + .sn_size = 12, + .hfn = 0x1, +@@ -1010,7 +1010,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1022,7 +1022,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x22, 0x2a, 0x8f, 0x86, + 0x25, 0x92, 0xcd, 0xa9, 0xa1, 0xa0, 0xf5, + 0x86, 0x0d, 0xe8, 0xe4, 0xef, 0xe0, 0x78, + 0x45, 0x7c, 0x0c, 0x41, 0x5c, 0x8f, 0x07, +@@ -1031,7 +1031,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xff, 0x7a, 0x42, 0xac, 0x94, 0x21, + 0x60, 0x1c, 0x13, 0xcc, 0x7e, 0x6b, 0x2f, + 0x6f, 0x91, 0x89, 0xc6, 0xd4, 0xe6, 0x17, +- 0xae, 0xde, 0xfb, 0xf5, 0xda, 0x73, 0xa7 }, ++ 0xae, 0xde, 0xfb, 0xff, 0xf9, 0xef, 0xff }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1154,7 +1154,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1166,7 +1166,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, + 0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e, + 0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40, + 0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22, +@@ -1175,7 +1175,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1, + 0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3, + 0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97, +- 0xbd, 0xba, 0x08, 0x39, 0x63, 0x21, 0x82 }, ++ 0xbd, 0xba, 0x08, 0xb2, 0x24, 0x23, 0xd9 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1202,7 +1202,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1214,7 +1214,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, + 0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36, + 0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8, + 0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e, +@@ -1223,7 +1223,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7, + 0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47, + 0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde, +- 0xc9, 0x0a, 0x64, 0x8e, 0x79, 0xde, 0xaa }, ++ 0xc9, 0x0a, 0x64, 0x07, 0xc1, 0x1d, 0xa3 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ 
-1250,7 +1250,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1262,7 +1262,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, + 0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e, + 0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40, + 0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22, +@@ -1271,7 +1271,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1, + 0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3, + 0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97, +- 0xbd, 0xba, 0x08, 0x72, 0xaa, 0x20, 0x3a }, ++ 0xbd, 0xba, 0x08, 0x6c, 0x5f, 0xa6, 0x4c }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1298,7 +1298,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1310,7 +1310,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, + 0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36, + 0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8, + 0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e, +@@ -1319,7 +1319,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7, + 0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47, + 0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde, +- 0xc9, 0x0a, 0x64, 0x21, 0x5c, 0x2d, 0xfa }, ++ 0xc9, 0x0a, 0x64, 0x45, 0xd9, 0x18, 0x48 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1346,7 +1346,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1358,7 +1358,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xe8, 0x8c, 0x96, 0x38, + 0x6e, 0xe4, 0x1f, 0xb9, 0x85, 0x61, 0x2e, + 0x64, 0x31, 0x22, 0x97, 0x5b, 0xf6, 0x40, + 0x08, 0x65, 0xc3, 0xfa, 0x72, 0xd2, 0x22, +@@ -1367,7 +1367,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x23, 0x80, 0xcc, 0x5f, 0xfd, 0x74, 0xb1, + 0xfb, 0xe1, 0xea, 0x6f, 0xf9, 0x9d, 0xf3, + 0x90, 0x02, 0x6e, 0xf1, 0xf8, 0x95, 0x97, +- 0xbd, 0xba, 0x08, 0x52, 0xfd, 0x22, 0x6f }, ++ 0xbd, 0xba, 0x08, 0x5d, 0x17, 0x47, 0xa7 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1394,7 +1394,7 @@ static const struct pdcp_sdap_test 
list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1406,7 +1406,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xd2, 0xc0, 0x3a, 0x41, + 0xbc, 0x0b, 0x95, 0xa4, 0x57, 0x3e, 0x36, + 0x10, 0xb2, 0xff, 0x48, 0x1f, 0x10, 0xa8, + 0x06, 0x42, 0xc5, 0xa2, 0x33, 0xed, 0x2e, +@@ -1415,7 +1415,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x35, 0xd2, 0x31, 0x1a, 0xb8, 0x33, 0xc7, + 0x9c, 0xa4, 0xa4, 0x9b, 0x20, 0xc6, 0x47, + 0xdd, 0xe5, 0xa3, 0x4b, 0x9e, 0x66, 0xde, +- 0xc9, 0x0a, 0x64, 0xeb, 0xc1, 0xf4, 0xc6 }, ++ 0xc9, 0x0a, 0x64, 0xe1, 0xe2, 0x68, 0x9e }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1538,7 +1538,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1550,7 +1550,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, + 0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b, + 0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67, + 0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37, +@@ -1559,7 +1559,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d, + 0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56, + 0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5, +- 0xaf, 0x96, 0x5c, 0xb6, 0x6c, 0xeb, 0x14 }, ++ 0xaf, 0x96, 0x5c, 0x3d, 0x2b, 0xe9, 0x4f }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1586,7 +1586,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1598,7 +1598,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, + 0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f, + 0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29, + 0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59, +@@ -1607,7 +1607,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51, + 0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64, + 0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13, +- 0x91, 0xaf, 0x24, 0xb2, 0x82, 0xfb, 0x27 }, ++ 0x91, 0xaf, 0x24, 0x3b, 0x3a, 0x38, 0x2e }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1634,7 +1634,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 
0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1646,7 +1646,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, + 0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b, + 0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67, + 0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37, +@@ -1655,7 +1655,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d, + 0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56, + 0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5, +- 0xaf, 0x96, 0x5c, 0xfd, 0xa5, 0xea, 0xac }, ++ 0xaf, 0x96, 0x5c, 0xe3, 0x50, 0x6c, 0xda }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1682,7 +1682,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1694,7 +1694,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, + 0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f, + 0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29, + 0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59, +@@ -1703,7 +1703,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51, + 0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64, + 0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13, +- 0x91, 0xaf, 0x24, 0x1d, 0xa7, 0x08, 0x77 }, ++ 0x91, 0xaf, 0x24, 0x79, 0x22, 0x3d, 0xc5 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1730,7 +1730,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1742,7 +1742,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x79, 0xdb, 0x02, 0x27, + 0xa0, 0x85, 0x82, 0x87, 0x6b, 0x35, 0x2b, + 0xe1, 0x96, 0x5b, 0xfd, 0x67, 0xe1, 0x67, + 0x69, 0x17, 0x5b, 0x0a, 0x8f, 0xb8, 0x37, +@@ -1751,7 +1751,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xda, 0x94, 0x61, 0x8e, 0x2d, 0x7a, 0x5d, + 0x64, 0x0f, 0x4a, 0xc7, 0x7c, 0x9d, 0x56, + 0x5a, 0x2a, 0xcb, 0xff, 0x79, 0x07, 0xa5, +- 0xaf, 0x96, 0x5c, 0xdd, 0xf2, 0xe8, 0xf9 }, ++ 0xaf, 0x96, 0x5c, 0xd2, 0x18, 0x8d, 0x31 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -1778,7 +1778,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0a, 0x98, 0xc1, 0x3c, 0x98, 0x82, + 0xdc, 0xb6, 0xc2, 0x36 }, + .data_in = 
+- (uint8_t[]){ 0x50, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0xb8, 0xf8, 0xdb, 0x2d, + 0x3f, 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, + 0x88, 0x63, 0x08, 0x4f, 0xd3, 0x71, 0xfb, + 0xeb, 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, +@@ -1790,7 +1790,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0xf9, 0xdd }, + .in_len = 66, + .data_out = +- (uint8_t[]){ 0x50, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, ++ (uint8_t[]){ 0x80, 0x01, 0x86, 0x2c, 0x0f, 0xc1, 0x08, + 0x2e, 0xa7, 0x97, 0xd2, 0x6c, 0x17, 0x8f, + 0x22, 0x1f, 0x99, 0x1b, 0x48, 0x04, 0x29, + 0x54, 0x3f, 0x07, 0x01, 0xda, 0x19, 0x59, +@@ -1799,7 +1799,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x04, 0xf4, 0xfe, 0xe6, 0xf7, 0x53, 0x51, + 0x70, 0x10, 0x56, 0x83, 0x59, 0xb5, 0x64, + 0x0b, 0xa6, 0x2e, 0xa5, 0x03, 0x62, 0x13, +- 0x91, 0xaf, 0x24, 0xd7, 0x3a, 0xd1, 0x4b }, ++ 0x91, 0xaf, 0x24, 0xdd, 0x19, 0x4d, 0x13 }, + .sn_size = 12, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2556,7 +2556,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2568,7 +2568,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, + 0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b, + 0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7, + 0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a, +@@ -2577,8 +2577,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76, + 0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47, + 0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6, +- 0x0e, 0xf4, 0xe7, 0xe8, 0x78, 0xdd, 0xc1, +- 0x92 }, ++ 0x0e, 0xf4, 0xe7, 0xe8, 0xc0, 0x48, 0x6a, ++ 0x7c }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2605,7 +2605,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2617,7 +2617,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, + 0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50, + 0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab, + 0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01, +@@ -2626,8 +2626,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87, + 0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09, + 0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e, +- 0x9c, 0x85, 0x0b, 0xf7, 0xb1, 0x80, 0x30, +- 0xa5 }, ++ 0x9c, 0x85, 0x0b, 0xf7, 0x17, 0x28, 0x0f, ++ 0x7d }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2654,7 +2654,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- 
(uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2666,7 +2666,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, + 0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b, + 0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7, + 0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a, +@@ -2675,8 +2675,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76, + 0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47, + 0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6, +- 0x0e, 0xf4, 0xe7, 0xe8, 0x08, 0xa6, 0xdb, +- 0x19 }, ++ 0x0e, 0xf4, 0xe7, 0xe8, 0x8e, 0x76, 0x4a, ++ 0x4e }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2703,7 +2703,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2715,7 +2715,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, + 0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50, + 0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab, + 0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01, +@@ -2724,8 +2724,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87, + 0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09, + 0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e, +- 0x9c, 0x85, 0x0b, 0xf7, 0x97, 0x5a, 0x56, +- 0xab }, ++ 0x9c, 0x85, 0x0b, 0xf7, 0xc1, 0x27, 0x82, ++ 0xc3 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2752,7 +2752,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2764,7 +2764,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x78, 0xee, 0x01, + 0x4c, 0x79, 0xc7, 0x14, 0x79, 0x2c, 0x1b, + 0x8a, 0xd0, 0x17, 0xb4, 0xb4, 0xfe, 0xa7, + 0x17, 0x9a, 0x93, 0x5d, 0xad, 0x27, 0x8a, +@@ -2773,8 +2773,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x37, 0xfb, 0x98, 0x1b, 0x06, 0x1a, 0x76, + 0xf8, 0x07, 0x72, 0xe9, 0xa2, 0x89, 0x47, + 0x95, 0x8f, 0x8f, 0x4e, 0x45, 0x55, 0xd6, +- 0x0e, 0xf4, 0xe7, 0xe8, 0x08, 0x68, 0xff, +- 0x7c }, ++ 0x0e, 0xf4, 0xe7, 0xe8, 0x97, 0x76, 0xce, ++ 0xac }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2801,7 +2801,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 
}, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2813,7 +2813,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xbf, 0x21, 0xc9, + 0x7d, 0x02, 0x8d, 0xf7, 0xf7, 0x80, 0x50, + 0x60, 0x32, 0x09, 0xb8, 0x69, 0x48, 0xab, + 0x58, 0xf0, 0xd9, 0x63, 0x63, 0x36, 0x01, +@@ -2822,8 +2822,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4c, 0x00, 0xd0, 0x0e, 0x31, 0x1c, 0x87, + 0xee, 0x1c, 0xc3, 0x81, 0xb4, 0xb6, 0x09, + 0xb5, 0x3c, 0x85, 0xb9, 0xcc, 0x2d, 0x2e, +- 0x9c, 0x85, 0x0b, 0xf7, 0x41, 0xdd, 0x19, +- 0x32 }, ++ 0x9c, 0x85, 0x0b, 0xf7, 0x69, 0x56, 0x6f, ++ 0xaf }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2948,7 +2948,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -2960,7 +2960,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, + 0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f, + 0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde, + 0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51, +@@ -2969,8 +2969,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a, + 0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95, + 0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c, +- 0xae, 0x22, 0x59, 0x11, 0xf6, 0x97, 0x0b, +- 0x7b }, ++ 0xae, 0x22, 0x59, 0x11, 0x4e, 0x02, 0xa0, ++ 0x95 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -2997,7 +2997,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3009,7 +3009,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, + 0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30, + 0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e, + 0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f, +@@ -3018,8 +3018,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17, + 0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a, + 0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f, +- 0xad, 0x3d, 0x99, 0x4a, 0xa3, 0xab, 0xd5, +- 0x7c }, ++ 0xad, 0x3d, 0x99, 0x4a, 0x05, 0x03, 0xea, ++ 0xa4 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3046,7 +3046,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 
0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3058,7 +3058,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, + 0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f, + 0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde, + 0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51, +@@ -3067,8 +3067,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a, + 0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95, + 0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c, +- 0xae, 0x22, 0x59, 0x11, 0x86, 0xec, 0x11, +- 0xf0 }, ++ 0xae, 0x22, 0x59, 0x11, 0x00, 0x3c, 0x80, ++ 0xa7 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3095,7 +3095,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3107,7 +3107,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, + 0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30, + 0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e, + 0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f, +@@ -3116,8 +3116,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17, + 0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a, + 0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f, +- 0xad, 0x3d, 0x99, 0x4a, 0x85, 0x71, 0xb3, +- 0x72 }, ++ 0xad, 0x3d, 0x99, 0x4a, 0xd3, 0x0c, 0x67, ++ 0x1a }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3144,7 +3144,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3156,7 +3156,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xa8, 0x9d, + 0x9e, 0xcc, 0xf0, 0x1a, 0xc0, 0xf2, 0x9f, + 0x8c, 0xc9, 0x57, 0xc7, 0x99, 0x4d, 0xde, + 0xc5, 0x19, 0x69, 0x58, 0x5b, 0x1a, 0x51, +@@ -3165,8 +3165,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xb3, 0x24, 0x88, 0x68, 0x5f, 0x78, 0x3a, + 0xd2, 0x6c, 0xcc, 0xa0, 0xb5, 0xf0, 0x95, + 0x3e, 0xf1, 0xf4, 0x3e, 0x43, 0x8f, 0x6c, +- 0xae, 0x22, 0x59, 0x11, 0x86, 0x22, 0x35, +- 0x95 }, ++ 0xae, 0x22, 0x59, 0x11, 0x19, 0x3c, 0x04, ++ 0x45 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3193,7 +3193,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 
0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3205,7 +3205,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x5a, 0xa9, 0xb5, + 0x61, 0x8b, 0x8a, 0xb7, 0x6a, 0x98, 0x30, + 0x6c, 0xed, 0x84, 0x69, 0xff, 0x6b, 0x7e, + 0x30, 0x59, 0x55, 0x80, 0x32, 0xd0, 0x0f, +@@ -3214,8 +3214,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x0e, 0x2e, 0x33, 0xe6, 0xa4, 0xea, 0x17, + 0xd6, 0x56, 0xa2, 0x3b, 0x5f, 0x56, 0x3a, + 0xa7, 0x6f, 0x4c, 0xc1, 0xca, 0xbd, 0x5f, +- 0xad, 0x3d, 0x99, 0x4a, 0x53, 0xf6, 0xfc, +- 0xeb }, ++ 0xad, 0x3d, 0x99, 0x4a, 0x7b, 0x7d, 0x8a, ++ 0x76 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3340,7 +3340,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3352,7 +3352,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, + 0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93, + 0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7, + 0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d, +@@ -3361,8 +3361,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12, + 0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60, + 0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7, +- 0x0c, 0x61, 0x76, 0xdc, 0x25, 0x8a, 0x31, +- 0xed }, ++ 0x0c, 0x61, 0x76, 0xdc, 0x9d, 0x1f, 0x9a, ++ 0x03 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3389,7 +3389,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3401,7 +3401,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, + 0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23, + 0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26, + 0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac, +@@ -3410,8 +3410,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e, + 0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25, + 0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f, +- 0x6c, 0xed, 0x6a, 0x50, 0xf3, 0x5e, 0x90, +- 0x42 }, ++ 0x6c, 0xed, 0x6a, 0x50, 0x55, 0xf6, 0xaf, ++ 0x9a }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3438,7 +3438,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 
0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3450,7 +3450,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, + 0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93, + 0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7, + 0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d, +@@ -3459,8 +3459,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12, + 0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60, + 0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7, +- 0x0c, 0x61, 0x76, 0xdc, 0x55, 0xf1, 0x2b, +- 0x66 }, ++ 0x0c, 0x61, 0x76, 0xdc, 0xd3, 0x21, 0xba, ++ 0x31 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3487,7 +3487,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3499,7 +3499,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, + 0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23, + 0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26, + 0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac, +@@ -3508,8 +3508,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e, + 0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25, + 0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f, +- 0x6c, 0xed, 0x6a, 0x50, 0xd5, 0x84, 0xf6, +- 0x4c }, ++ 0x6c, 0xed, 0x6a, 0x50, 0x83, 0xf9, 0x22, ++ 0x24 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3536,7 +3536,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3548,7 +3548,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x9b, 0x9c, 0xde, + 0xa1, 0x69, 0x9b, 0x27, 0xd3, 0x38, 0x93, + 0xf2, 0x12, 0xb1, 0xc6, 0x60, 0xac, 0xb7, + 0xf2, 0x37, 0xf3, 0x72, 0xaf, 0x50, 0x9d, +@@ -3557,8 +3557,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xbb, 0x15, 0xf3, 0x6a, 0x5d, 0x61, 0x12, + 0x6e, 0x6d, 0x1b, 0x92, 0xd8, 0x42, 0x60, + 0x3e, 0x1f, 0xe0, 0x6c, 0x28, 0x89, 0xf7, +- 0x0c, 0x61, 0x76, 0xdc, 0x55, 0x3f, 0x0f, +- 0x03 }, ++ 0x0c, 0x61, 0x76, 0xdc, 0xca, 0x21, 0x3e, ++ 0xd3 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +@@ -3585,7 +3585,7 @@ static const struct pdcp_sdap_test 
list_pdcp_sdap_tests[] = { + 0x4b, 0x32, 0x87, 0xf9, 0xdb, 0xe0, + 0x31, 0x5f, 0x3a, 0x15 }, + .data_in = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0xdb, 0x2d, 0x3f, + 0x23, 0x82, 0x53, 0xfd, 0x37, 0xde, 0x88, + 0x63, 0x08, 0x4f, 0xD3, 0x71, 0xfb, 0xeb, + 0x35, 0xf3, 0x64, 0xd3, 0x5e, 0xaf, 0x3f, +@@ -3597,7 +3597,7 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0xf9, 0xdd, 0xcc, 0x69 }, + .in_len = 67, + .data_out = +- (uint8_t[]){ 0xf8, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, ++ (uint8_t[]){ 0x80, 0x00, 0x00, 0xf8, 0x0c, 0xef, 0x82, + 0x59, 0x6b, 0x6b, 0x61, 0xbe, 0x54, 0x23, + 0x7d, 0x29, 0x6d, 0xa7, 0xd2, 0xfa, 0x26, + 0xcc, 0x1d, 0x18, 0x39, 0x99, 0xea, 0xac, +@@ -3606,8 +3606,8 @@ static const struct pdcp_sdap_test list_pdcp_sdap_tests[] = { + 0x7a, 0x8a, 0xe5, 0x00, 0x35, 0x4d, 0x0e, + 0x69, 0x9f, 0x4b, 0x72, 0x94, 0x8a, 0x25, + 0x69, 0x43, 0x28, 0xdc, 0x40, 0x60, 0x4f, +- 0x6c, 0xed, 0x6a, 0x50, 0x03, 0x03, 0xb9, +- 0xd5 }, ++ 0x6c, 0xed, 0x6a, 0x50, 0x2b, 0x88, 0xcf, ++ 0x48 }, + .sn_size = 18, + .hfn = 0x1, + .hfn_threshold = 0xfa558, +diff --git a/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h b/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h +index 81fd6e606b..54c7e2420a 100644 +--- a/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h ++++ b/dpdk/app/test/test_cryptodev_security_pdcp_test_vectors.h +@@ -4248,7 +4248,7 @@ static uint8_t *pdcp_test_data_in[] = { + + /*************** 12-bit C-plane ****************/ + /* Control Plane w/NULL enc. + NULL int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4262,7 +4262,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/NULL enc. + SNOW f9 int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4276,7 +4276,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/NULL enc. + AES CMAC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4290,7 +4290,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/NULL enc. + ZUC int. 
UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4305,7 +4305,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + + /* Control Plane w/SNOW f8 enc. + NULL int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4319,7 +4319,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/SNOW f8 enc. + SNOW f9 int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4333,7 +4333,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/SNOW f8 enc. + AES CMAC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4347,7 +4347,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/SNOW f8 enc. + ZUC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4362,7 +4362,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + + /* Control Plane w/AES CTR enc. + NULL int. 
UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4376,7 +4376,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/AES CTR enc. + SNOW f9 int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4390,7 +4390,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/AES CTR enc. + AES CMAC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4404,7 +4404,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/AES CTR enc. + ZUC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4419,7 +4419,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + + /* Control Plane w/ZUC enc. + NULL int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4433,7 +4433,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/ZUC enc. + SNOW f9 int. 
UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4447,7 +4447,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/ZUC enc. + AES CMAC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4461,7 +4461,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* Control Plane w/ZUC enc. + ZUC int. UL */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4494,7 +4494,7 @@ static uint8_t *pdcp_test_data_in[] = { + (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, + 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, + /* User Plane w/NULL enc. UL for 18-bit SN*/ +- (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, ++ (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, + 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, + 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, +@@ -4528,7 +4528,7 @@ static uint8_t *pdcp_test_data_in[] = { + (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, + 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, + /* User Plane w/SNOW enc. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4562,7 +4562,7 @@ static uint8_t *pdcp_test_data_in[] = { + (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, + 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, + /* User Plane w/AES enc. 
UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4596,7 +4596,7 @@ static uint8_t *pdcp_test_data_in[] = { + (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, + 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, + /* User Plane w/ZUC enc. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4614,7 +4614,7 @@ static uint8_t *pdcp_test_data_in[] = { + + /*************** u-plane with integrity for 12-bit SN *****/ + /* User Plane w/NULL enc. + NULL int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4628,7 +4628,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/NULL enc. + SNOW f9 int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4642,7 +4642,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/NULL enc. + AES CMAC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4656,7 +4656,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/NULL enc. + ZUC int. 
UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4671,7 +4671,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + + /* User Plane w/SNOW f8 enc. + NULL int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4685,7 +4685,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4699,7 +4699,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4713,7 +4713,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/SNOW f8 enc. + ZUC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4728,7 +4728,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + + /* User Plane w/AES CTR enc. + NULL int. 
UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4742,7 +4742,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/AES CTR enc. + SNOW f9 int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4756,7 +4756,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/AES CTR enc. + AES CMAC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4770,7 +4770,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/AES CTR enc. + ZUC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4785,7 +4785,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + + /* User Plane w/ZUC enc. + NULL int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4799,7 +4799,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/ZUC enc. + SNOW f9 int. 
UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4813,7 +4813,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/ZUC enc. + AES CMAC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4827,7 +4827,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, + 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, + /* User Plane w/ZUC enc. + ZUC int. UL for 12-bit SN*/ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, + 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, + 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, +@@ -4843,7 +4843,7 @@ static uint8_t *pdcp_test_data_in[] = { + + /*************** u-plane with integrity for 18-bit SN *****/ + /* User Plane w/NULL enc. + NULL int. UL for 18-bit SN*/ +- (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, ++ (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, + 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, + 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, +@@ -4859,7 +4859,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/NULL enc. + SNOW f9 int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4875,7 +4875,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/NULL enc. + AES CMAC int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4891,7 +4891,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/NULL enc. + ZUC int. 
UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4907,7 +4907,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/SNOW f8 enc. + NULL int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4923,7 +4923,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4939,7 +4939,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4955,7 +4955,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/SNOW f8 enc. + ZUC int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4971,7 +4971,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/AES CTR enc. + NULL int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -4987,7 +4987,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/AES CTR enc. + SNOW f9 int. 
UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5003,7 +5003,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/AES CTR enc. + AES CMAC int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5019,7 +5019,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/AES CTR enc. + ZUC int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5035,7 +5035,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/ZUC enc. + NULL int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5051,7 +5051,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/ZUC enc. + SNOW f9 int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5067,7 +5067,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/ZUC enc. + AES CMAC int. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5083,7 +5083,7 @@ static uint8_t *pdcp_test_data_in[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69}, + /* User Plane w/ZUC enc. + ZUC int. 
UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, +@@ -5538,13 +5538,13 @@ static uint8_t *pdcp_test_data_out[] = { + + /************ C-plane 12-bit ****************************/ + /* Control Plane w/NULL enc. + NULL int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* Control Plane w/NULL enc. + NULL int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5552,15 +5552,15 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* Control Plane w/NULL enc. + SNOW f9 int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x74, 0xB8, 0x27, 0x96}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x33, 0x22, 0x02, 0x10}, + /* Control Plane w/NULL enc. + SNOW f9 int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5570,13 +5570,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x97, 0x50, 0x3F, 0xF7}, + /* Control Plane w/NULL enc. + AES CMAC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x3F, 0x71, 0x26, 0x2E}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x1B, 0xB0, 0x4A, 0xBF}, + /* Control Plane w/NULL enc. + AES CMAC int. 
DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5586,13 +5586,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xE8, 0xBB, 0xE9, 0x36}, + /* Control Plane w/NULL enc. + ZUC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x54, 0xEF, 0x25, 0xC3}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x28, 0x41, 0xAB, 0x16}, + /* Control Plane w/NULL enc. + ZUC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5603,7 +5603,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x76, 0xD0, 0x5B, 0x2C}, + + /* Control Plane w/SNOW f8 enc. + NULL int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, +@@ -5619,13 +5619,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xDC, 0x32, 0x96, 0x65}, + /* Control Plane w/SNOW f8 enc. + SNOW f9 int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, + 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, + 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, +- 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x66, 0xBF, 0x8B, 0x05}, ++ 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x21, 0x25, 0xAE, 0x83}, + /* Control Plane w/SNOW f8 enc. + SNOW f9 int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, + 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, +@@ -5635,13 +5635,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x4B, 0x62, 0xA9, 0x92}, + /* Control Plane w/SNOW f8 enc. + AES CMAC int. 
UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, + 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, + 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, +- 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x2D, 0x76, 0x8A, 0xBD}, ++ 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x09, 0xB7, 0xE6, 0x2C}, + /* Control Plane w/SNOW f8 enc. + AES CMAC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, + 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, +@@ -5651,13 +5651,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x34, 0x89, 0x7F, 0x53}, + /* Control Plane w/SNOW f8 enc. + ZUC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, + 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, + 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, +- 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x46, 0xE8, 0x89, 0x50}, ++ 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x3A, 0x46, 0x07, 0x85}, + /* Control Plane w/SNOW f8 enc. + ZUC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, + 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, +@@ -5668,7 +5668,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xAA, 0xE2, 0xCD, 0x49}, + + /* Control Plane w/AES CTR enc. + NULL int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, +@@ -5685,13 +5685,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x87, 0x7A, 0x32, 0x1B}, + /* Control Plane w/AES CTR enc. + SNOW f9 int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, + 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, + 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, +- 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xF2, 0x8B, 0x18, 0xAA}, ++ 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xB5, 0x11, 0x3D, 0x2C}, + + /* Control Plane w/AES CTR enc. + SNOW f9 int. 
DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, +@@ -5702,13 +5702,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x10, 0x2A, 0x0D, 0xEC}, + /* Control Plane w/AES CTR enc. + AES CMAC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, + 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, + 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, +- 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xB9, 0x42, 0x19, 0x12}, ++ 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0x9D, 0x83, 0x75, 0x83}, + /* Control Plane w/AES CTR enc. + AES CMAC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, + 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, +@@ -5718,13 +5718,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x6F, 0xC1, 0xDB, 0x2D}, + /* Control Plane w/AES CTR enc. + ZUC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, + 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, + 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, +- 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xD2, 0xDC, 0x1A, 0xFF}, ++ 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xAE, 0x72, 0x94, 0x2A}, + /* Control Plane w/AES CTR enc. + ZUC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, + 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, +@@ -5734,7 +5734,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0xF1, 0xAA, 0x69, 0x37}, + /* Control Plane w/ZUC enc. + NULL int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, +@@ -5750,13 +5750,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, + 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x90, 0xF5, 0xBD, 0x56}, + /* Control Plane w/ZUC enc. + SNOW f9 int. 
UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, + 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, + 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, +- 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x69, 0x75, 0x1D, 0x76}, ++ 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x2E, 0xEF, 0x38, 0xF0}, + /* Control Plane w/ZUC enc. + SNOW f9 int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, + 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, +@@ -5766,13 +5766,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, + 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x07, 0xA5, 0x82, 0xA1}, + /* Control Plane w/ZUC enc. + AES CMAC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, + 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, + 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, +- 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x22, 0xBC, 0x1C, 0xCE}, ++ 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x06, 0x7D, 0x70, 0x5F}, + /* Control Plane w/ZUC enc. + AES CMAC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, + 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, +@@ -5782,13 +5782,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, + 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x78, 0x4E, 0x54, 0x60}, + /* Control Plane w/ZUC enc. + ZUC int. UL LONG SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, + 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, + 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, +- 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x49, 0x22, 0x1F, 0x23}, ++ 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x35, 0x8C, 0x91, 0xF6}, + /* Control Plane w/ZUC enc. + ZUC int. DL LONG SN */ + (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, + 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, +@@ -5817,7 +5817,7 @@ static uint8_t *pdcp_test_data_out[] = { + (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, + 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, + /* User Plane w/NULL enc. 
UL for 18-bit SN*/ +- (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, ++ (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, + 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, + 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, +@@ -5851,7 +5851,7 @@ static uint8_t *pdcp_test_data_out[] = { + (uint8_t[]){0x8b, 0x26, 0x0b, 0x50, 0xf3, 0xff, 0x37, 0xe3, 0x6b, 0xaf, + 0x08, 0xd8, 0xf6, 0x1f, 0xca, 0x6f, 0xbc}, + /* User Plane w/SNOW enc. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, + 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, + 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, +@@ -5884,7 +5884,7 @@ static uint8_t *pdcp_test_data_out[] = { + (uint8_t[]){0x8b, 0x26, 0xc7, 0xf2, 0x23, 0xb3, 0xbe, 0xc0, 0xdf, 0xc5, + 0xed, 0x37, 0x35, 0x7c, 0x66, 0xa3, 0xf9}, + /* User Plane w/AES enc. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, + 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, + 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, +@@ -5916,7 +5916,7 @@ static uint8_t *pdcp_test_data_out[] = { + (uint8_t[]){0x8b, 0x26, 0xa3, 0x1a, 0x1e, 0x22, 0xf7, 0x17, 0x8a, 0xb5, + 0x59, 0xd8, 0x2b, 0x13, 0xdd, 0x12, 0x4e}, + /* User Plane w/ZUC enc. UL for 18-bit SN*/ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, + 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, + 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, +@@ -5932,13 +5932,13 @@ static uint8_t *pdcp_test_data_out[] = { + + /************************* 12-bit u-plane with int ************/ + /* User Plane w/NULL enc. + NULL int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* User Plane w/NULL enc. + NULL int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5946,15 +5946,15 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x00, 0x00, 0x00, 0x00}, + /* User Plane w/NULL enc. 
+ SNOW f9 int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x74, 0xB8, 0x27, 0x96}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x6A, 0x4D, 0xA1, 0xE0}, + /* User Plane w/NULL enc. + SNOW f9 int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5964,13 +5964,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x97, 0x50, 0x3F, 0xF7}, + /* User Plane w/NULL enc. + AES CMAC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x3F, 0x71, 0x26, 0x2E}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xB4, 0x36, 0x24, 0x75}, + /* User Plane w/NULL enc. + AES CMAC int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5980,13 +5980,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xE8, 0xBB, 0xE9, 0x36}, + /* User Plane w/NULL enc. + ZUC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, ++ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, + 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, + 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, + 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, + 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, +- 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x54, 0xEF, 0x25, 0xC3}, ++ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x5B, 0x05, 0x40, 0x0B}, + /* User Plane w/NULL enc. + ZUC int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +@@ -5997,7 +5997,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x76, 0xD0, 0x5B, 0x2C}, + + /* User Plane w/SNOW f8 enc. + NULL int. 
UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, +@@ -6013,13 +6013,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xDC, 0x32, 0x96, 0x65}, + /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, + 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, + 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, +- 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x66, 0xBF, 0x8B, 0x05}, ++ 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x78, 0x4A, 0x0D, 0x73}, + /* User Plane w/SNOW f8 enc. + SNOW f9 int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, + 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, +@@ -6029,13 +6029,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x4B, 0x62, 0xA9, 0x92}, + /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, + 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, + 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, +- 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x2D, 0x76, 0x8A, 0xBD}, ++ 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0xA6, 0x31, 0x88, 0xE6}, + /* User Plane w/SNOW f8 enc. + AES CMAC int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, + 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, +@@ -6045,13 +6045,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x34, 0x89, 0x7F, 0x53}, + /* User Plane w/SNOW f8 enc. + ZUC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, ++ (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, + 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, + 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, + 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, + 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, +- 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x46, 0xE8, 0x89, 0x50}, ++ 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x49, 0x02, 0xEC, 0x98}, + /* User Plane w/SNOW f8 enc. + ZUC int. 
DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, + 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, +@@ -6061,7 +6061,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, + 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xAA, 0xE2, 0xCD, 0x49}, + /* User Plane w/AES CTR enc. + NULL int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, +@@ -6078,13 +6078,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x87, 0x7A, 0x32, 0x1B}, + /* User Plane w/AES CTR enc. + SNOW f9 int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, + 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, + 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, +- 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xF2, 0x8B, 0x18, 0xAA}, ++ 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xEC, 0x7E, 0x9E, 0xDC}, + + /* User Plane w/AES CTR enc. + SNOW f9 int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, +@@ -6095,13 +6095,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x10, 0x2A, 0x0D, 0xEC}, + /* User Plane w/AES CTR enc. + AES CMAC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, + 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, + 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, +- 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xB9, 0x42, 0x19, 0x12}, ++ 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0x32, 0x05, 0x1B, 0x49}, + /* User Plane w/AES CTR enc. + AES CMAC int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, + 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, +@@ -6111,13 +6111,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x6F, 0xC1, 0xDB, 0x2D}, + /* User Plane w/AES CTR enc. + ZUC int. 
UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, ++ (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, + 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, + 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, + 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, + 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, +- 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xD2, 0xDC, 0x1A, 0xFF}, ++ 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xDD, 0x36, 0x7F, 0x37}, + /* User Plane w/AES CTR enc. + ZUC int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, + 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, +@@ -6128,7 +6128,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0xF1, 0xAA, 0x69, 0x37}, + + /* User Plane w/ZUC enc. + NULL int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, +@@ -6144,13 +6144,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, + 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x90, 0xF5, 0xBD, 0x56}, + /* User Plane w/ZUC enc. + SNOW f9 int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, + 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, + 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, +- 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x69, 0x75, 0x1D, 0x76}, ++ 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x77, 0x80, 0x9B, 0x00}, + /* User Plane w/ZUC enc. + SNOW f9 int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, + 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, +@@ -6160,13 +6160,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, + 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x07, 0xA5, 0x82, 0xA1}, + /* User Plane w/ZUC enc. + AES CMAC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, + 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, + 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, +- 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x22, 0xBC, 0x1C, 0xCE}, ++ 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0xA9, 0xFB, 0x1E, 0x95}, + /* User Plane w/ZUC enc. + AES CMAC int. 
DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, + 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, +@@ -6176,13 +6176,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, + 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x78, 0x4E, 0x54, 0x60}, + /* User Plane w/ZUC enc. + ZUC int. UL for 12-bit SN */ +- (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, ++ (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, + 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, + 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, + 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, + 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, +- 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x49, 0x22, 0x1F, 0x23}, ++ 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x46, 0xC8, 0x7A, 0xEB}, + /* User Plane w/ZUC enc. + ZUC int. DL for 12-bit SN */ + (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, + 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, +@@ -6194,7 +6194,7 @@ static uint8_t *pdcp_test_data_out[] = { + + /************************* 18-bit u-plane with int ************/ + /* User Plane w/NULL enc. + NULL int. UL for 18-bit SN */ +- (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, ++ (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, + 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, + 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, +@@ -6210,13 +6210,13 @@ static uint8_t *pdcp_test_data_out[] = { + 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, + 0x69, 0x00, 0x00, 0x00, 0x00}, + /* User Plane w/NULL enc. + SNOW f9 int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, + 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, + 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, + 0x31, 0xA2, 0x76, 0xBA, 0xFC, 0x5A, 0xDB, 0xAA, 0xA3, 0x0B, 0x6A, + 0xD2, 0xEE, 0xD6, 0x93, 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, +- 0x91, 0x7F, 0x71, 0x17, 0x69}, ++ 0x91, 0x7F, 0x58, 0x24, 0x17}, + /* User Plane w/NULL enc. + SNOW f9 int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, +@@ -6225,12 +6225,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0x84, 0x45, 0xA8, 0x88}, + /* User Plane w/NULL enc. + AES CMAC int. 
UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, + 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, + 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, 0x31, 0xA2, 0x76, + 0xBA, 0xFC, 0x5A, 0xDB, 0xAA, 0xA3, 0x0B, 0x6A, 0xD2, 0xEE, 0xD6, 0x93, +- 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0x33, 0x9B, 0x38, 0xF7}, ++ 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0x83, 0xB7, 0xF2, 0x0B}, + /* User Plane w/NULL enc. + AES CMAC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, +@@ -6239,12 +6239,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0xD9, 0x0B, 0x89, 0x7F}, + /* User Plane w/NULL enc. + ZUC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, + 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, + 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, 0x31, 0xA2, 0x76, + 0xBA, 0xFC, 0x5A, 0xDB, 0xAA, 0xA3, 0x0B, 0x6A, 0xD2, 0xEE, 0xD6, 0x93, +- 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0xB5, 0xD9, 0x5D, 0xE0}, ++ 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0xAB, 0x98, 0xC0, 0x1A}, + /* User Plane w/NULL enc. + ZUC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, +@@ -6253,7 +6253,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0xDA, 0xE9, 0x17, 0x96}, + /* User Plane w/SNOW f8 enc. + NULL int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, + 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, + 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, +@@ -6268,12 +6268,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, + 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0x53, 0x93, 0xEF, 0x7C}, + /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, + 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, + 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, 0xC8, 0x96, 0x7A, + 0x0A, 0x25, 0x08, 0xEB, 0x41, 0x30, 0x00, 0x33, 0xC7, 0xFF, 0x33, 0x4E, +- 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0x2A, 0xAB, 0x0F, 0x24}, ++ 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0x2A, 0x82, 0x3C, 0x5A}, + /* User Plane w/SNOW f8 enc. + SNOW f9 int. 
DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x22, 0x2D, 0x15, 0xBA, 0x95, 0xAC, 0x47, 0x5A, + 0xE3, 0x90, 0x82, 0xEA, 0xC2, 0x93, 0x80, 0x23, 0xE9, 0xAC, 0xEA, 0x5D, +@@ -6282,12 +6282,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, + 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0xD7, 0xD6, 0x47, 0xF4}, + /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, + 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, + 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, 0xC8, 0x96, 0x7A, + 0x0A, 0x25, 0x08, 0xEB, 0x41, 0x30, 0x00, 0x33, 0xC7, 0xFF, 0x33, 0x4E, +- 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0x66, 0x41, 0x20, 0xBA}, ++ 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0xD6, 0x6D, 0xEA, 0x46}, + /* User Plane w/SNOW f8 enc. + AES CMAC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x22, 0x2D, 0x15, 0xBA, 0x95, 0xAC, 0x47, 0x5A, + 0xE3, 0x90, 0x82, 0xEA, 0xC2, 0x93, 0x80, 0x23, 0xE9, 0xAC, 0xEA, 0x5D, +@@ -6296,12 +6296,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, + 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0x8A, 0x98, 0x66, 0x03}, + /* User Plane w/SNOW f8 enc. + ZUC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, + 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, + 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, 0xC8, 0x96, 0x7A, + 0x0A, 0x25, 0x08, 0xEB, 0x41, 0x30, 0x00, 0x33, 0xC7, 0xFF, 0x33, 0x4E, +- 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0xE0, 0x03, 0x45, 0xAD}, ++ 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0xFE, 0x42, 0xD8, 0x57}, + /* User Plane w/SNOW f8 enc. + ZUC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x22, 0x2D, 0x15, 0xBA, 0x95, 0xAC, 0x47, 0x5A, + 0xE3, 0x90, 0x82, 0xEA, 0xC2, 0x93, 0x80, 0x23, 0xE9, 0xAC, 0xEA, 0x5D, +@@ -6310,7 +6310,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, + 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0x89, 0x7A, 0xF8, 0xEA}, + /* User Plane w/AES CTR enc. + NULL int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, + 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, + 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, +@@ -6324,12 +6324,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, + 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x98, 0x15, 0x68, 0x35}, + /* User Plane w/AES CTR enc. + SNOW f9 int. 
UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, + 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, + 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, + 0x14, 0x73, 0x76, 0xDE, 0x54, 0xA0, 0xF9, 0x4C, 0xC2, 0x8F, 0x02, 0x88, +- 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0xBE, 0x17, 0x81, 0xA1}, ++ 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0xBE, 0x3E, 0xB2, 0xDF}, + /* User Plane w/AES CTR enc. + SNOW f9 int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x01, 0x0D, 0x4B, 0x5E, 0xD3, 0xCE, 0x96, 0xE1, + 0x9A, 0x9D, 0xB3, 0x01, 0xD6, 0x40, 0x50, 0x00, 0x6C, 0x63, 0xFD, 0x37, +@@ -6338,12 +6338,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, + 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x1C, 0x50, 0xC0, 0xBD}, + /* User Plane w/AES CTR enc. + AES CMAC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, + 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, + 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, + 0x14, 0x73, 0x76, 0xDE, 0x54, 0xA0, 0xF9, 0x4C, 0xC2, 0x8F, 0x02, 0x88, +- 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0xF2, 0xFD, 0xAE, 0x3F}, ++ 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0x42, 0xD1, 0x64, 0xC3}, + /* User Plane w/AES CTR enc. + AES CMAC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x01, 0x0D, 0x4B, 0x5E, 0xD3, 0xCE, 0x96, 0xE1, + 0x9A, 0x9D, 0xB3, 0x01, 0xD6, 0x40, 0x50, 0x00, 0x6C, 0x63, 0xFD, 0x37, +@@ -6352,12 +6352,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, + 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x41, 0x1E, 0xE1, 0x4A}, + /* User Plane w/AES CTR enc. + ZUC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, ++ (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, + 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, + 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, + 0x14, 0x73, 0x76, 0xDE, 0x54, 0xA0, 0xF9, 0x4C, 0xC2, 0x8F, 0x02, 0x88, +- 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0x74, 0xBF, 0xCB, 0x28}, ++ 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0x6A, 0xFE, 0x56, 0xD2}, + /* User Plane w/AES CTR enc. + ZUC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x01, 0x0D, 0x4B, 0x5E, 0xD3, 0xCE, 0x96, 0xE1, + 0x9A, 0x9D, 0xB3, 0x01, 0xD6, 0x40, 0x50, 0x00, 0x6C, 0x63, 0xFD, 0x37, +@@ -6366,7 +6366,7 @@ static uint8_t *pdcp_test_data_out[] = { + 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, + 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x42, 0xFC, 0x7F, 0xA3}, + /* User Plane w/ZUC enc. + NULL int. 
UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, + 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, + 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, +@@ -6380,12 +6380,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7D, 0x2D, 0xE0, 0x3C, 0xE3, 0x81, 0xAA, 0xEA, 0xCC, 0xD7, 0xFC, 0x46, + 0x07, 0x7C, 0x8E, 0x8E, 0x0E, 0x99, 0xB8, 0x31, 0x65, 0x17, 0xF6, 0xE3}, + /* User Plane w/ZUC enc. + SNOW f9 int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, + 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, + 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, + 0x51, 0x54, 0x82, 0x69, 0x4C, 0x45, 0x0B, 0xFA, 0x87, 0x4D, 0x97, 0x6E, +- 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0x3C, 0x13, 0x64, 0xB1}, ++ 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0x3C, 0x3A, 0x57, 0xCF}, + /* User Plane w/ZUC enc. + SNOW f9 int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x30, 0x62, 0x48, 0xC0, 0xB1, 0xED, 0x1F, 0x13, + 0x8A, 0x7A, 0x62, 0x40, 0x12, 0x35, 0x54, 0x03, 0x93, 0xBD, 0xE5, 0x88, +@@ -6394,12 +6394,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7D, 0x2D, 0xE0, 0x3C, 0xE3, 0x81, 0xAA, 0xEA, 0xCC, 0xD7, 0xFC, 0x46, + 0x07, 0x7C, 0x8E, 0x8E, 0x0E, 0x99, 0xB8, 0x31, 0xE1, 0x52, 0x5E, 0x6B}, + /* User Plane w/ZUC enc. + AES CMAC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, + 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, + 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, + 0x51, 0x54, 0x82, 0x69, 0x4C, 0x45, 0x0B, 0xFA, 0x87, 0x4D, 0x97, 0x6E, +- 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0x70, 0xF9, 0x4B, 0x2F}, ++ 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0xC0, 0xD5, 0x81, 0xD3}, + /* User Plane w/ZUC enc. + AES CMAC int. DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x30, 0x62, 0x48, 0xC0, 0xB1, 0xED, 0x1F, 0x13, + 0x8A, 0x7A, 0x62, 0x40, 0x12, 0x35, 0x54, 0x03, 0x93, 0xBD, 0xE5, 0x88, +@@ -6408,12 +6408,12 @@ static uint8_t *pdcp_test_data_out[] = { + 0x7D, 0x2D, 0xE0, 0x3C, 0xE3, 0x81, 0xAA, 0xEA, 0xCC, 0xD7, 0xFC, 0x46, + 0x07, 0x7C, 0x8E, 0x8E, 0x0E, 0x99, 0xB8, 0x31, 0xBC, 0x1C, 0x7F, 0x9C}, + /* User Plane w/ZUC enc. + ZUC int. UL for 18-bit SN */ +- (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, ++ (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, + 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, + 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, + 0x51, 0x54, 0x82, 0x69, 0x4C, 0x45, 0x0B, 0xFA, 0x87, 0x4D, 0x97, 0x6E, +- 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0xF6, 0xBB, 0x2E, 0x38}, ++ 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0xE8, 0xFA, 0xB3, 0xC2}, + /* User Plane w/ZUC enc. + ZUC int. 
DL for 18-bit SN */ + (uint8_t[]){0xF8, 0x00, 0x00, 0x30, 0x62, 0x48, 0xC0, 0xB1, 0xED, 0x1F, 0x13, + 0x8A, 0x7A, 0x62, 0x40, 0x12, 0x35, 0x54, 0x03, 0x93, 0xBD, 0xE5, 0x88, diff --git a/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h b/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h index bbe05662be..b49a07bcf2 100644 --- a/dpdk/app/test/test_cryptodev_snow3g_test_vectors.h @@ -5569,6 +12454,454 @@ index 1b249e0447..c10c48cf37 100644 next_lcore = rte_get_next_lcore(next_lcore, val_false, val_true); } +diff --git a/dpdk/app/test/test_efd_perf.c b/dpdk/app/test/test_efd_perf.c +index f3fe3b1736..1dd71e1008 100644 +--- a/dpdk/app/test/test_efd_perf.c ++++ b/dpdk/app/test/test_efd_perf.c +@@ -143,7 +143,6 @@ setup_keys_and_data(struct efd_perf_params *params, unsigned int cycle) + qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare); + + /* Sift through the list of keys and look for duplicates */ +- int num_duplicates = 0; + for (i = 0; i < KEYS_TO_ADD - 1; i++) { + if (memcmp(keys[i], keys[i + 1], params->key_size) == 0) { + /* This key already exists, try again */ +diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c +index 3d7e9fb93c..ea14094f02 100644 +--- a/dpdk/app/test/test_event_crypto_adapter.c ++++ b/dpdk/app/test/test_event_crypto_adapter.c +@@ -523,11 +523,10 @@ configure_cryptodev(void) + return TEST_FAILED; + } + +- /* Create a NULL crypto device */ +- nb_devs = rte_cryptodev_device_count_by_driver( +- rte_cryptodev_driver_id_get( +- RTE_STR(CRYPTODEV_NAME_NULL_PMD))); ++ ++ nb_devs = rte_cryptodev_count(); + if (!nb_devs) { ++ /* Create a NULL crypto device */ + ret = rte_vdev_init( + RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); + +diff --git a/dpdk/app/test/test_event_timer_adapter.c b/dpdk/app/test/test_event_timer_adapter.c +index 25bac2d155..3ded1c1efa 100644 +--- a/dpdk/app/test/test_event_timer_adapter.c ++++ b/dpdk/app/test/test_event_timer_adapter.c +@@ -46,9 +46,10 @@ static uint64_t global_bkt_tck_ns; + static uint64_t global_info_bkt_tck_ns; + static volatile uint8_t arm_done; + +-#define CALC_TICKS(tks) \ +- ceil((double)(tks * global_bkt_tck_ns) / global_info_bkt_tck_ns) ++#define CALC_TICKS(tks) ceil((double)((tks) * global_bkt_tck_ns) / global_info_bkt_tck_ns) + ++/* Wait double timeout ticks for software and an extra tick for hardware */ ++#define WAIT_TICKS(tks) (using_services ? 
2 * (tks) : tks + 1) + + static bool using_services; + static uint32_t test_lcore1; +@@ -419,10 +420,31 @@ timdev_teardown(void) + rte_mempool_free(eventdev_test_mempool); + } + ++static inline uint16_t ++timeout_event_dequeue(struct rte_event *evs, uint64_t nb_evs, uint64_t ticks) ++{ ++ uint16_t ev_cnt = 0; ++ uint64_t end_cycle; ++ ++ if (using_services && nb_evs == MAX_TIMERS) ++ ticks = 2 * ticks; ++ ++ end_cycle = rte_rdtsc() + ticks * global_bkt_tck_ns * rte_get_tsc_hz() / 1E9; ++ ++ while (ev_cnt < nb_evs && rte_rdtsc() < end_cycle) { ++ ev_cnt += rte_event_dequeue_burst(evdev, TEST_PORT_ID, &evs[ev_cnt], nb_evs, 0); ++ rte_pause(); ++ } ++ ++ return ev_cnt; ++} ++ + static inline int + test_timer_state(void) + { + struct rte_event_timer *ev_tim; ++ const uint64_t max_ticks = 100; ++ uint64_t ticks, wait_ticks; + struct rte_event ev; + const struct rte_event_timer tim = { + .ev.op = RTE_EVENT_OP_NEW, +@@ -433,11 +455,10 @@ test_timer_state(void) + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; + +- + rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim); + *ev_tim = tim; + ev_tim->ev.event_ptr = ev_tim; +- ev_tim->timeout_ticks = CALC_TICKS(120); ++ ev_tim->timeout_ticks = CALC_TICKS(max_ticks + 20); + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0, + "Armed timer exceeding max_timeout."); +@@ -445,8 +466,9 @@ test_timer_state(void) + "Improper timer state set expected %d returned %d", + RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state); + ++ ticks = 10; + ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; +- ev_tim->timeout_ticks = CALC_TICKS(10); ++ ev_tim->timeout_ticks = CALC_TICKS(ticks); + + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, + "Failed to arm timer with proper timeout."); +@@ -455,14 +477,15 @@ test_timer_state(void) + RTE_EVENT_TIMER_ARMED, ev_tim->state); + + if (!using_services) +- rte_delay_us(20); ++ wait_ticks = 2 * ticks; + else +- rte_delay_us(1000 + 200); +- TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1, +- "Armed timer failed to trigger."); ++ wait_ticks = ticks; ++ ++ TEST_ASSERT_EQUAL(timeout_event_dequeue(&ev, 1, WAIT_TICKS(wait_ticks)), 1, ++ "Armed timer failed to trigger."); + + ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED; +- ev_tim->timeout_ticks = CALC_TICKS(90); ++ ev_tim->timeout_ticks = CALC_TICKS(max_ticks - 10); + TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1, + "Failed to arm timer with proper timeout."); + TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1), +@@ -867,7 +890,6 @@ _cancel_thread(void *args) + { + RTE_SET_USED(args); + struct rte_event_timer *ev_tim = NULL; +- uint64_t cancel_count = 0; + uint16_t ret; + + while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { +@@ -877,7 +899,6 @@ _cancel_thread(void *args) + ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer"); + rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); +- cancel_count++; + } + + return TEST_SUCCESS; +@@ -1166,8 +1187,9 @@ stat_inc_reset_ev_enq(void) + int ret, i, n; + int num_evtims = MAX_TIMERS; + struct rte_event_timer *evtims[num_evtims]; +- struct rte_event evs[BATCH_SIZE]; ++ struct rte_event evs[num_evtims]; + struct rte_event_timer_adapter_stats stats; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1175,7 +1197,7 @@ stat_inc_reset_ev_enq(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = 
RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, +@@ -1200,31 +1222,12 @@ stat_inc_reset_ev_enq(void) + "succeeded = %d, rte_errno = %s", + num_evtims, ret, rte_strerror(rte_errno)); + +- rte_delay_ms(1000); +- +-#define MAX_TRIES num_evtims +- int sum = 0; +- int tries = 0; +- bool done = false; +- while (!done) { +- sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, +- RTE_DIM(evs), 10); +- if (sum >= num_evtims || ++tries >= MAX_TRIES) +- done = true; +- +- rte_delay_ms(10); +- } +- +- TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " +- "got %d", num_evtims, sum); +- +- TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); +- +- rte_delay_ms(100); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); ++ TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d", ++ num_evtims, n); + + /* Make sure the eventdev is still empty */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), +- 10); ++ n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1)); + + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " + "events from event device"); +@@ -1261,6 +1264,7 @@ event_timer_arm(void) + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1268,7 +1272,7 @@ event_timer_arm(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); +@@ -1295,10 +1299,7 @@ event_timer_arm(void) + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after arming already armed timer"); + +- /* Let timer expire */ +- rte_delay_ms(1000); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + +@@ -1318,6 +1319,7 @@ event_timer_arm_double(void) + struct rte_event_timer_adapter *adapter = timdev; + struct rte_event_timer *evtim = NULL; + struct rte_event evs[BATCH_SIZE]; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1325,7 +1327,7 @@ event_timer_arm_double(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); +@@ -1345,10 +1347,7 @@ event_timer_arm_double(void) + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after double-arm"); + +- /* Let timer expire */ +- rte_delay_ms(600); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - " + 
"expected: 1, actual: %d", n); + +@@ -1375,6 +1374,7 @@ event_timer_arm_expiry(void) + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; ++ uint64_t ticks = 30; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1384,7 +1384,7 @@ event_timer_arm_expiry(void) + + /* Set up an event timer */ + *evtim = init_tim; +- evtim->timeout_ticks = CALC_TICKS(30), // expire in 3 secs ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 secs */ + evtim->ev.event_ptr = evtim; + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); +@@ -1393,17 +1393,10 @@ event_timer_arm_expiry(void) + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event " + "timer in incorrect state"); + +- rte_delay_ms(2999); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), ticks - 1); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event"); + +- /* Delay 100 ms to account for the adapter tick window - should let us +- * dequeue one event +- */ +- rte_delay_ms(100); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(1)); + TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer " + "expiry events", n); + TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER, +@@ -1435,6 +1428,7 @@ event_timer_arm_rearm(void) + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; ++ uint64_t ticks = 1; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1444,7 +1438,7 @@ event_timer_arm_rearm(void) + + /* Set up a timer */ + *evtim = init_tim; +- evtim->timeout_ticks = CALC_TICKS(1); // expire in 0.1 sec ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 0.1 sec */ + evtim->ev.event_ptr = evtim; + + /* Arm it */ +@@ -1452,10 +1446,7 @@ event_timer_arm_rearm(void) + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + +- /* Add 100ms to account for the adapter tick window */ +- rte_delay_ms(100 + 100); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + +@@ -1472,10 +1463,7 @@ event_timer_arm_rearm(void) + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", + rte_strerror(rte_errno)); + +- /* Add 100ms to account for the adapter tick window */ +- rte_delay_ms(100 + 100); +- +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry " + "events from event device"); + +@@ -1497,7 +1485,8 @@ event_timer_arm_max(void) + int ret, i, n; + int num_evtims = MAX_TIMERS; + struct rte_event_timer *evtims[num_evtims]; +- struct rte_event evs[BATCH_SIZE]; ++ struct rte_event evs[num_evtims]; ++ uint64_t ticks = 5; + const struct rte_event_timer init_tim = { + .ev.op = RTE_EVENT_OP_NEW, + .ev.queue_id = TEST_QUEUE_ID, +@@ -1505,7 +1494,7 @@ event_timer_arm_max(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec ++ .timeout_ticks = CALC_TICKS(ticks), /**< expire in .5 sec */ + }; + + ret = 
rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims, +@@ -1525,31 +1514,12 @@ event_timer_arm_max(void) + "succeeded = %d, rte_errno = %s", + num_evtims, ret, rte_strerror(rte_errno)); + +- rte_delay_ms(1000); +- +-#define MAX_TRIES num_evtims +- int sum = 0; +- int tries = 0; +- bool done = false; +- while (!done) { +- sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, +- RTE_DIM(evs), 10); +- if (sum >= num_evtims || ++tries >= MAX_TRIES) +- done = true; +- +- rte_delay_ms(10); +- } +- +- TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, " +- "got %d", num_evtims, sum); +- +- TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries"); +- +- rte_delay_ms(100); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); ++ TEST_ASSERT_EQUAL(n, num_evtims, "Expected %d timer expiry events, got %d", ++ num_evtims, n); + + /* Make sure the eventdev is still empty */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), +- 10); ++ n = timeout_event_dequeue(evs, 1, WAIT_TICKS(1)); + + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry " + "events from event device"); +@@ -1669,6 +1639,7 @@ event_timer_cancel(void) + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, + }; ++ uint64_t ticks = 30; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1686,7 +1657,7 @@ event_timer_cancel(void) + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; +- evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */ + + /* Check that cancelling an inited but unarmed timer fails */ + ret = rte_event_timer_cancel_burst(adapter, &evtim, 1); +@@ -1710,10 +1681,8 @@ event_timer_cancel(void) + TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED, + "evtim in incorrect state"); + +- rte_delay_ms(3000); +- + /* Make sure that no expiry event was generated */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); + + rte_mempool_put(eventdev_test_mempool, evtim); +@@ -1736,8 +1705,8 @@ event_timer_cancel_double(void) + .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL, + .ev.event_type = RTE_EVENT_TYPE_TIMER, + .state = RTE_EVENT_TIMER_NOT_ARMED, +- .timeout_ticks = CALC_TICKS(5), // expire in .5 sec + }; ++ uint64_t ticks = 30; + + rte_mempool_get(eventdev_test_mempool, (void **)&evtim); + if (evtim == NULL) { +@@ -1748,7 +1717,7 @@ event_timer_cancel_double(void) + /* Set up a timer */ + *evtim = init_tim; + evtim->ev.event_ptr = evtim; +- evtim->timeout_ticks = CALC_TICKS(30); // expire in 3 sec ++ evtim->timeout_ticks = CALC_TICKS(ticks); /**< expire in 3 sec */ + + ret = rte_event_timer_arm_burst(adapter, &evtim, 1); + TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n", +@@ -1770,10 +1739,8 @@ event_timer_cancel_double(void) + TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value " + "after double-cancel: rte_errno = %d", rte_errno); + +- rte_delay_ms(3000); +- + /* Still make sure that no expiry event was generated */ +- n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0); ++ n = timeout_event_dequeue(evs, RTE_DIM(evs), WAIT_TICKS(ticks)); + TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); + + rte_mempool_put(eventdev_test_mempool, evtim); diff --git 
a/dpdk/app/test/test_fib_perf.c b/dpdk/app/test/test_fib_perf.c index 86b2f832b8..7a25fe8df7 100644 --- a/dpdk/app/test/test_fib_perf.c @@ -5582,8 +12915,53 @@ index 86b2f832b8..7a25fe8df7 100644 begin = rte_rdtsc(); for (i = 0; i < NUM_ROUTE_ENTRIES; i++) { +diff --git a/dpdk/app/test/test_hash_perf.c b/dpdk/app/test/test_hash_perf.c +index 76cdac5d53..199564bba3 100644 +--- a/dpdk/app/test/test_hash_perf.c ++++ b/dpdk/app/test/test_hash_perf.c +@@ -480,6 +480,11 @@ timed_lookups_multi(unsigned int with_hash, unsigned int with_data, + (const void **)keys_burst, + &signatures[j * BURST_SIZE], + BURST_SIZE, positions_burst); ++ if (ret != 0) { ++ printf("rte_hash_lookup_with_hash_bulk failed with %d\n", ++ ret); ++ return -1; ++ } + for (k = 0; k < BURST_SIZE; k++) { + if (positions_burst[k] != + positions[j * +@@ -492,10 +497,14 @@ timed_lookups_multi(unsigned int with_hash, unsigned int with_data, + } + } + } else { +- rte_hash_lookup_bulk(h[table_index], ++ ret = rte_hash_lookup_bulk(h[table_index], + (const void **) keys_burst, + BURST_SIZE, + positions_burst); ++ if (ret != 0) { ++ printf("rte_hash_lookup_bulk failed with %d\n", ret); ++ return -1; ++ } + for (k = 0; k < BURST_SIZE; k++) { + if (positions_burst[k] != positions[j * BURST_SIZE + k]) { + printf("Key looked up in %d, should be in %d\n", +diff --git a/dpdk/app/test/test_hash_readwrite.c b/dpdk/app/test/test_hash_readwrite.c +index 9b192f2b5e..1ed615466d 100644 +--- a/dpdk/app/test/test_hash_readwrite.c ++++ b/dpdk/app/test/test_hash_readwrite.c +@@ -162,7 +162,7 @@ init_params(int use_ext, int use_htm, int rw_lf, int use_jhash) + + handle = rte_hash_create(&hash_params); + if (handle == NULL) { +- printf("hash creation failed"); ++ printf("hash creation failed\n"); + return -1; + } + diff --git a/dpdk/app/test/test_hash_readwrite_lf_perf.c b/dpdk/app/test/test_hash_readwrite_lf_perf.c -index 8120cf43be..32f9ec9250 100644 +index 8120cf43be..cf86046a2f 100644 --- a/dpdk/app/test/test_hash_readwrite_lf_perf.c +++ b/dpdk/app/test/test_hash_readwrite_lf_perf.c @@ -59,7 +59,7 @@ struct rwc_perf { @@ -5595,11 +12973,32 @@ index 8120cf43be..32f9ec9250 100644 uint32_t w_ks_r_hit_extbkt[2][NUM_TEST]; uint32_t writer_add_del[NUM_TEST]; }; +@@ -1102,7 +1102,6 @@ test_hash_multi_add_lookup(struct rwc_perf *rwc_perf_results, int rwc_lf, + rte_eal_remote_launch(test_rwc_reader, + (void *)(uintptr_t)read_type, + enabled_core_ids[i]); +- write_type = WRITE_KEY_SHIFT; + pos_core = 0; + + /* Launch writers */ diff --git a/dpdk/app/test/test_ipsec.c b/dpdk/app/test/test_ipsec.c -index bc2a3dbc2e..3c6dcdc604 100644 +index bc2a3dbc2e..584c132f37 100644 --- a/dpdk/app/test/test_ipsec.c +++ b/dpdk/app/test/test_ipsec.c -@@ -543,12 +543,14 @@ struct rte_ipv4_hdr ipv4_outer = { +@@ -309,8 +309,10 @@ testsuite_setup(void) + } + } + +- if (ts_params->valid_dev_found == 0) +- return TEST_FAILED; ++ if (ts_params->valid_dev_found == 0) { ++ RTE_LOG(WARNING, USER1, "No compatible crypto device found.\n"); ++ return TEST_SKIPPED; ++ } + + ts_params->mbuf_pool = rte_pktmbuf_pool_create( + "CRYPTO_MBUFPOOL", +@@ -543,12 +545,14 @@ struct rte_ipv4_hdr ipv4_outer = { }; static struct rte_mbuf * @@ -5616,7 +13015,17 @@ index bc2a3dbc2e..3c6dcdc604 100644 if (m) { memset(m->buf_addr, 0, m->buf_len); char *dst = rte_pktmbuf_append(m, t_len); -@@ -1354,7 +1356,8 @@ test_ipsec_crypto_outb_burst_null_null(int i) +@@ -614,7 +618,8 @@ setup_test_string_tunneled(struct rte_mempool *mpool, const char *string, + rte_memcpy(dst, string, len); + dst += len; + /* copy pad 
bytes */ +- rte_memcpy(dst, esp_pad_bytes, padlen); ++ rte_memcpy(dst, esp_pad_bytes, RTE_MIN(padlen, ++ sizeof(esp_pad_bytes))); + dst += padlen; + /* copy ESP tail header */ + rte_memcpy(dst, &espt, sizeof(espt)); +@@ -1354,7 +1359,8 @@ test_ipsec_crypto_outb_burst_null_null(int i) /* Generate input mbuf data */ for (j = 0; j < num_pkts && rc == 0; j++) { ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool, @@ -5626,7 +13035,7 @@ index bc2a3dbc2e..3c6dcdc604 100644 if (ut_params->ibuf[j] == NULL) rc = TEST_FAILED; else { -@@ -1472,7 +1475,8 @@ test_ipsec_inline_crypto_inb_burst_null_null(int i) +@@ -1472,7 +1478,8 @@ test_ipsec_inline_crypto_inb_burst_null_null(int i) /* Generate test mbuf data */ ut_params->obuf[j] = setup_test_string( ts_params->mbuf_pool, @@ -5636,7 +13045,7 @@ index bc2a3dbc2e..3c6dcdc604 100644 if (ut_params->obuf[j] == NULL) rc = TEST_FAILED; } -@@ -1540,16 +1544,17 @@ test_ipsec_inline_proto_inb_burst_null_null(int i) +@@ -1540,16 +1547,17 @@ test_ipsec_inline_proto_inb_burst_null_null(int i) /* Generate inbound mbuf data */ for (j = 0; j < num_pkts && rc == 0; j++) { @@ -5658,7 +13067,7 @@ index bc2a3dbc2e..3c6dcdc604 100644 if (ut_params->obuf[j] == NULL) rc = TEST_FAILED; } -@@ -1649,7 +1654,8 @@ test_ipsec_inline_crypto_outb_burst_null_null(int i) +@@ -1649,7 +1657,8 @@ test_ipsec_inline_crypto_outb_burst_null_null(int i) /* Generate test mbuf data */ for (j = 0; j < num_pkts && rc == 0; j++) { ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool, @@ -5668,7 +13077,7 @@ index bc2a3dbc2e..3c6dcdc604 100644 if (ut_params->ibuf[0] == NULL) rc = TEST_FAILED; -@@ -1727,15 +1733,17 @@ test_ipsec_inline_proto_outb_burst_null_null(int i) +@@ -1727,15 +1736,17 @@ test_ipsec_inline_proto_outb_burst_null_null(int i) /* Generate test mbuf data */ for (j = 0; j < num_pkts && rc == 0; j++) { ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool, @@ -5689,7 +13098,7 @@ index bc2a3dbc2e..3c6dcdc604 100644 if (ut_params->obuf[j] == NULL) rc = TEST_FAILED; } -@@ -1804,7 +1812,8 @@ test_ipsec_lksd_proto_inb_burst_null_null(int i) +@@ -1804,7 +1815,8 @@ test_ipsec_lksd_proto_inb_burst_null_null(int i) for (j = 0; j < num_pkts && rc == 0; j++) { /* packet with sequence number 0 is invalid */ ut_params->ibuf[j] = setup_test_string(ts_params->mbuf_pool, @@ -5791,9 +13200,18 @@ index a91ea8dc47..b7b97a0dd9 100644 goto fail; } diff --git a/dpdk/app/test/test_link_bonding.c b/dpdk/app/test/test_link_bonding.c -index dc6fc46b9c..12c50ef393 100644 +index dc6fc46b9c..0d5cfd43c4 100644 --- a/dpdk/app/test/test_link_bonding.c +++ b/dpdk/app/test/test_link_bonding.c +@@ -2,7 +2,7 @@ + * Copyright(c) 2010-2014 Intel Corporation + */ + +-#include "unistd.h" ++#include + #include + #include + #include @@ -181,6 +181,10 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr) test_params->nb_tx_q, &default_pmd_conf), "rte_eth_dev_configure for port %d failed", port_id); @@ -5805,8 +13223,32 @@ index dc6fc46b9c..12c50ef393 100644 for (q_id = 0; q_id < test_params->nb_rx_q; q_id++) TEST_ASSERT_SUCCESS(rte_eth_rx_queue_setup(port_id, q_id, RX_RING_SIZE, rte_eth_dev_socket_id(port_id), &rx_conf_default, +@@ -444,7 +448,8 @@ test_add_already_bonded_slave_to_bonded_device(void) + uint16_t slaves[RTE_MAX_ETHPORTS]; + char pmd_name[RTE_ETH_NAME_MAX_LEN]; + +- test_add_slave_to_bonded_device(); ++ TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(), ++ "Failed to add slave to bonded device"); + + current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, 
+ slaves, RTE_MAX_ETHPORTS); +diff --git a/dpdk/app/test/test_link_bonding_mode4.c b/dpdk/app/test/test_link_bonding_mode4.c +index 351129de2f..6eb2620ffd 100644 +--- a/dpdk/app/test/test_link_bonding_mode4.c ++++ b/dpdk/app/test/test_link_bonding_mode4.c +@@ -643,8 +643,7 @@ bond_handshake(void) + /* If response didn't send - report failure */ + TEST_ASSERT_EQUAL(all_slaves_done, 1, "Bond handshake failed\n"); + +- /* If flags doesn't match - report failure */ +- return all_slaves_done == 1 ? TEST_SUCCESS : TEST_FAILED; ++ return TEST_SUCCESS; + } + + #define TEST_LACP_SLAVE_COUT RTE_DIM(test_params.slave_ports) diff --git a/dpdk/app/test/test_link_bonding_rssconf.c b/dpdk/app/test/test_link_bonding_rssconf.c -index f9eae93973..b3d71c6f3a 100644 +index f9eae93973..6eb473bb21 100644 --- a/dpdk/app/test/test_link_bonding_rssconf.c +++ b/dpdk/app/test/test_link_bonding_rssconf.c @@ -128,6 +128,10 @@ configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf, @@ -5820,6 +13262,15 @@ index f9eae93973..b3d71c6f3a 100644 for (rxq = 0; rxq < RXTX_QUEUE_COUNT; rxq++) { TEST_ASSERT(rte_eth_rx_queue_setup(port_id, rxq, RXTX_RING_SIZE, rte_eth_dev_socket_id(port_id), NULL, +@@ -322,7 +326,7 @@ test_propagate(void) + uint8_t n; + struct slave_conf *port; + uint8_t bond_rss_key[40]; +- struct rte_eth_rss_conf bond_rss_conf; ++ struct rte_eth_rss_conf bond_rss_conf = {0}; + + int retval = 0; + uint64_t rss_hf = 0; @@ -464,15 +468,85 @@ test_rss(void) TEST_ASSERT_SUCCESS(test_propagate(), "Propagation test failed"); @@ -5932,11 +13383,157 @@ index c3894f730e..da9b161f20 100644 * next_hop much simple. */ +diff --git a/dpdk/app/test/test_malloc.c b/dpdk/app/test/test_malloc.c +index 6d9249f831..9008038bfa 100644 +--- a/dpdk/app/test/test_malloc.c ++++ b/dpdk/app/test/test_malloc.c +@@ -301,11 +301,11 @@ test_multi_alloc_statistics(void) + rte_malloc_get_socket_stats(socket,&post_stats); + /* Check statistics reported are correct */ + /* All post stats should be equal to pre stats after alloc freed */ +- if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) && +- (post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) && +- (post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&& +- (post_stats.alloc_count!=pre_stats.alloc_count)&& +- (post_stats.free_count!=pre_stats.free_count)) { ++ if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) || ++ (post_stats.heap_freesz_bytes != pre_stats.heap_freesz_bytes) || ++ (post_stats.heap_allocsz_bytes != pre_stats.heap_allocsz_bytes) || ++ (post_stats.alloc_count != pre_stats.alloc_count) || ++ (post_stats.free_count != pre_stats.free_count)) { + printf("Malloc statistics are incorrect - freed alloc\n"); + return -1; + } +@@ -362,11 +362,11 @@ test_multi_alloc_statistics(void) + return -1; + } + +- if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) && +- (post_stats.heap_freesz_bytes!=pre_stats.heap_freesz_bytes) && +- (post_stats.heap_allocsz_bytes!=pre_stats.heap_allocsz_bytes)&& +- (post_stats.alloc_count!=pre_stats.alloc_count)&& +- (post_stats.free_count!=pre_stats.free_count)) { ++ if ((post_stats.heap_totalsz_bytes != pre_stats.heap_totalsz_bytes) || ++ (post_stats.heap_freesz_bytes != pre_stats.heap_freesz_bytes) || ++ (post_stats.heap_allocsz_bytes != pre_stats.heap_allocsz_bytes) || ++ (post_stats.alloc_count != pre_stats.alloc_count) || ++ (post_stats.free_count != pre_stats.free_count)) { + printf("Malloc statistics are incorrect - freed alloc\n"); + return -1; + } +@@ -927,6 +927,7 @@ 
test_alloc_single_socket(int32_t socket) + if (mem == NULL) + return -1; + if (addr_to_socket(mem) != desired_socket) { ++ rte_free(mem); + return -1; + } + rte_free(mem); diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c -index f54d1d7c00..2a037a12da 100644 +index f54d1d7c00..9c0ac63f92 100644 --- a/dpdk/app/test/test_mbuf.c +++ b/dpdk/app/test/test_mbuf.c -@@ -2031,8 +2031,6 @@ test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool) +@@ -1172,37 +1172,16 @@ test_refcnt_mbuf(void) + #endif + } + +-#include +-#include +-#include +-#include +- +-/* use fork() to test mbuf errors panic */ +-static int +-verify_mbuf_check_panics(struct rte_mbuf *buf) ++/* Verify if mbuf can pass the check */ ++static bool ++mbuf_check_pass(struct rte_mbuf *buf) + { +- int pid; +- int status; +- +- pid = fork(); ++ const char *reason; + +- if (pid == 0) { +- struct rlimit rl; ++ if (rte_mbuf_check(buf, 1, &reason) == 0) ++ return true; + +- /* No need to generate a coredump when panicking. */ +- rl.rlim_cur = rl.rlim_max = 0; +- setrlimit(RLIMIT_CORE, &rl); +- rte_mbuf_sanity_check(buf, 1); /* should panic */ +- exit(0); /* return normally if it doesn't panic */ +- } else if (pid < 0) { +- printf("Fork Failed\n"); +- return -1; +- } +- wait(&status); +- if(status == 0) +- return -1; +- +- return 0; ++ return false; + } + + static int +@@ -1219,47 +1198,47 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) + return -1; + + printf("Checking good mbuf initially\n"); +- if (verify_mbuf_check_panics(buf) != -1) ++ if (!mbuf_check_pass(buf)) + return -1; + + printf("Now checking for error conditions\n"); + +- if (verify_mbuf_check_panics(NULL)) { ++ if (mbuf_check_pass(NULL)) { + printf("Error with NULL mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.pool = NULL; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-pool mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.buf_iova = 0; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-physaddr mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.buf_addr = NULL; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-addr mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.refcnt = 0; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-refcnt(0) mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.refcnt = UINT16_MAX; +- if (verify_mbuf_check_panics(&badbuf)) { ++ if (mbuf_check_pass(&badbuf)) { + printf("Error with bad-refcnt(MAX) mbuf test\n"); + return -1; + } +@@ -2031,8 +2010,6 @@ test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool) NULL); if (data_copy == NULL) GOTO_FAIL("%s: Error in reading packet data!\n", __func__); @@ -5945,7 +13542,7 @@ index f54d1d7c00..2a037a12da 100644 for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) { if (data_copy[off] != (char)0xcc) GOTO_FAIL("Data corrupted at offset %u", off); -@@ -2054,8 +2052,6 @@ test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool) +@@ -2054,8 +2031,6 @@ test_pktmbuf_read_from_offset(struct rte_mempool *pktmbuf_pool) data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL); if (data_copy == NULL) GOTO_FAIL("%s: Error in reading packet data!\n", __func__); @@ -5954,8 +13551,24 @@ index f54d1d7c00..2a037a12da 100644 for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) { if (data_copy[off] != (char)0xcc) 
GOTO_FAIL("Data corrupted at offset %u", off); +@@ -2749,6 +2724,7 @@ test_nb_segs_and_next_reset(void) + + /* split m0 chain in two, between m1 and m2 */ + m0->nb_segs = 2; ++ m0->pkt_len -= m2->data_len; + m1->next = NULL; + m2->nb_segs = 1; + +@@ -2769,6 +2745,7 @@ test_nb_segs_and_next_reset(void) + m2->nb_segs != 1 || m2->next != NULL) + GOTO_FAIL("nb_segs or next was not reset properly"); + ++ rte_mempool_free(pool); + return 0; + + fail: diff --git a/dpdk/app/test/test_member.c b/dpdk/app/test/test_member.c -index 40aa4c8627..af9d50915c 100644 +index 40aa4c8627..bb9b0228df 100644 --- a/dpdk/app/test/test_member.c +++ b/dpdk/app/test/test_member.c @@ -459,7 +459,7 @@ static int test_member_multimatch(void) @@ -5967,6 +13580,26 @@ index 40aa4c8627..af9d50915c 100644 */ TEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT && ret_cache == 1, +@@ -545,7 +545,6 @@ setup_keys_and_data(void) + qsort(generated_keys, MAX_ENTRIES, KEY_SIZE, key_compare); + + /* Sift through the list of keys and look for duplicates */ +- int num_duplicates = 0; + for (i = 0; i < MAX_ENTRIES - 1; i++) { + if (memcmp(generated_keys[i], generated_keys[i + 1], + KEY_SIZE) == 0) { +diff --git a/dpdk/app/test/test_member_perf.c b/dpdk/app/test/test_member_perf.c +index e2840f12d3..a312401992 100644 +--- a/dpdk/app/test/test_member_perf.c ++++ b/dpdk/app/test/test_member_perf.c +@@ -150,7 +150,6 @@ setup_keys_and_data(struct member_perf_params *params, unsigned int cycle, + qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare); + + /* Sift through the list of keys and look for duplicates */ +- int num_duplicates = 0; + for (i = 0; i < KEYS_TO_ADD - 1; i++) { + if (memcmp(keys[i], keys[i + 1], + params->key_size) == 0) { diff --git a/dpdk/app/test/test_memory.c b/dpdk/app/test/test_memory.c index dbf6871e71..440e5ef838 100644 --- a/dpdk/app/test/test_memory.c @@ -6059,18 +13692,74 @@ index e736019ae4..11222133d0 100644 */ err = rte_metrics_update_value(9, KEY, VALUE); diff --git a/dpdk/app/test/test_pcapng.c b/dpdk/app/test/test_pcapng.c -index c2dbeaf603..34c5e12346 100644 +index c2dbeaf603..5fb1750750 100644 --- a/dpdk/app/test/test_pcapng.c +++ b/dpdk/app/test/test_pcapng.c -@@ -109,7 +109,7 @@ test_setup(void) +@@ -109,8 +109,8 @@ test_setup(void) return -1; } - /* Make a pool for cloned packeets */ +- mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", NUM_PACKETS, + /* Make a pool for cloned packets */ - mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", NUM_PACKETS, ++ mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", IOV_MAX + NUM_PACKETS, 0, 0, rte_pcapng_mbuf_size(pkt_len), + SOCKET_ID_ANY, "ring_mp_sc"); +@@ -237,6 +237,45 @@ test_validate(void) + return ret; + } + ++static int ++test_write_over_limit_iov_max(void) ++{ ++ struct rte_mbuf *orig; ++ struct rte_mbuf *clones[IOV_MAX + NUM_PACKETS] = { }; ++ struct dummy_mbuf mbfs; ++ unsigned int i; ++ ssize_t len; ++ ++ /* make a dummy packet */ ++ mbuf1_prepare(&mbfs, pkt_len); ++ ++ /* clone them */ ++ orig = &mbfs.mb[0]; ++ for (i = 0; i < IOV_MAX + NUM_PACKETS; i++) { ++ struct rte_mbuf *mc; ++ ++ mc = rte_pcapng_copy(port_id, 0, orig, mp, pkt_len, ++ rte_get_tsc_cycles(), 0); ++ if (mc == NULL) { ++ fprintf(stderr, "Cannot copy packet\n"); ++ return -1; ++ } ++ clones[i] = mc; ++ } ++ ++ /* write it to capture file */ ++ len = rte_pcapng_write_packets(pcapng, clones, IOV_MAX + NUM_PACKETS); ++ ++ rte_pktmbuf_free_bulk(clones, IOV_MAX + NUM_PACKETS); ++ ++ if (len <= 0) { ++ fprintf(stderr, "Write of packets failed\n"); ++ return -1; ++ } 
++ ++ return 0; ++} ++ + static void + test_cleanup(void) + { +@@ -257,6 +296,7 @@ unit_test_suite test_pcapng_suite = { + TEST_CASE(test_write_packets), + TEST_CASE(test_write_stats), + TEST_CASE(test_validate), ++ TEST_CASE(test_write_over_limit_iov_max), + TEST_CASES_END() + } + }; diff --git a/dpdk/app/test/test_pmd_perf.c b/dpdk/app/test/test_pmd_perf.c index 0aa9dc1b1c..4094057b27 100644 --- a/dpdk/app/test/test_pmd_perf.c @@ -6179,6 +13868,26 @@ index 05936cfee8..33a9f4ebb7 100644 if (rte_red_config_init(&config, 0, 2, 1, 0) == 0) { printf("%i: rte_red_config_init should have failed!\n", __LINE__); return -1; +diff --git a/dpdk/app/test/test_reorder.c b/dpdk/app/test/test_reorder.c +index 1c4226da65..69b63bceba 100644 +--- a/dpdk/app/test/test_reorder.c ++++ b/dpdk/app/test/test_reorder.c +@@ -270,6 +270,7 @@ test_reorder_drain(void) + } + if (robufs[0] != NULL) + rte_pktmbuf_free(robufs[0]); ++ memset(robufs, 0, sizeof(robufs)); + + /* Insert more packets + * RB[] = {NULL, NULL, NULL, NULL} +@@ -306,6 +307,7 @@ test_reorder_drain(void) + if (robufs[i] != NULL) + rte_pktmbuf_free(robufs[i]); + } ++ memset(robufs, 0, sizeof(robufs)); + + /* + * RB[] = {NULL, NULL, NULL, NULL} diff --git a/dpdk/app/test/test_security.c b/dpdk/app/test/test_security.c index 060cf1ffa8..059731b65d 100644 --- a/dpdk/app/test/test_security.c @@ -6257,6 +13966,137 @@ index a62530673f..62ba4a9528 100644 */ for (i = 0; i < RTE_DIM(rng_arr); i++) { for (k = 0; k < 100; k++) { +diff --git a/dpdk/app/test/test_trace.c b/dpdk/app/test/test_trace.c +index 0f9df83c40..7f07de1aa6 100644 +--- a/dpdk/app/test/test_trace.c ++++ b/dpdk/app/test/test_trace.c +@@ -9,6 +9,8 @@ + #include "test.h" + #include "test_trace.h" + ++int app_dpdk_test_tp_count; ++ + static int32_t + test_trace_point_globbing(void) + { +@@ -70,8 +72,15 @@ test_trace_point_regex(void) + static int32_t + test_trace_point_disable_enable(void) + { ++ int expected; + int rc; + ++ /* At tp registration, the associated counter increases once. 
*/ ++ expected = 1; ++ TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected, ++ "Expecting %d, but got %d for app_dpdk_test_tp_count", ++ expected, app_dpdk_test_tp_count); ++ + rc = rte_trace_point_disable(&__app_dpdk_test_tp); + if (rc < 0) + goto failed; +@@ -79,6 +88,12 @@ test_trace_point_disable_enable(void) + if (rte_trace_point_is_enabled(&__app_dpdk_test_tp)) + goto failed; + ++ /* No emission expected */ ++ app_dpdk_test_tp("app.dpdk.test.tp"); ++ TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected, ++ "Expecting %d, but got %d for app_dpdk_test_tp_count", ++ expected, app_dpdk_test_tp_count); ++ + rc = rte_trace_point_enable(&__app_dpdk_test_tp); + if (rc < 0) + goto failed; +@@ -88,6 +103,11 @@ test_trace_point_disable_enable(void) + + /* Emit the trace */ + app_dpdk_test_tp("app.dpdk.test.tp"); ++ expected++; ++ TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected, ++ "Expecting %d, but got %d for app_dpdk_test_tp_count", ++ expected, app_dpdk_test_tp_count); ++ + return TEST_SUCCESS; + + failed: +@@ -101,9 +121,6 @@ test_trace_mode(void) + + current = rte_trace_mode_get(); + +- if (!rte_trace_is_enabled()) +- return TEST_SKIPPED; +- + rte_trace_mode_set(RTE_TRACE_MODE_DISCARD); + if (rte_trace_mode_get() != RTE_TRACE_MODE_DISCARD) + goto failed; +@@ -172,6 +189,19 @@ test_generic_trace_points(void) + return TEST_SUCCESS; + } + ++static int ++test_trace_dump(void) ++{ ++ rte_trace_dump(stdout); ++ return 0; ++} ++ ++static int ++test_trace_metadata_dump(void) ++{ ++ return rte_trace_metadata_dump(stdout); ++} ++ + static struct unit_test_suite trace_tests = { + .suite_name = "trace autotest", + .setup = NULL, +@@ -184,6 +214,8 @@ static struct unit_test_suite trace_tests = { + TEST_CASE(test_trace_point_globbing), + TEST_CASE(test_trace_point_regex), + TEST_CASE(test_trace_points_lookup), ++ TEST_CASE(test_trace_dump), ++ TEST_CASE(test_trace_metadata_dump), + TEST_CASES_END() + } + }; +@@ -195,20 +227,3 @@ test_trace(void) + } + + REGISTER_TEST_COMMAND(trace_autotest, test_trace); +- +-static int +-test_trace_dump(void) +-{ +- rte_trace_dump(stdout); +- return 0; +-} +- +-REGISTER_TEST_COMMAND(trace_dump, test_trace_dump); +- +-static int +-test_trace_metadata_dump(void) +-{ +- return rte_trace_metadata_dump(stdout); +-} +- +-REGISTER_TEST_COMMAND(trace_metadata_dump, test_trace_metadata_dump); +diff --git a/dpdk/app/test/test_trace.h b/dpdk/app/test/test_trace.h +index 413842f60d..4ad44e2bea 100644 +--- a/dpdk/app/test/test_trace.h ++++ b/dpdk/app/test/test_trace.h +@@ -3,10 +3,12 @@ + */ + #include + ++extern int app_dpdk_test_tp_count; + RTE_TRACE_POINT( + app_dpdk_test_tp, + RTE_TRACE_POINT_ARGS(const char *str), + rte_trace_point_emit_string(str); ++ app_dpdk_test_tp_count++; + ) + + RTE_TRACE_POINT_FP( diff --git a/dpdk/buildtools/binutils-avx512-check.py b/dpdk/buildtools/binutils-avx512-check.py index a4e14f3593..a0847a23d6 100644 --- a/dpdk/buildtools/binutils-avx512-check.py @@ -6299,6 +14139,24 @@ index 26b199220a..39a60d09fa 100755 sphinx_cmd += ['-j', 'auto'] # find all the files sphinx will process so we can write them as dependencies +diff --git a/dpdk/buildtools/get-numa-count.py b/dpdk/buildtools/get-numa-count.py +index 1b7787787f..2f243886cd 100644 +--- a/dpdk/buildtools/get-numa-count.py ++++ b/dpdk/buildtools/get-numa-count.py +@@ -6,11 +6,12 @@ + import glob + import os + import subprocess ++import re + + if os.name == 'posix': + if os.path.isdir('/sys/devices/system/node'): + numa_nodes = glob.glob('/sys/devices/system/node/node*') +- numa_nodes.sort() ++ 
numa_nodes.sort(key=lambda l: int(re.findall('\d+', l)[0])) + print(int(os.path.basename(numa_nodes[-1])[4:]) + 1) + else: + subprocess.run(['sysctl', '-n', 'vm.ndomains'], check=False) diff --git a/dpdk/buildtools/meson.build b/dpdk/buildtools/meson.build index 22ea0ba375..e1c600e40f 100644 --- a/dpdk/buildtools/meson.build @@ -6346,6 +14204,64 @@ index 301418949b..5043b82651 100644 ar = 'aarch64-linux-gnu-ar' as = 'aarch64-linux-gnu-as' strip = 'aarch64-linux-gnu-strip' +diff --git a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu +new file mode 100644 +index 0000000000..db488d75f4 +--- /dev/null ++++ b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu +@@ -0,0 +1,19 @@ ++[binaries] ++c = 'clang' ++cpp = 'clang++' ++ar = 'llvm-ar' ++strip = 'llvm-strip' ++llvm-config = 'llvm-config' ++pcap-config = 'llvm-config' ++pkgconfig = 'aarch64-linux-gnu-pkg-config' ++ ++[host_machine] ++system = 'linux' ++cpu_family = 'aarch64' ++cpu = 'armv8-a' ++endian = 'little' ++ ++[properties] ++platform = 'generic' ++c_args = ['-target', 'aarch64-linux-gnu', '--sysroot', '/usr/aarch64-linux-gnu'] ++c_link_args = ['-target', 'aarch64-linux-gnu', '-fuse-ld=lld', '--gcc-toolchain=/usr'] +diff --git a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu1804 b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu1804 +deleted file mode 100644 +index db488d75f4..0000000000 +--- a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu1804 ++++ /dev/null +@@ -1,19 +0,0 @@ +-[binaries] +-c = 'clang' +-cpp = 'clang++' +-ar = 'llvm-ar' +-strip = 'llvm-strip' +-llvm-config = 'llvm-config' +-pcap-config = 'llvm-config' +-pkgconfig = 'aarch64-linux-gnu-pkg-config' +- +-[host_machine] +-system = 'linux' +-cpu_family = 'aarch64' +-cpu = 'armv8-a' +-endian = 'little' +- +-[properties] +-platform = 'generic' +-c_args = ['-target', 'aarch64-linux-gnu', '--sysroot', '/usr/aarch64-linux-gnu'] +-c_link_args = ['-target', 'aarch64-linux-gnu', '-fuse-ld=lld', '--gcc-toolchain=/usr'] +diff --git a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu1804 b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu1804 +new file mode 120000 +index 0000000000..8e2e3fa9c0 +--- /dev/null ++++ b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu1804 +@@ -0,0 +1 @@ ++arm64_armv8_linux_clang_ubuntu +\ No newline at end of file diff --git a/dpdk/config/arm/arm64_armv8_linux_gcc b/dpdk/config/arm/arm64_armv8_linux_gcc index 5391d35389..5c32f6b9ca 100644 --- a/dpdk/config/arm/arm64_armv8_linux_gcc @@ -6527,10 +14443,18 @@ index 372097ba01..131f56465a 100644 strip = 'aarch64-linux-gnu-strip' pkgconfig = 'aarch64-linux-gnu-pkg-config' diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build -index 213324d262..89a3bf4213 100644 +index 213324d262..4131d6e227 100644 --- a/dpdk/config/arm/meson.build +++ b/dpdk/config/arm/meson.build -@@ -49,6 +49,7 @@ implementer_generic = { +@@ -44,11 +44,14 @@ implementer_generic = { + }, + 'generic_aarch32': { + 'march': 'armv8-a', +- 'compiler_options': ['-mfpu=neon'], ++ 'force_march': true, ++ 'march_features': ['simd'], ++ 'compiler_options': ['-mfpu=auto'], + 'flags': [ ['RTE_ARCH_ARM_NEON_MEMCPY', false], ['RTE_ARCH_STRICT_ALIGN', true], ['RTE_ARCH_ARMv8_AARCH32', true], @@ -6538,7 +14462,7 @@ index 213324d262..89a3bf4213 100644 ['RTE_CACHE_LINE_SIZE', 64] ] } -@@ -276,7 +277,8 @@ soc_cn10k = { +@@ -276,7 +279,8 @@ soc_cn10k = { 'implementer' : '0x41', 'flags': [ ['RTE_MAX_LCORE', 24], @@ -6548,7 +14472,7 @@ index 213324d262..89a3bf4213 100644 ], 'part_number': '0xd49', 
'extra_march_features': ['crypto'], -@@ -432,11 +434,15 @@ if dpdk_conf.get('RTE_ARCH_32') +@@ -432,11 +436,15 @@ if dpdk_conf.get('RTE_ARCH_32') else # armv7 build dpdk_conf.set('RTE_ARCH_ARMv7', true) @@ -6564,7 +14488,7 @@ index 213324d262..89a3bf4213 100644 update_flags = true soc_config = {} if not meson.is_cross_build() -@@ -460,7 +466,7 @@ else +@@ -460,7 +468,7 @@ else # 'Primary Part number', 'Revision'] detect_vendor = find_program(join_paths(meson.current_source_dir(), 'armv8_machine.py')) @@ -6573,8 +14497,58 @@ index 213324d262..89a3bf4213 100644 if cmd.returncode() == 0 cmd_output = cmd.stdout().to_lower().strip().split(' ') implementer_id = cmd_output[0] +@@ -529,21 +537,25 @@ if update_flags + # probe supported archs and their features + candidate_march = '' + if part_number_config.has_key('march') +- supported_marchs = ['armv8.6-a', 'armv8.5-a', 'armv8.4-a', 'armv8.3-a', +- 'armv8.2-a', 'armv8.1-a', 'armv8-a'] +- check_compiler_support = false +- foreach supported_march: supported_marchs +- if supported_march == part_number_config['march'] +- # start checking from this version downwards +- check_compiler_support = true +- endif +- if (check_compiler_support and +- cc.has_argument('-march=' + supported_march)) +- candidate_march = supported_march +- # highest supported march version found +- break +- endif +- endforeach ++ if part_number_config.get('force_march', false) ++ candidate_march = part_number_config['march'] ++ else ++ supported_marchs = ['armv8.6-a', 'armv8.5-a', 'armv8.4-a', 'armv8.3-a', ++ 'armv8.2-a', 'armv8.1-a', 'armv8-a'] ++ check_compiler_support = false ++ foreach supported_march: supported_marchs ++ if supported_march == part_number_config['march'] ++ # start checking from this version downwards ++ check_compiler_support = true ++ endif ++ if (check_compiler_support and ++ cc.has_argument('-march=' + supported_march)) ++ candidate_march = supported_march ++ # highest supported march version found ++ break ++ endif ++ endforeach ++ endif + if candidate_march == '' + error('No suitable armv8 march version found.') + endif +@@ -575,7 +587,7 @@ if update_flags + # apply supported compiler options + if part_number_config.has_key('compiler_options') + foreach flag: part_number_config['compiler_options'] +- if cc.has_argument(flag) ++ if cc.has_multi_arguments(machine_args + [flag]) + machine_args += flag + else + warning('Configuration compiler option ' + diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build -index 805d5d51d0..ee12318d4f 100644 +index 805d5d51d0..a79a3ed39c 100644 --- a/dpdk/config/meson.build +++ b/dpdk/config/meson.build @@ -22,7 +22,8 @@ is_ms_linker = is_windows and (cc.get_id() == 'clang') @@ -6587,7 +14561,40 @@ index 805d5d51d0..ee12318d4f 100644 # Libraries have the abi_version as the filename extension # and have the soname be all but the final part of the abi_version. 
-@@ -334,7 +335,7 @@ if max_lcores == 'detect' +@@ -136,7 +137,7 @@ endif + + toolchain = cc.get_id() + dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain) +-dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1) ++dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper().underscorify(), 1) + + dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8) + dpdk_conf.set('RTE_ARCH_32', cc.sizeof('void *') == 4) +@@ -188,7 +189,7 @@ if find_libnuma + endif + + has_libfdt = 0 +-fdt_dep = cc.find_library('libfdt', required: false) ++fdt_dep = cc.find_library('fdt', required: false) + if fdt_dep.found() and cc.has_header('fdt.h') + dpdk_conf.set10('RTE_HAS_LIBFDT', true) + has_libfdt = 1 +@@ -196,11 +197,12 @@ if fdt_dep.found() and cc.has_header('fdt.h') + dpdk_extra_ldflags += '-lfdt' + endif + +-libexecinfo = cc.find_library('libexecinfo', required: false) +-if libexecinfo.found() and cc.has_header('execinfo.h') ++libexecinfo = cc.find_library('execinfo', required: false) ++if libexecinfo.found() + add_project_link_arguments('-lexecinfo', language: 'c') + dpdk_extra_ldflags += '-lexecinfo' + endif ++dpdk_conf.set('RTE_BACKTRACE', cc.has_header('execinfo.h') or is_windows) + + libarchive = dependency('libarchive', required: false, method: 'pkg-config') + if libarchive.found() +@@ -334,7 +336,7 @@ if max_lcores == 'detect' error('Discovery of max_lcores is not supported for cross-compilation.') endif # overwrite the default value with discovered values @@ -6608,6 +14615,60 @@ index 51f7ceebf3..784c33df9e 100644 ar = 'powerpc64le-linux-gcc-ar' strip = 'powerpc64le-linux-strip' +diff --git a/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu b/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu +new file mode 100644 +index 0000000000..803c612cbc +--- /dev/null ++++ b/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu +@@ -0,0 +1,11 @@ ++[binaries] ++c = 'powerpc64le-linux-gnu-gcc' ++cpp = 'powerpc64le-linux-gnu-cpp' ++ar = 'powerpc64le-linux-gnu-ar' ++strip = 'powerpc64le-linux-gnu-strip' ++ ++[host_machine] ++system = 'linux' ++cpu_family = 'ppc64' ++cpu = 'power8' ++endian = 'little' +diff --git a/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu1804 b/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu1804 +deleted file mode 100644 +index 803c612cbc..0000000000 +--- a/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu1804 ++++ /dev/null +@@ -1,11 +0,0 @@ +-[binaries] +-c = 'powerpc64le-linux-gnu-gcc' +-cpp = 'powerpc64le-linux-gnu-cpp' +-ar = 'powerpc64le-linux-gnu-ar' +-strip = 'powerpc64le-linux-gnu-strip' +- +-[host_machine] +-system = 'linux' +-cpu_family = 'ppc64' +-cpu = 'power8' +-endian = 'little' +diff --git a/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu1804 b/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu1804 +new file mode 120000 +index 0000000000..de90beb3c3 +--- /dev/null ++++ b/dpdk/config/ppc/ppc64le-power8-linux-gcc-ubuntu1804 +@@ -0,0 +1 @@ ++ppc64le-power8-linux-gcc-ubuntu +\ No newline at end of file +diff --git a/dpdk/config/rte_config.h b/dpdk/config/rte_config.h +index cab4390a97..2f1a3ffb21 100644 +--- a/dpdk/config/rte_config.h ++++ b/dpdk/config/rte_config.h +@@ -37,7 +37,6 @@ + #define RTE_MAX_MEMZONE 2560 + #define RTE_MAX_TAILQ 32 + #define RTE_LOG_DP_LEVEL RTE_LOG_INFO +-#define RTE_BACKTRACE 1 + #define RTE_MAX_VFIO_CONTAINERS 64 + + /* bsd module defines */ diff --git a/dpdk/config/x86/meson.build b/dpdk/config/x86/meson.build index e25ed316f4..54345c4da3 100644 --- a/dpdk/config/x86/meson.build @@ -6650,6 +14711,32 @@ index 61ba707c9b..026844141c 100755 # comment start if 
(index($0,comment_start) > 0) { in_comment = 1 +diff --git a/dpdk/devtools/check-git-log.sh b/dpdk/devtools/check-git-log.sh +index 23c6a7d9bb..cfdc7f63a6 100755 +--- a/dpdk/devtools/check-git-log.sh ++++ b/dpdk/devtools/check-git-log.sh +@@ -119,7 +119,7 @@ words="$selfdir/words-case.txt" + for word in $(cat $words); do + bad=$(echo "$headlines" | grep -iw $word | grep -vw $word) + if [ "$word" = "Tx" ]; then +- bad=$(echo $bad | grep -v 'OCTEON\ TX') ++ bad=$(echo $bad | grep -v 'OCTEON TX') + fi + for bad_line in $bad; do + bad_word=$(echo $bad_line | cut -d":" -f2 | grep -iwo $word) +diff --git a/dpdk/devtools/check-spdx-tag.sh b/dpdk/devtools/check-spdx-tag.sh +index a606ef85f0..76ab7e2a41 100755 +--- a/dpdk/devtools/check-spdx-tag.sh ++++ b/dpdk/devtools/check-spdx-tag.sh +@@ -20,7 +20,7 @@ check_spdx() { + echo "--------------------------" + fi + git grep -L SPDX-License-Identifier -- \ +- ':^.git*' ':^.ci/*' ':^.travis.yml' \ ++ ':^.git*' ':^.mailmap' ':^.ci/*' ':^.travis.yml' \ + ':^README' ':^MAINTAINERS' ':^VERSION' ':^ABI_VERSION' \ + ':^*/Kbuild' ':^*/README' \ + ':^license/' ':^config/' ':^buildtools/' \ diff --git a/dpdk/devtools/check-symbol-change.sh b/dpdk/devtools/check-symbol-change.sh index 8fcd0ce1a1..8992214ac8 100755 --- a/dpdk/devtools/check-symbol-change.sh @@ -6697,6 +14784,156 @@ index 5bd290ac97..32e1fa5c8f 100755 +fi + exit $ret +diff --git a/dpdk/devtools/checkpatches.sh b/dpdk/devtools/checkpatches.sh +index 34a2e43845..1d15496fad 100755 +--- a/dpdk/devtools/checkpatches.sh ++++ b/dpdk/devtools/checkpatches.sh +@@ -231,6 +231,28 @@ check_release_notes() { # + grep -v $current_rel_notes + } + ++check_names() { # ++ res=0 ++ ++ old_IFS=$IFS ++ IFS=' ++' ++ for contributor in $(sed -rn '1,/^--- / {s/.*: (.*<.*@.*>)/\1/p}' $1); do ++ ! grep -qE "^$contributor($| <)" .mailmap || continue ++ name=${contributor%% <*} ++ if grep -q "^$name <" .mailmap; then ++ reason="$name mail differs from primary mail" ++ else ++ reason="$contributor is unknown" ++ fi ++ echo "$reason, please fix the commit message or update .mailmap." ++ res=1 ++ done ++ IFS=$old_IFS ++ ++ return $res ++} ++ + number=0 + range='origin/main..' + quiet=false +@@ -262,12 +284,12 @@ print_headline() { # + total=0 + status=0 + +-check () { # <patch> <commit> <title> ++check () { # <patch-file> <commit> + local ret=0 ++ local subject='' + headline_printed=false + + total=$(($total + 1)) +- ! $verbose || print_headline "$3" + if [ -n "$1" ] ; then + tmpinput=$1 + else +@@ -282,10 +304,14 @@ check () { # <patch> <commit> <title> + fi + fi + ++ # Subject can be on 2 lines ++ subject=$(sed '/^Subject: */!d;s///;N;s,\n[[:space:]]\+, ,;s,\n.*,,;q' "$tmpinput") ++ ! $verbose || print_headline "$subject" ++ + ! $verbose || printf 'Running checkpatch.pl:\n' + report=$($DPDK_CHECKPATCH_PATH $options "$tmpinput" 2>/dev/null) + if [ $? -ne 0 ] ; then +- $headline_printed || print_headline "$3" ++ $headline_printed || print_headline "$subject" + printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p' + ret=1 + fi +@@ -293,7 +319,7 @@ check () { # <patch> <commit> <title> + ! $verbose || printf '\nChecking API additions/removals:\n' + report=$($VALIDATE_NEW_API "$tmpinput") + if [ $? -ne 0 ] ; then +- $headline_printed || print_headline "$3" ++ $headline_printed || print_headline "$subject" + printf '%s\n' "$report" + ret=1 + fi +@@ -301,7 +327,7 @@ check () { # <patch> <commit> <title> + ! 
$verbose || printf '\nChecking forbidden tokens additions:\n' + report=$(check_forbidden_additions "$tmpinput") + if [ $? -ne 0 ] ; then +- $headline_printed || print_headline "$3" ++ $headline_printed || print_headline "$subject" + printf '%s\n' "$report" + ret=1 + fi +@@ -309,7 +335,7 @@ check () { # <patch> <commit> <title> + ! $verbose || printf '\nChecking __rte_experimental tags:\n' + report=$(check_experimental_tags "$tmpinput") + if [ $? -ne 0 ] ; then +- $headline_printed || print_headline "$3" ++ $headline_printed || print_headline "$subject" + printf '%s\n' "$report" + ret=1 + fi +@@ -317,7 +343,7 @@ check () { # <patch> <commit> <title> + ! $verbose || printf '\nChecking __rte_internal tags:\n' + report=$(check_internal_tags "$tmpinput") + if [ $? -ne 0 ] ; then +- $headline_printed || print_headline "$3" ++ $headline_printed || print_headline "$subject" + printf '%s\n' "$report" + ret=1 + fi +@@ -325,7 +351,15 @@ check () { # <patch> <commit> <title> + ! $verbose || printf '\nChecking release notes updates:\n' + report=$(check_release_notes "$tmpinput") + if [ $? -ne 0 ] ; then +- $headline_printed || print_headline "$3" ++ $headline_printed || print_headline "$subject" ++ printf '%s\n' "$report" ++ ret=1 ++ fi ++ ++ ! $verbose || printf '\nChecking names in commit log:\n' ++ report=$(check_names "$tmpinput") ++ if [ $? -ne 0 ] ; then ++ $headline_printed || print_headline "$subject" + printf '%s\n' "$report" + ret=1 + fi +@@ -341,20 +375,10 @@ check () { # <patch> <commit> <title> + + if [ -n "$1" ] ; then + for patch in "$@" ; do +- # Subject can be on 2 lines +- subject=$(sed '/^Subject: */!d;s///;N;s,\n[[:space:]]\+, ,;s,\n.*,,;q' "$patch") +- check "$patch" '' "$subject" ++ check "$patch" '' + done + elif [ ! -t 0 ] ; then # stdin +- subject=$(while read header value ; do +- if [ "$header" = 'Subject:' ] ; then +- IFS= read next +- continuation=$(echo "$next" | sed -n 's,^[[:space:]]\+, ,p') +- echo $value$continuation +- break +- fi +- done) +- check '' '' "$subject" ++ check '' '' + else + if [ $number -eq 0 ] ; then + commits=$(git rev-list --reverse $range) +@@ -362,8 +386,7 @@ else + commits=$(git rev-list --reverse --max-count=$number HEAD) + fi + for commit in $commits ; do +- subject=$(git log --format='%s' -1 $commit) +- check '' $commit "$subject" ++ check '' $commit + done + fi + pass=$(($total - $status)) diff --git a/dpdk/devtools/libabigail.abignore b/dpdk/devtools/libabigail.abignore index 4b676f317d..146a601ed3 100644 --- a/dpdk/devtools/libabigail.abignore @@ -6739,10 +14976,10 @@ index 4ba57a6829..6cd34f64f1 100755 $libs -a 0:0.0 --vdev net_null1 --vdev net_null2 $eal_options -- \ --no-mlockall --total-num-mbufs=2048 $testpmd_options -ia diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md -index 4245b9635c..baecb2e52e 100644 +index 4245b9635c..efb0fb3bd3 100644 --- a/dpdk/doc/api/doxy-api-index.md +++ b/dpdk/doc/api/doxy-api-index.md -@@ -9,222 +9,222 @@ API +@@ -9,222 +9,223 @@ API The public API headers are grouped by topics: - **device**: @@ -6785,6 +15022,7 @@ index 4245b9635c..baecb2e52e 100644 + [compress](@ref rte_comp.h), + [regexdev](@ref rte_regexdev.h), + [dmadev](@ref rte_dmadev.h), ++ [gpudev](@ref rte_gpudev.h), + [eventdev](@ref rte_eventdev.h), + [event_eth_rx_adapter](@ref rte_event_eth_rx_adapter.h), + [event_eth_tx_adapter](@ref rte_event_eth_tx_adapter.h), @@ -7148,6 +15386,18 @@ index 4245b9635c..baecb2e52e 100644 + [experimental APIs](@ref rte_compat.h), + [ABI versioning](@ref rte_function_versioning.h), + 
[version](@ref rte_version.h) +diff --git a/dpdk/doc/api/doxy-api.conf.in b/dpdk/doc/api/doxy-api.conf.in +index db2ca9b6ed..591abe9926 100644 +--- a/dpdk/doc/api/doxy-api.conf.in ++++ b/dpdk/doc/api/doxy-api.conf.in +@@ -100,7 +100,6 @@ GENERATE_DEPRECATEDLIST = YES + VERBATIM_HEADERS = NO + ALPHABETICAL_INDEX = NO + +-HTML_TIMESTAMP = NO + HTML_DYNAMIC_SECTIONS = YES + SEARCHENGINE = YES + SORT_MEMBER_DOCS = NO diff --git a/dpdk/doc/api/generate_examples.sh b/dpdk/doc/api/generate_examples.sh index 3e08236596..48574563ca 100755 --- a/dpdk/doc/api/generate_examples.sh @@ -7269,6 +15519,75 @@ index 1743ce301f..a55ce38800 100644 print('Upgrade sphinx to version >= 1.3.1 for ' 'improved Figure/Table number handling.', file=stderr) +diff --git a/dpdk/doc/guides/contributing/abi_policy.rst b/dpdk/doc/guides/contributing/abi_policy.rst +index 64919b6a2b..5fd4052585 100644 +--- a/dpdk/doc/guides/contributing/abi_policy.rst ++++ b/dpdk/doc/guides/contributing/abi_policy.rst +@@ -167,7 +167,7 @@ The requirements for changing the ABI are: + API becomes non-experimental, then the old one is marked with + ``__rte_deprecated``. + +- - The depreciated API should follow the notification process to be removed, ++ - The deprecated API should follow the notification process to be removed, + see :ref:`deprecation_notices`. + + - At the declaration of the next major ABI version, those ABI changes then +diff --git a/dpdk/doc/guides/contributing/abi_versioning.rst b/dpdk/doc/guides/contributing/abi_versioning.rst +index dd96527ee5..7afd1c1886 100644 +--- a/dpdk/doc/guides/contributing/abi_versioning.rst ++++ b/dpdk/doc/guides/contributing/abi_versioning.rst +@@ -94,7 +94,7 @@ that library. + ... + + However when a new ABI version is declared, for example DPDK ``22``, old +-depreciated functions may be safely removed at this point and the entire old ++deprecated functions may be safely removed at this point and the entire old + major ABI version removed, see the section :ref:`deprecating_entire_abi` on + how this may be done. + +diff --git a/dpdk/doc/guides/contributing/patches.rst b/dpdk/doc/guides/contributing/patches.rst +index bebcaf3925..84f79cd970 100644 +--- a/dpdk/doc/guides/contributing/patches.rst ++++ b/dpdk/doc/guides/contributing/patches.rst +@@ -148,6 +148,12 @@ Make your planned changes in the cloned ``dpdk`` repo. Here are some guidelines + + * Follow the :ref:`coding_style` guidelines. + ++* If you are a new contributor, or if your mail address changed, ++ you may update the ``.mailmap`` file. ++ Otherwise the new name or address will be added by a maintainer. ++ Keeping this file up-to-date will help when someone wants to contact you ++ about the changes you contributed to. ++ + * If you add new files or directories you should add your name to the ``MAINTAINERS`` file. + + * Initial submission of new PMDs should be prepared against a corresponding repo. +diff --git a/dpdk/doc/guides/cryptodevs/armv8.rst b/dpdk/doc/guides/cryptodevs/armv8.rst +index 8963f66a20..1a006754cb 100644 +--- a/dpdk/doc/guides/cryptodevs/armv8.rst ++++ b/dpdk/doc/guides/cryptodevs/armv8.rst +@@ -47,7 +47,7 @@ To build DPDK with this virtual crypto PMD, the user is required to: + + .. 
code-block:: console + +- meson build ++ meson setup build + ninja -C build + + The corresponding device can be created only if the following features +diff --git a/dpdk/doc/guides/cryptodevs/bcmfs.rst b/dpdk/doc/guides/cryptodevs/bcmfs.rst +index f5dcd59c87..d18a253913 100644 +--- a/dpdk/doc/guides/cryptodevs/bcmfs.rst ++++ b/dpdk/doc/guides/cryptodevs/bcmfs.rst +@@ -70,7 +70,7 @@ for cross compiling on x86 platform. + .. code-block:: console + + cd <DPDK-source-directory> +- meson <dest-dir> --cross-file config/arm/arm64_stingray_linux_gcc ++ meson setup <dest-dir> --cross-file config/arm/arm64_stingray_linux_gcc + cd <dest-dir> + ninja + diff --git a/dpdk/doc/guides/cryptodevs/mlx5.rst b/dpdk/doc/guides/cryptodevs/mlx5.rst index e86a6205e8..9936556cc9 100644 --- a/dpdk/doc/guides/cryptodevs/mlx5.rst @@ -7442,6 +15761,34 @@ index 29747b1c26..48a2a18aff 100644 maintenance_free = Y [Eth Rx adapter Features] +diff --git a/dpdk/doc/guides/freebsd_gsg/build_dpdk.rst b/dpdk/doc/guides/freebsd_gsg/build_dpdk.rst +index bed353473f..f35fd29a3d 100644 +--- a/dpdk/doc/guides/freebsd_gsg/build_dpdk.rst ++++ b/dpdk/doc/guides/freebsd_gsg/build_dpdk.rst +@@ -42,7 +42,7 @@ Building DPDK + The following commands can be used to build and install DPDK on a system. + The final, install, step generally needs to be run as root:: + +- meson build ++ meson setup build + cd build + ninja + ninja install +diff --git a/dpdk/doc/guides/gpus/cuda.rst b/dpdk/doc/guides/gpus/cuda.rst +index 38e22dc2c0..9c40f5a0dd 100644 +--- a/dpdk/doc/guides/gpus/cuda.rst ++++ b/dpdk/doc/guides/gpus/cuda.rst +@@ -24,8 +24,8 @@ You need to indicate to meson where CUDA headers files are through the CFLAGS va + Three ways: + + - Set ``export CFLAGS=-I/usr/local/cuda/include`` before building +-- Add CFLAGS in the meson command line ``CFLAGS=-I/usr/local/cuda/include meson build`` +-- Add the ``-Dc_args`` in meson command line ``meson build -Dc_args=-I/usr/local/cuda/include`` ++- Add CFLAGS in the meson command line ``CFLAGS=-I/usr/local/cuda/include meson setup build`` ++- Add the ``-Dc_args`` in meson command line ``meson setup build -Dc_args=-I/usr/local/cuda/include`` + + If headers are not found, the CUDA GPU driver library is not built. + diff --git a/dpdk/doc/guides/gpus/features/cuda.ini b/dpdk/doc/guides/gpus/features/cuda.ini new file mode 100644 index 0000000000..9d587eed6e @@ -7471,6 +15818,109 @@ index de80806649..dc9b318e7e 100644 v:margins="rect(4,4,4,4)" /><v:textRect height="22.5" width="90" +diff --git a/dpdk/doc/guides/howto/openwrt.rst b/dpdk/doc/guides/howto/openwrt.rst +index e1d7db2a90..be902c505f 100644 +--- a/dpdk/doc/guides/howto/openwrt.rst ++++ b/dpdk/doc/guides/howto/openwrt.rst +@@ -100,7 +100,7 @@ first. + ar = 'x86_64-openwrt-linux-ar' + strip = 'x86_64-openwrt-linux-strip' + +- meson builddir --cross-file openwrt-cross ++ meson setup builddir --cross-file openwrt-cross + ninja -C builddir + + Running DPDK application on OpenWrt +diff --git a/dpdk/doc/guides/linux_gsg/build_dpdk.rst b/dpdk/doc/guides/linux_gsg/build_dpdk.rst +index 0b08492ca2..2615d7b588 100644 +--- a/dpdk/doc/guides/linux_gsg/build_dpdk.rst ++++ b/dpdk/doc/guides/linux_gsg/build_dpdk.rst +@@ -42,7 +42,7 @@ To configure a DPDK build use: + + .. 
code-block:: console + +- meson <options> build ++ meson setup <options> build + + where "build" is the desired output build directory, and "<options>" can be + empty or one of a number of meson or DPDK-specific build options, described +@@ -129,7 +129,7 @@ automatically built as part of a meson build too. + To do so, pass a comma-separated list of the examples to build to the + `-Dexamples` meson option as below:: + +- meson -Dexamples=l2fwd,l3fwd build ++ meson setup -Dexamples=l2fwd,l3fwd build + + As with other meson options, this can also be set post-initial-config using `meson configure` in the build directory. + There is also a special value "all" to request that all example applications whose +@@ -155,12 +155,12 @@ The following meson command can be used on RHEL/Fedora systems to configure a 32 + assuming the relevant 32-bit development packages, such as a 32-bit libc, are installed:: + + PKG_CONFIG_LIBDIR=/usr/lib/pkgconfig \ +- meson -Dc_args='-m32' -Dc_link_args='-m32' build ++ meson setup -Dc_args='-m32' -Dc_link_args='-m32' build + + For Debian/Ubuntu systems, the equivalent command is:: + + PKG_CONFIG_LIBDIR=/usr/lib/i386-linux-gnu/pkgconfig \ +- meson -Dc_args='-m32' -Dc_link_args='-m32' build ++ meson setup -Dc_args='-m32' -Dc_link_args='-m32' build + + Once the build directory has been configured, + DPDK can be compiled using ``ninja`` as described above. +diff --git a/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst b/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst +index d59af58235..5cbcb67bae 100644 +--- a/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst ++++ b/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst +@@ -116,18 +116,18 @@ Cross Compiling DPDK with GNU toolchain using Meson + To cross-compile DPDK on a desired target machine we can use the following + command:: + +- meson cross-build --cross-file <target_machine_configuration> ++ meson setup cross-build --cross-file <target_machine_configuration> + ninja -C cross-build + + For example if the target machine is aarch64 we can use the following + command:: + +- meson aarch64-build-gcc --cross-file config/arm/arm64_armv8_linux_gcc ++ meson setup aarch64-build-gcc --cross-file config/arm/arm64_armv8_linux_gcc + ninja -C aarch64-build-gcc + + If the target machine is aarch32 we can use the following command:: + +- meson aarch32-build --cross-file config/arm/arm32_armv8_linux_gcc ++ meson setup aarch32-build --cross-file config/arm/arm32_armv8_linux_gcc + ninja -C aarch32-build + + LLVM/Clang toolchain +@@ -178,7 +178,7 @@ Assuming the file with augmented ``c_args`` and ``c_link_args`` + is named ``arm64_armv8_linux_clang``, + use the following command to cross-compile DPDK for the target machine:: + +- meson aarch64-build-clang --cross-file config/arm/arm64_armv8_linux_clang ++ meson setup aarch64-build-clang --cross-file config/arm/arm64_armv8_linux_clang + ninja -C aarch64-build-clang + + Cross Compiling DPDK with LLVM/Clang toolchain using Meson on Ubuntu 18.04 +@@ -195,7 +195,7 @@ On Ubuntu 18.04, these packages are needed: + + Use the following command to cross-compile DPDK for the target machine:: + +- meson aarch64-build-clang --cross-file config/arm/arm64_armv8_linux_clang_ubuntu1804 ++ meson setup aarch64-build-clang --cross-file config/arm/arm64_armv8_linux_clang_ubuntu1804 + ninja -C aarch64-build-clang + + Building for an aarch64 SoC on an aarch64 build machine +@@ -206,7 +206,7 @@ you don't need a separate cross toolchain, just a different set of + configuration options. 
To build for an aarch64 SoC, use the -Dplatform meson + option:: + +- meson soc_build -Dplatform=<target_soc> ++ meson setup soc_build -Dplatform=<target_soc> + + Substitute <target_soc> with one of the supported SoCs + diff --git a/dpdk/doc/guides/linux_gsg/enable_func.rst b/dpdk/doc/guides/linux_gsg/enable_func.rst index 7bd6b03f10..7538d04d97 100644 --- a/dpdk/doc/guides/linux_gsg/enable_func.rst @@ -7651,7 +16101,7 @@ index 2dd711bb37..75af2f01e1 100644 so please check with your platform documentation to see if it has such features, and how to enable them. diff --git a/dpdk/doc/guides/linux_gsg/sys_reqs.rst b/dpdk/doc/guides/linux_gsg/sys_reqs.rst -index d95a78d156..cfaa2db301 100644 +index d95a78d156..63990cee86 100644 --- a/dpdk/doc/guides/linux_gsg/sys_reqs.rst +++ b/dpdk/doc/guides/linux_gsg/sys_reqs.rst @@ -1,6 +1,8 @@ @@ -7663,6 +16113,15 @@ index d95a78d156..cfaa2db301 100644 System Requirements =================== +@@ -41,7 +43,7 @@ Compilation of the DPDK + + * For RHEL/Fedora systems these can be installed using ``dnf groupinstall "Development Tools"`` + * For Ubuntu/Debian systems these can be installed using ``apt install build-essential`` +- * For Alpine Linux, ``apk add gcc libc-dev bsd-compat-headers libexecinfo-dev`` ++ * For Alpine Linux, ``apk add gcc libc-dev bsd-compat-headers`` + + * Python 3.5 or later. + @@ -72,10 +74,10 @@ Compilation of the DPDK **Optional Tools:** @@ -7711,6 +16170,255 @@ index c9d0e1ad6c..db02ea1984 100644 - [1] https://lwn.net/Articles/837010/ \ No newline at end of file + [1] https://lwn.net/Articles/837010/ +diff --git a/dpdk/doc/guides/nics/ark.rst b/dpdk/doc/guides/nics/ark.rst +index da61814b5d..7897226203 100644 +--- a/dpdk/doc/guides/nics/ark.rst ++++ b/dpdk/doc/guides/nics/ark.rst +@@ -282,7 +282,7 @@ CFLAGS environment prior to the meson build step. I.e., + .. code-block:: console + + export CFLAGS="-DRTE_LIBRTE_ARK_MIN_TX_PKTLEN=60" +- meson build ++ meson setup build + + + Supported ARK RTL PCIe Instances +diff --git a/dpdk/doc/guides/nics/features.rst b/dpdk/doc/guides/nics/features.rst +index 27be2d2576..dc268a19ff 100644 +--- a/dpdk/doc/guides/nics/features.rst ++++ b/dpdk/doc/guides/nics/features.rst +@@ -174,7 +174,7 @@ Supports receiving segmented mbufs. + + .. _nic_features_buffer_split: + +-Buffer Split on Rx ++Buffer split on Rx + ------------------ + + Scatters the packets being received on specified boundaries to segmented mbufs. 
+diff --git a/dpdk/doc/guides/nics/features/bnxt.ini b/dpdk/doc/guides/nics/features/bnxt.ini +index afb5414b49..ac682c5779 100644 +--- a/dpdk/doc/guides/nics/features/bnxt.ini ++++ b/dpdk/doc/guides/nics/features/bnxt.ini +@@ -57,7 +57,7 @@ Perf doc = Y + + [rte_flow items] + any = Y +-eth = Y ++eth = P + ipv4 = Y + ipv6 = Y + gre = Y +@@ -71,7 +71,7 @@ represented_port = Y + tcp = Y + udp = Y + vf = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] +diff --git a/dpdk/doc/guides/nics/features/cxgbe.ini b/dpdk/doc/guides/nics/features/cxgbe.ini +index f674803ec4..f9912390fb 100644 +--- a/dpdk/doc/guides/nics/features/cxgbe.ini ++++ b/dpdk/doc/guides/nics/features/cxgbe.ini +@@ -36,7 +36,7 @@ x86-64 = Y + Usage doc = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + ipv6 = Y + pf = Y +@@ -44,7 +44,7 @@ phy_port = Y + tcp = Y + udp = Y + vf = Y +-vlan = Y ++vlan = P + + [rte_flow actions] + count = Y +diff --git a/dpdk/doc/guides/nics/features/default.ini b/dpdk/doc/guides/nics/features/default.ini +index c96a52b58e..8bd849e96f 100644 +--- a/dpdk/doc/guides/nics/features/default.ini ++++ b/dpdk/doc/guides/nics/features/default.ini +@@ -23,6 +23,7 @@ Shared Rx queue = + Burst mode info = + Power mgmt address monitor = + MTU update = ++Buffer split on Rx = + Scattered Rx = + LRO = + TSO = +diff --git a/dpdk/doc/guides/nics/features/dpaa2.ini b/dpdk/doc/guides/nics/features/dpaa2.ini +index 4c06841a87..09ce66c788 100644 +--- a/dpdk/doc/guides/nics/features/dpaa2.ini ++++ b/dpdk/doc/guides/nics/features/dpaa2.ini +@@ -31,7 +31,7 @@ ARMv8 = Y + Usage doc = Y + + [rte_flow items] +-eth = Y ++eth = P + gre = Y + icmp = Y + ipv4 = Y +@@ -41,7 +41,7 @@ raw = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + + [rte_flow actions] + drop = Y +diff --git a/dpdk/doc/guides/nics/features/e1000.ini b/dpdk/doc/guides/nics/features/e1000.ini +index e4bdef6da9..a9cbed1c3c 100644 +--- a/dpdk/doc/guides/nics/features/e1000.ini ++++ b/dpdk/doc/guides/nics/features/e1000.ini +@@ -31,7 +31,7 @@ x86-32 = Y + x86-64 = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + ipv6 = Y + raw = Y +diff --git a/dpdk/doc/guides/nics/features/enic.ini b/dpdk/doc/guides/nics/features/enic.ini +index 1177752c15..3247ea868e 100644 +--- a/dpdk/doc/guides/nics/features/enic.ini ++++ b/dpdk/doc/guides/nics/features/enic.ini +@@ -39,7 +39,7 @@ x86-64 = Y + Usage doc = Y + + [rte_flow items] +-eth = Y ++eth = P + gtp = Y + gtpc = Y + gtpu = Y +@@ -49,7 +49,7 @@ raw = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] +diff --git a/dpdk/doc/guides/nics/features/hinic.ini b/dpdk/doc/guides/nics/features/hinic.ini +index 9f6f0ebf3a..ada6607fe9 100644 +--- a/dpdk/doc/guides/nics/features/hinic.ini ++++ b/dpdk/doc/guides/nics/features/hinic.ini +@@ -40,7 +40,7 @@ ARMv8 = Y + + [rte_flow items] + any = Y +-eth = Y ++eth = P + icmp = Y + icmp6 = Y + ipv4 = Y +diff --git a/dpdk/doc/guides/nics/features/hns3.ini b/dpdk/doc/guides/nics/features/hns3.ini +index 405b94f05c..338b4e6864 100644 +--- a/dpdk/doc/guides/nics/features/hns3.ini ++++ b/dpdk/doc/guides/nics/features/hns3.ini +@@ -51,7 +51,7 @@ Linux = Y + ARMv8 = Y + + [rte_flow items] +-eth = Y ++eth = P + geneve = Y + icmp = Y + ipv4 = Y +@@ -60,7 +60,7 @@ nvgre = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + vxlan = Y + vxlan_gpe = Y + +diff --git a/dpdk/doc/guides/nics/features/i40e.ini b/dpdk/doc/guides/nics/features/i40e.ini +index dd18fec217..6e141de326 100644 +--- a/dpdk/doc/guides/nics/features/i40e.ini ++++ 
b/dpdk/doc/guides/nics/features/i40e.ini +@@ -54,7 +54,7 @@ Power8 = Y + [rte_flow items] + ah = Y + esp = Y +-eth = Y ++eth = P + gre = Y + gtpc = Y + gtpu = Y +@@ -69,7 +69,7 @@ sctp = Y + tcp = Y + udp = Y + vf = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] +diff --git a/dpdk/doc/guides/nics/features/iavf.ini b/dpdk/doc/guides/nics/features/iavf.ini +index 01f514239e..3860f283d5 100644 +--- a/dpdk/doc/guides/nics/features/iavf.ini ++++ b/dpdk/doc/guides/nics/features/iavf.ini +@@ -21,7 +21,7 @@ RSS key update = Y + RSS reta update = Y + VLAN filter = Y + CRC offload = Y +-VLAN offload = Y ++VLAN offload = P + L3 checksum offload = P + L4 checksum offload = P + Packet type parsing = Y +@@ -40,7 +40,7 @@ ah = Y + arp_eth_ipv4 = Y + ecpri = Y + esp = Y +-eth = Y ++eth = P + gre = Y + gtpc = Y + gtpu = Y +@@ -57,7 +57,7 @@ ppp = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + + [rte_flow actions] + count = Y +diff --git a/dpdk/doc/guides/nics/features/ice.ini b/dpdk/doc/guides/nics/features/ice.ini +index a15f42f94c..0d911590b7 100644 +--- a/dpdk/doc/guides/nics/features/ice.ini ++++ b/dpdk/doc/guides/nics/features/ice.ini +@@ -52,7 +52,7 @@ x86-64 = Y + ah = Y + arp_eth_ipv4 = Y + esp = Y +-eth = Y ++eth = P + gtpu = Y + gtp_psc = Y + icmp = Y +@@ -70,7 +70,7 @@ raw = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] diff --git a/dpdk/doc/guides/nics/features/ice_dcf.ini b/dpdk/doc/guides/nics/features/ice_dcf.ini index 4d6fb6d849..54073f0b88 100644 --- a/dpdk/doc/guides/nics/features/ice_dcf.ini @@ -7734,11 +16442,326 @@ index 4d6fb6d849..54073f0b88 100644 Basic stats = Y Linux = Y x86-32 = Y +diff --git a/dpdk/doc/guides/nics/features/igc.ini b/dpdk/doc/guides/nics/features/igc.ini +index f2c6fa28ad..b5deea3f61 100644 +--- a/dpdk/doc/guides/nics/features/igc.ini ++++ b/dpdk/doc/guides/nics/features/igc.ini +@@ -35,7 +35,7 @@ Linux = Y + x86-64 = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + ipv6 = Y + tcp = Y +diff --git a/dpdk/doc/guides/nics/features/ipn3ke.ini b/dpdk/doc/guides/nics/features/ipn3ke.ini +index defc39f525..1f6b780273 100644 +--- a/dpdk/doc/guides/nics/features/ipn3ke.ini ++++ b/dpdk/doc/guides/nics/features/ipn3ke.ini +@@ -47,13 +47,13 @@ x86-32 = Y + x86-64 = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + mpls = Y + nvgre = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] +diff --git a/dpdk/doc/guides/nics/features/ixgbe.ini b/dpdk/doc/guides/nics/features/ixgbe.ini +index c5333d1142..e5cef81f9a 100644 +--- a/dpdk/doc/guides/nics/features/ixgbe.ini ++++ b/dpdk/doc/guides/nics/features/ixgbe.ini +@@ -56,7 +56,7 @@ x86-32 = Y + x86-64 = Y + + [rte_flow items] +-eth = Y ++eth = P + e_tag = Y + fuzzy = Y + ipv4 = Y +@@ -66,7 +66,7 @@ raw = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] +diff --git a/dpdk/doc/guides/nics/features/mlx4.ini b/dpdk/doc/guides/nics/features/mlx4.ini +index 82f6f0bc0b..03f59a5f8b 100644 +--- a/dpdk/doc/guides/nics/features/mlx4.ini ++++ b/dpdk/doc/guides/nics/features/mlx4.ini +@@ -38,11 +38,11 @@ x86-64 = Y + Usage doc = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + + [rte_flow actions] + drop = Y +diff --git a/dpdk/doc/guides/nics/features/mlx5.ini b/dpdk/doc/guides/nics/features/mlx5.ini +index 845d2d4a97..e2cf7a1d9d 100644 +--- a/dpdk/doc/guides/nics/features/mlx5.ini ++++ b/dpdk/doc/guides/nics/features/mlx5.ini +@@ -15,6 +15,7 @@ Shared 
Rx queue = Y + Burst mode info = Y + Power mgmt address monitor = Y + MTU update = Y ++Buffer split on Rx = Y + Scattered Rx = Y + LRO = Y + TSO = Y +diff --git a/dpdk/doc/guides/nics/features/mvpp2.ini b/dpdk/doc/guides/nics/features/mvpp2.ini +index 1bcf74875e..653c9d08cb 100644 +--- a/dpdk/doc/guides/nics/features/mvpp2.ini ++++ b/dpdk/doc/guides/nics/features/mvpp2.ini +@@ -24,13 +24,13 @@ ARMv8 = Y + Usage doc = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + ipv6 = Y + raw = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + + [rte_flow actions] + drop = Y +diff --git a/dpdk/doc/guides/nics/features/tap.ini b/dpdk/doc/guides/nics/features/tap.ini +index b4a356e5d5..f26355e57f 100644 +--- a/dpdk/doc/guides/nics/features/tap.ini ++++ b/dpdk/doc/guides/nics/features/tap.ini +@@ -27,12 +27,12 @@ x86-64 = Y + Usage doc = Y + + [rte_flow items] +-eth = Y ++eth = P + ipv4 = Y + ipv6 = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + + [rte_flow actions] + drop = Y +diff --git a/dpdk/doc/guides/nics/features/txgbe.ini b/dpdk/doc/guides/nics/features/txgbe.ini +index 6d0cc8afdd..958f8ac793 100644 +--- a/dpdk/doc/guides/nics/features/txgbe.ini ++++ b/dpdk/doc/guides/nics/features/txgbe.ini +@@ -52,7 +52,7 @@ x86-32 = Y + x86-64 = Y + + [rte_flow items] +-eth = Y ++eth = P + e_tag = Y + fuzzy = Y + ipv4 = Y +@@ -62,7 +62,7 @@ raw = Y + sctp = Y + tcp = Y + udp = Y +-vlan = Y ++vlan = P + vxlan = Y + + [rte_flow actions] diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst -index 5f68a10ecf..791c9cc2ed 100644 +index 5f68a10ecf..380024600b 100644 --- a/dpdk/doc/guides/nics/hns3.rst +++ b/dpdk/doc/guides/nics/hns3.rst -@@ -290,5 +290,10 @@ Currently, we only support VF device driven by DPDK driver when PF is driven +@@ -30,7 +30,6 @@ Features of the HNS3 PMD are: + - DCB + - Scattered and gather for TX and RX + - Vector Poll mode driver +-- Dump register + - SR-IOV VF + - Multi-process + - MAC/VLAN filter +@@ -38,6 +37,15 @@ Features of the HNS3 PMD are: + - NUMA support + - Generic flow API + - IEEE1588/802.1AS timestamping ++- Basic stats ++- Extended stats ++- Traffic Management API ++- Speed capabilities ++- Link Auto-negotiation ++- Link flow control ++- Dump register ++- Dump private info from device ++- FW version + + Prerequisites + ------------- +@@ -58,7 +66,8 @@ The following options can be modified in the ``config/rte_config.h`` file. + + - ``RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF`` (default ``256``) + +- Number of MAX queues reserved for PF. ++ Number of MAX queues reserved for PF on HIP09 and HIP10. ++ The MAX queue number is also determined by the value the firmware report. + + Runtime Config Options + ~~~~~~~~~~~~~~~~~~~~~~ +@@ -81,7 +90,8 @@ Runtime Config Options + ``common``. + + For example:: +- -a 0000:7d:00.0,rx_func_hint=simple ++ ++ -a 0000:7d:00.0,rx_func_hint=simple + + - ``tx_func_hint`` (default ``none``) + +@@ -101,7 +111,8 @@ Runtime Config Options + ``common``. + + For example:: +- -a 0000:7d:00.0,tx_func_hint=common ++ ++ -a 0000:7d:00.0,tx_func_hint=common + + - ``dev_caps_mask`` (default ``0``) + +@@ -113,22 +124,25 @@ Runtime Config Options + Its main purpose is to debug and avoid problems. + + For example:: +- -a 0000:7d:00.0,dev_caps_mask=0xF ++ ++ -a 0000:7d:00.0,dev_caps_mask=0xF + + - ``mbx_time_limit_ms`` (default ``500``) +- Used to define the mailbox time limit by user. +- Current, the max waiting time for MBX response is 500ms, but in +- some scenarios, it is not enough. 
Since it depends on the response +- of the kernel mode driver, and its response time is related to the +- scheduling of the system. In this special scenario, most of the +- cores are isolated, and only a few cores are used for system +- scheduling. When a large number of services are started, the +- scheduling of the system will be very busy, and the reply of the +- mbx message will time out, which will cause our PMD initialization +- to fail. So provide access to set mailbox time limit for user. +- +- For example:: +- -a 0000:7d:00.0,mbx_time_limit_ms=600 ++ ++ Used to define the mailbox time limit by user. ++ Current, the max waiting time for MBX response is 500ms, but in ++ some scenarios, it is not enough. Since it depends on the response ++ of the kernel mode driver, and its response time is related to the ++ scheduling of the system. In this special scenario, most of the ++ cores are isolated, and only a few cores are used for system ++ scheduling. When a large number of services are started, the ++ scheduling of the system will be very busy, and the reply of the ++ mbx message will time out, which will cause our PMD initialization ++ to fail. So provide access to set mailbox time limit for user. ++ ++ For example:: ++ ++ -a 0000:7d:00.0,mbx_time_limit_ms=600 + + Link status event Pre-conditions + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +@@ -137,7 +151,8 @@ Firmware 1.8.0.0 and later versions support reporting link changes to the PF. + Therefore, to use the LSC for the PF driver, ensure that the firmware version + also supports reporting link changes. + If the VF driver needs to support LSC, special patch must be added: +-`<https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/drivers/net/ethernet/hisilicon/hns3?h=next-20210428&id=18b6e31f8bf4ac7af7b057228f38a5a530378e4e>`_. ++`<https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18b6e31f8bf4ac7af7b057228f38a5a530378e4e>`_. ++ + Note: The patch has been uploaded to 5.13 of the Linux kernel mainline. + + +@@ -197,36 +212,50 @@ Generic flow API + + - ``RSS Flow`` + +- RSS Flow supports to set hash input set, hash function, enable hash +- and configure queues. +- For example: +- Configure queues as queue 0, 1, 2, 3. ++ RSS Flow supports for creating rule base on input tuple, hash key, queues ++ and hash algorithm. But hash key, queues and hash algorithm are the global ++ configuration for hardware which will affect other rules. ++ The rule just setting input tuple is completely independent. ++ ++ Run ``testpmd``: + + .. code-block:: console + +- testpmd> flow create 0 ingress pattern end actions rss types end \ +- queues 0 1 2 3 end / end ++ dpdk-testpmd -a 0000:7d:00.0 -l 10-18 -- -i --rxq=8 --txq=8 ++ ++ All IP packets can be distributed to 8 queues. + +- Enable hash and set input set for IPv4-TCP. ++ Set IPv4-TCP packet is distributed to 8 queues based on L3/L4 SRC only. + + .. code-block:: console + +- testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \ +- actions rss types ipv4-tcp l3-src-only end queues end / end ++ testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end actions \ ++ rss types ipv4-tcp l4-src-only l3-src-only end queues end / end + +- Set symmetric hash enable for flow type IPv4-TCP. ++ Disable IPv4 packet RSS hash. + + .. 
code-block:: console + +- testpmd> flow create 0 ingress pattern eth / ipv4 / tcp / end \ +- actions rss types ipv4-tcp end queues end func symmetric_toeplitz / end ++ testpmd> flow create 0 ingress pattern eth / ipv4 / end actions rss \ ++ types none end queues end / end + +- Set hash function as simple xor. ++ Set hash function as symmetric Toeplitz. + + .. code-block:: console + + testpmd> flow create 0 ingress pattern end actions rss types end \ +- queues end func simple_xor / end ++ queues end func symmetric_toeplitz / end ++ ++ In this case, all packets that enabled RSS are hashed using symmetric ++ Toeplitz algorithm. ++ ++ Flush all RSS rules ++ ++ .. code-block:: console ++ ++ testpmd> flow flush 0 ++ ++ The RSS configurations of hardwre is back to the one ethdev ops set. + + Statistics + ---------- +@@ -290,5 +319,10 @@ Currently, we only support VF device driven by DPDK driver when PF is driven by kernel mode hns3 ethdev driver. VF is not supported when PF is driven by DPDK driver. @@ -7750,10 +16773,32 @@ index 5f68a10ecf..791c9cc2ed 100644 Build with ICC is not supported yet. X86-32, Power8, ARMv7 and BSD are not supported yet. diff --git a/dpdk/doc/guides/nics/i40e.rst b/dpdk/doc/guides/nics/i40e.rst -index ef91b3a1ac..aedb1afc4b 100644 +index ef91b3a1ac..9629afede7 100644 --- a/dpdk/doc/guides/nics/i40e.rst +++ b/dpdk/doc/guides/nics/i40e.rst -@@ -101,6 +101,14 @@ For X710/XL710/XXV710, +@@ -88,19 +88,30 @@ Windows Prerequisites + - To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository + <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_. + +-Recommended Matching List +-------------------------- ++Kernel driver and Firmware Matching List ++---------------------------------------- + +-It is highly recommended to upgrade the i40e kernel driver and firmware to +-avoid the compatibility issues with i40e PMD. Here is the suggested matching +-list which has been tested and verified. The detailed information can refer +-to chapter Tested Platforms/Tested NICs in release notes. ++It is highly recommended to upgrade the i40e kernel driver and firmware ++to avoid the compatibility issues with i40e PMD. ++The table below shows a summary of the DPDK versions ++with corresponding out-of-tree Linux kernel drivers and firmware. ++The full list of in-tree and out-of-tree Linux kernel drivers from kernel.org ++and Linux distributions that were tested and verified ++are listed in the Tested Platforms section of the Release Notes for each release. 
+ + For X710/XL710/XXV710, + +--------------+-----------------------+------------------+ | DPDK version | Kernel driver version | Firmware version | +==============+=======================+==================+ @@ -7768,7 +16813,7 @@ index ef91b3a1ac..aedb1afc4b 100644 | 21.02 | 2.14.13 | 8.00 | +--------------+-----------------------+------------------+ | 20.11 | 2.14.13 | 8.00 | -@@ -148,6 +156,14 @@ For X722, +@@ -148,6 +159,14 @@ For X722, +--------------+-----------------------+------------------+ | DPDK version | Kernel driver version | Firmware version | +==============+=======================+==================+ @@ -7783,7 +16828,7 @@ index ef91b3a1ac..aedb1afc4b 100644 | 21.02 | 2.14.13 | 5.00 | +--------------+-----------------------+------------------+ | 20.11 | 2.13.10 | 5.00 | -@@ -771,6 +787,13 @@ it will fail and return the info "Conflict with the first rule's input set", +@@ -771,6 +790,13 @@ it will fail and return the info "Conflict with the first rule's input set", which means the current rule's input set conflicts with the first rule's. Remove the first rule if want to change the input set of the PCTYPE. @@ -7798,10 +16843,31 @@ index ef91b3a1ac..aedb1afc4b 100644 ------------------------------------------------------ diff --git a/dpdk/doc/guides/nics/ice.rst b/dpdk/doc/guides/nics/ice.rst -index f95fef8cf0..6b903b9bbc 100644 +index f95fef8cf0..569fd9dadc 100644 --- a/dpdk/doc/guides/nics/ice.rst +++ b/dpdk/doc/guides/nics/ice.rst -@@ -58,6 +58,12 @@ The detailed information can refer to chapter Tested Platforms/Tested NICs in re +@@ -41,13 +41,16 @@ Windows Prerequisites + - Loading of private Dynamic Device Personalization (DDP) package is not supported on Windows. + + +-Recommended Matching List +-------------------------- ++Kernel driver, DDP and Firmware Matching List ++--------------------------------------------- + + It is highly recommended to upgrade the ice kernel driver, firmware and DDP package + to avoid the compatibility issues with ice PMD. +-Here is the suggested matching list which has been tested and verified. +-The detailed information can refer to chapter Tested Platforms/Tested NICs in release notes. ++The table below shows a summary of the DPDK versions ++with corresponding out-of-tree Linux kernel drivers, DDP package and firmware. ++The full list of in-tree and out-of-tree Linux kernel drivers from kernel.org ++and Linux distributions that were tested and verified ++are listed in the Tested Platforms section of the Release Notes for each release. + + +-----------+---------------+-----------------+-----------+--------------+-----------+ + | DPDK | Kernel Driver | OS Default DDP | COMMS DDP | Wireless DDP | Firmware | +@@ -58,6 +61,12 @@ The detailed information can refer to chapter Tested Platforms/Tested NICs in re +-----------+---------------+-----------------+-----------+--------------+-----------+ | 21.05 | 1.6.5 | 1.3.26 | 1.3.30 | 1.3.6 | 3.0 | +-----------+---------------+-----------------+-----------+--------------+-----------+ @@ -7814,11 +16880,45 @@ index f95fef8cf0..6b903b9bbc 100644 Pre-Installation Configuration ------------------------------ +diff --git a/dpdk/doc/guides/nics/intel_vf.rst b/dpdk/doc/guides/nics/intel_vf.rst +index 648af39c22..98560e7204 100644 +--- a/dpdk/doc/guides/nics/intel_vf.rst ++++ b/dpdk/doc/guides/nics/intel_vf.rst +@@ -643,3 +643,20 @@ Inline IPsec Support + supports inline IPsec processing for IAVF PMD. 
For more details see the + IPsec Security Gateway Sample Application and Security library + documentation. ++ ++ice: VF inserts VLAN tag incorrectly on AVX-512 Tx path ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++When the kernel driver requests the VF to use the L2TAG2 field of the Tx context ++descriptor to insert the hardware offload VLAN tag, ++AVX-512 Tx path cannot handle this case correctly ++due to its lack of support for the Tx context descriptor. ++ ++The VLAN tag will be inserted to the wrong location (inner of QinQ) ++on AVX-512 Tx path. ++That is inconsistent with the behavior of PF (outer of QinQ). ++The ice kernel driver version newer than 1.8.9 requests to use L2TAG2 ++and has this issue. ++ ++Set the parameter `--force-max-simd-bitwidth` as 64/128/256 ++to avoid selecting AVX-512 Tx path. diff --git a/dpdk/doc/guides/nics/ixgbe.rst b/dpdk/doc/guides/nics/ixgbe.rst -index 82fa453fa2..ad1a3da610 100644 +index 82fa453fa2..1b7cc17bfb 100644 --- a/dpdk/doc/guides/nics/ixgbe.rst +++ b/dpdk/doc/guides/nics/ixgbe.rst -@@ -101,6 +101,23 @@ To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be ch +@@ -16,8 +16,6 @@ The wider register gives space to hold multiple packet buffers so as to save ins + There is no change to PMD API. The RX/TX handler are the only two entries for vPMD packet I/O. + They are transparently registered at runtime RX/TX execution if all condition checks pass. + +-1. To date, only an SSE version of IX GBE vPMD is available. +- + Some constraints apply as pre-conditions for specific optimizations on bulk packet transfers. + The following sections explain RX and TX constraints in the vPMD. + +@@ -101,6 +99,23 @@ To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be ch fdir_conf->mode will also be checked. @@ -7887,7 +16987,7 @@ index a25add7c47..66493a1157 100644 .. note:: diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst -index feb2e57cee..ce40d1cdac 100644 +index feb2e57cee..e2fb45b1db 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst @@ -19,7 +19,7 @@ Information and documentation about these adapters can be found on the @@ -7921,16 +17021,33 @@ index feb2e57cee..ce40d1cdac 100644 - Raw encapsulation: - The input buffer, used as outer header, is not validated. -@@ -420,6 +426,8 @@ Limitations +@@ -382,6 +388,8 @@ Limitations + TCP header (122B). + - Rx queue with LRO offload enabled, receiving a non-LRO packet, can forward + it with size limited to max LRO size, not to max RX packet length. ++ - The driver rounds down the port configuration value ``max_lro_pkt_size`` ++ (from ``rte_eth_rxmode``) to a multiple of 256 due to hardware limitation. + - LRO can be used with outer header of TCP packets of the standard format: + eth (with or without vlan) / ipv4 or ipv6 / tcp / payload + +@@ -418,8 +426,14 @@ Limitations + encapsulation actions. + - For NIC Rx flow, supports ``MARK``, ``COUNT``, ``QUEUE``, ``RSS`` in the sample actions list. - - For E-Switch mirroring flow, supports ``RAW ENCAP``, ``Port ID``, - ``VXLAN ENCAP``, ``NVGRE ENCAP`` in the sample actions list. +- - For E-Switch mirroring flow, supports ``RAW ENCAP``, ``Port ID``, +- ``VXLAN ENCAP``, ``NVGRE ENCAP`` in the sample actions list. ++ - For E-Switch mirroring flow, supports ``RAW_ENCAP``, ``PORT_ID``, ++ ``VXLAN_ENCAP``, ``NVGRE_ENCAP`` in the sample actions list. ++ - For E-Switch mirroring flow with sample ratio = 1, the ``ENCAP`` action ++ supports uplink port only. 
++ - For E-Switch mirroring flow with sample ratio = 1, the ``PORT`` and ``JUMP`` actions ++ are not supported without presented ``ENCAP`` action in the sample actions list. + - For ConnectX-5 trusted device, the application metadata with SET_TAG index 0 + is not supported before ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action. - Modify Field flow: -@@ -428,6 +436,12 @@ Limitations +@@ -428,6 +442,12 @@ Limitations - Modification of the 802.1Q Tag, VXLAN Network or GENEVE Network ID's is not supported. - Encapsulation levels are not supported, can modify outermost header fields only. - Offsets must be 32-bits aligned, cannot skip past the boundary of a field. @@ -7943,7 +17060,7 @@ index feb2e57cee..ce40d1cdac 100644 - IPv6 header item 'proto' field, indicating the next header protocol, should not be set as extension header. -@@ -462,17 +476,18 @@ Limitations +@@ -462,17 +482,18 @@ Limitations - Integrity: @@ -7965,7 +17082,7 @@ index feb2e57cee..ce40d1cdac 100644 - Connection tracking: -@@ -508,6 +523,8 @@ Limitations +@@ -508,6 +529,8 @@ Limitations from the reference "Clock Queue" completions, the scheduled send timestamps should not be specified with non-zero MSB. @@ -7974,7 +17091,7 @@ index feb2e57cee..ce40d1cdac 100644 Statistics ---------- -@@ -554,15 +571,6 @@ Environment variables +@@ -554,15 +577,6 @@ Environment variables The register would be flushed to HW usually when the write-combining buffer becomes full, but it depends on CPU design. @@ -7990,7 +17107,7 @@ index feb2e57cee..ce40d1cdac 100644 Run-time configuration ~~~~~~~~~~~~~~~~~~~~~~ -@@ -649,7 +657,7 @@ Driver options +@@ -649,7 +663,7 @@ Driver options A timeout value is set in the driver to control the waiting time before dropping a packet. Once the timer is expired, the delay drop will be @@ -7999,7 +17116,7 @@ index feb2e57cee..ce40d1cdac 100644 it, a rearming is needed and it is part of the kernel driver starting from OFED 5.5. -@@ -1033,7 +1041,7 @@ Driver options +@@ -1033,7 +1047,7 @@ Driver options For the MARK action the last 16 values in the full range are reserved for internal PMD purposes (to emulate FLAG action). The valid range for the @@ -8008,7 +17125,7 @@ index feb2e57cee..ce40d1cdac 100644 for the 24-bit mode, the flows with the MARK action value outside the specified range will be rejected. -@@ -1317,7 +1325,7 @@ DPDK and must be installed separately: +@@ -1317,7 +1331,7 @@ DPDK and must be installed separately: - mlx5_core: hardware driver managing Mellanox ConnectX-4/ConnectX-5/ConnectX-6/BlueField devices and related Ethernet kernel network devices. @@ -8017,7 +17134,7 @@ index feb2e57cee..ce40d1cdac 100644 - ib_uverbs: user space driver for Verbs (entry point for libibverbs). - **Firmware update** -@@ -1382,9 +1390,9 @@ managers on most distributions, this PMD requires Ethernet extensions that +@@ -1382,9 +1396,9 @@ managers on most distributions, this PMD requires Ethernet extensions that may not be supported at the moment (this is a work in progress). `Mellanox OFED @@ -8029,6 +17146,231 @@ index feb2e57cee..ce40d1cdac 100644 include the necessary support and should be used in the meantime. For DPDK, only libibverbs, libmlx5, mlnx-ofed-kernel packages and firmware updates are required from that distribution. +diff --git a/dpdk/doc/guides/nics/mvneta.rst b/dpdk/doc/guides/nics/mvneta.rst +index b7f279c3cb..2ee2637a58 100644 +--- a/dpdk/doc/guides/nics/mvneta.rst ++++ b/dpdk/doc/guides/nics/mvneta.rst +@@ -117,7 +117,7 @@ Add path to libmusdk.pc in PKG_CONFIG_PATH environment variable. + .. 
code-block:: console + + export PKG_CONFIG_PATH=$<musdk_install_dir>/lib/pkgconfig/:$PKG_CONFIG_PATH +- meson build --cross-file config/arm/arm64_armada_linux_gcc ++ meson setup build --cross-file config/arm/arm64_armada_linux_gcc + ninja -C build + + +diff --git a/dpdk/doc/guides/nics/mvpp2.rst b/dpdk/doc/guides/nics/mvpp2.rst +index e40fed7286..cbfa47afd8 100644 +--- a/dpdk/doc/guides/nics/mvpp2.rst ++++ b/dpdk/doc/guides/nics/mvpp2.rst +@@ -133,7 +133,7 @@ Add path to libmusdk.pc in PKG_CONFIG_PATH environment variable. + + export PKG_CONFIG_PATH=$<musdk_install_dir>/lib/pkgconfig/:$PKG_CONFIG_PATH + +- meson build --cross-file config/arm/arm64_armada_linux_gcc ++ meson setup build --cross-file config/arm/arm64_armada_linux_gcc + ninja -C build + + +diff --git a/dpdk/doc/guides/nics/tap.rst b/dpdk/doc/guides/nics/tap.rst +index 681010d9ed..84ec805170 100644 +--- a/dpdk/doc/guides/nics/tap.rst ++++ b/dpdk/doc/guides/nics/tap.rst +@@ -34,14 +34,14 @@ Using the option ``mac=fixed`` you can create a fixed known MAC address:: + + The MAC address will have a fixed value with the last octet incrementing by one + for each interface string containing ``mac=fixed``. The MAC address is formatted +-as 00:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the +-actual MAC address: ``00:64:74:61:70:[00-FF]``. ++as 02:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the ++actual MAC address: ``02:64:74:61:70:[00-FF]``. + +- --vdev=net_tap0,mac="00:64:74:61:70:11" ++ --vdev=net_tap0,mac="02:64:74:61:70:11" + + The MAC address will have a user value passed as string. The MAC address is in + format with delimiter ``:``. The string is byte converted to hex and you get +-the actual MAC address: ``00:64:74:61:70:11``. ++the actual MAC address: ``02:64:74:61:70:11``. + + It is possible to specify a remote netdevice to capture packets from by adding + ``remote=foo1``, for example:: +diff --git a/dpdk/doc/guides/nics/virtio.rst b/dpdk/doc/guides/nics/virtio.rst +index 7c0ae2b3af..0de445c6e0 100644 +--- a/dpdk/doc/guides/nics/virtio.rst ++++ b/dpdk/doc/guides/nics/virtio.rst +@@ -43,7 +43,7 @@ Features and Limitations of virtio PMD + In this release, the virtio PMD provides the basic functionality of packet reception and transmission. + + * It supports merge-able buffers per packet when receiving packets and scattered buffer per packet +- when transmitting packets. The packet size supported is from 64 to 1518. ++ when transmitting packets. The packet size supported is from 64 to 9728. + + * It supports multicast packets and promiscuous mode. + +@@ -304,6 +304,7 @@ Prerequisites for Rx interrupts + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + To support Rx interrupts, ++ + #. Check if guest kernel supports VFIO-NOIOMMU: + + Linux started to support VFIO-NOIOMMU since 4.8.0. Make sure the guest +@@ -466,12 +467,16 @@ according to below configuration: + + #. Split virtqueue mergeable path: If Rx mergeable is negotiated, in-order feature is + not negotiated, this path will be selected. ++ + #. Split virtqueue non-mergeable path: If Rx mergeable and in-order feature are not + negotiated, also Rx offload(s) are requested, this path will be selected. ++ + #. Split virtqueue in-order mergeable path: If Rx mergeable and in-order feature are + both negotiated, this path will be selected. ++ + #. Split virtqueue in-order non-mergeable path: If in-order feature is negotiated and + Rx mergeable is not negotiated, this path will be selected. ++ + #. 
Split virtqueue vectorized Rx path: If Rx mergeable is disabled and no Rx offload + requested, this path will be selected. + +@@ -480,16 +485,21 @@ according to below configuration: + + #. Packed virtqueue mergeable path: If Rx mergeable is negotiated, in-order feature + is not negotiated, this path will be selected. ++ + #. Packed virtqueue non-mergeable path: If Rx mergeable and in-order feature are not + negotiated, this path will be selected. ++ + #. Packed virtqueue in-order mergeable path: If in-order and Rx mergeable feature are + both negotiated, this path will be selected. ++ + #. Packed virtqueue in-order non-mergeable path: If in-order feature is negotiated and + Rx mergeable is not negotiated, this path will be selected. ++ + #. Packed virtqueue vectorized Rx path: If building and running environment support + (AVX512 || NEON) && in-order feature is negotiated && Rx mergeable + is not negotiated && TCP_LRO Rx offloading is disabled && vectorized option enabled, + this path will be selected. ++ + #. Packed virtqueue vectorized Tx path: If building and running environment support + (AVX512 || NEON) && in-order feature is negotiated && vectorized option enabled, + this path will be selected. +@@ -567,5 +577,7 @@ or configuration, below steps can help you identify which path you selected and + root cause faster. + + #. Run vhost/virtio test case; ++ + #. Run "perf top" and check virtio Rx/Tx callback names; ++ + #. Identify which virtio path is selected refer to above table. +diff --git a/dpdk/doc/guides/platform/bluefield.rst b/dpdk/doc/guides/platform/bluefield.rst +index 635e414600..e96c43c71e 100644 +--- a/dpdk/doc/guides/platform/bluefield.rst ++++ b/dpdk/doc/guides/platform/bluefield.rst +@@ -62,7 +62,7 @@ rdma-core library with corresponding kernel drivers is required. + + .. code-block:: console + +- meson build ++ meson setup build + ninja -C build + + Cross Compilation +@@ -116,5 +116,5 @@ Then, untar the tarball at the cross toolchain directory on the x86 host. + + .. code-block:: console + +- meson build --cross-file config/arm/arm64_bluefield_linux_gcc ++ meson setup build --cross-file config/arm/arm64_bluefield_linux_gcc + ninja -C build +diff --git a/dpdk/doc/guides/platform/cnxk.rst b/dpdk/doc/guides/platform/cnxk.rst +index 88995cc70c..8eff2c4cd9 100644 +--- a/dpdk/doc/guides/platform/cnxk.rst ++++ b/dpdk/doc/guides/platform/cnxk.rst +@@ -105,7 +105,9 @@ where even VF bound to the first domain and odd VF bound to the second domain. + Typical application usage models are, + + #. Communication between the Linux kernel and DPDK application. ++ + #. Exception path to Linux kernel from DPDK application as SW ``KNI`` replacement. ++ + #. Communication between two different DPDK applications. + + SDP interface +@@ -124,6 +126,7 @@ can bind PF or VF to use SDP interface and it will be enumerated as ethdev ports + The primary use case for SDP is to enable the smart NIC use case. Typical usage models are, + + #. Communication channel between remote host and cnxk SoC over PCIe. ++ + #. Transfer packets received from network interface to remote host over PCIe and + vice-versa. + +@@ -244,7 +247,7 @@ context or stats using debugfs. + + Enable ``debugfs`` by: + +-1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUGFS=y``. ++1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUG_FS=y``. + 2. Boot OCTEON CN9K/CN10K with debugfs supported kernel. + 3. Verify ``debugfs`` mounted by default "mount | grep -i debugfs" or mount it manually by using. 
+ +@@ -572,7 +575,7 @@ Native Compilation + + .. code-block:: console + +- meson build ++ meson setup build + ninja -C build + + Cross Compilation +@@ -582,7 +585,7 @@ Refer to :doc:`../linux_gsg/cross_build_dpdk_for_arm64` for generic arm64 detail + + .. code-block:: console + +- meson build --cross-file config/arm/arm64_cn10k_linux_gcc ++ meson setup build --cross-file config/arm/arm64_cn10k_linux_gcc + ninja -C build + + .. note:: +diff --git a/dpdk/doc/guides/platform/octeontx.rst b/dpdk/doc/guides/platform/octeontx.rst +index 42ddb1762e..1459dc7109 100644 +--- a/dpdk/doc/guides/platform/octeontx.rst ++++ b/dpdk/doc/guides/platform/octeontx.rst +@@ -94,14 +94,14 @@ drivers can be compiled with the following steps, + + .. code-block:: console + +- meson build -Dexamples=<application> ++ meson setup build -Dexamples=<application> + ninja -C build + + The example applications can be compiled using the following: + + .. code-block:: console + +- meson build -Dexamples=<application> ++ meson setup build -Dexamples=<application> + ninja -C build + + Cross Compilation +@@ -127,7 +127,7 @@ Now this build system can be used to build applications for **OCTEON TX** :sup:` + .. code-block:: console + + cd <dpdk directory> +- meson build --cross-file config/arm/arm64_thunderx_linux_gcc ++ meson setup build --cross-file config/arm/arm64_thunderx_linux_gcc + ninja -C build + + The example applications can be compiled using the following: +@@ -135,7 +135,7 @@ The example applications can be compiled using the following: + .. code-block:: console + + cd <dpdk directory> +- meson build --cross-file config/arm/arm64_thunderx_linux_gcc -Dexamples=<application> ++ meson setup build --cross-file config/arm/arm64_thunderx_linux_gcc -Dexamples=<application> + ninja -C build + + .. note:: diff --git a/dpdk/doc/guides/prog_guide/bpf_lib.rst b/dpdk/doc/guides/prog_guide/bpf_lib.rst index 1feb7734a3..1cf2d59429 100644 --- a/dpdk/doc/guides/prog_guide/bpf_lib.rst @@ -8055,6 +17397,99 @@ index 1feb7734a3..1cf2d59429 100644 R0 = ntohl(*(uint32_t *)R0); and ``R1-R5`` were scratched. +diff --git a/dpdk/doc/guides/prog_guide/build-sdk-meson.rst b/dpdk/doc/guides/prog_guide/build-sdk-meson.rst +index 30ce805902..b3c1517931 100644 +--- a/dpdk/doc/guides/prog_guide/build-sdk-meson.rst ++++ b/dpdk/doc/guides/prog_guide/build-sdk-meson.rst +@@ -9,7 +9,7 @@ Summary + For many platforms, compiling and installing DPDK should work using the + following set of commands:: + +- meson build ++ meson setup build + cd build + ninja + ninja install +@@ -57,12 +57,12 @@ Configuring the Build + ---------------------- + + To configure a build, run the meson tool, passing the path to the directory +-to be used for the build e.g. ``meson build``, as shown above. If calling ++to be used for the build e.g. ``meson setup build``, as shown above. If calling + meson from somewhere other than the root directory of the DPDK project the + path to the root directory should be passed as the first parameter, and the + build path as the second. For example, to build DPDK in /tmp/dpdk-build:: + +- user@host:/tmp$ meson ~user/dpdk dpdk-build ++ user@host:/tmp$ meson setup ~user/dpdk dpdk-build + + Meson will then configure the build based on settings in the project's + meson.build files, and by checking the build environment for e.g. compiler +@@ -80,24 +80,29 @@ available run ``meson configure`` in the build directory. + Examples of adjusting the defaults when doing initial meson configuration. 
+ Project-specific options are passed used -Doption=value:: + +- meson --werror werrorbuild # build with warnings as errors ++ # build with warnings as errors ++ meson setup --werror werrorbuild + +- meson --buildtype=debug debugbuild # build for debugging ++ # build for debugging ++ meson setup --buildtype=debug debugbuild + +- meson -Dexamples=l3fwd,l2fwd fwdbuild # build some examples as +- # part of the normal DPDK build ++ # build some examples as part of the normal DPDK build ++ meson setup -Dexamples=l3fwd,l2fwd fwdbuild + +- meson -Dmax_lcores=8 smallbuild # scale build for smaller systems ++ # scale build for smaller systems ++ meson setup -Dmax_lcores=8 smallbuild + +- meson -Denable_docs=true fullbuild # build and install docs ++ # build and install docs ++ meson setup -Denable_docs=true fullbuild + +- meson -Dcpu_instruction_set=generic # use builder-independent baseline -march ++ # use builder-independent baseline -march ++ meson setup -Dcpu_instruction_set=generic + +- meson -Ddisable_drivers=event/*,net/tap # disable tap driver and all +- # eventdev PMDs for a smaller build ++ # disable tap driver and all eventdev PMDs for a smaller build ++ meson setup -Ddisable_drivers=event/*,net/tap + +- meson -Denable_trace_fp=true tracebuild # build with fast path traces +- # enabled ++ # build with fast path traces enabled ++ meson setup -Denable_trace_fp=true tracebuild + + Examples of setting some of the same options using meson configure:: + +@@ -135,7 +140,7 @@ As well as those settings taken from ``meson configure``, other options + such as the compiler to use can be passed via environment variables. For + example:: + +- CC=clang meson clang-build ++ CC=clang meson setup clang-build + + .. note:: + +@@ -188,12 +193,12 @@ Cross Compiling DPDK + To cross-compile DPDK on a desired target machine we can use the following + command:: + +- meson cross-build --cross-file <target_machine_configuration> ++ meson setup cross-build --cross-file <target_machine_configuration> + + For example if the target machine is arm64 we can use the following + command:: + +- meson arm-build --cross-file config/arm/arm64_armv8_linux_gcc ++ meson setup arm-build --cross-file config/arm/arm64_armv8_linux_gcc + + where config/arm/arm64_armv8_linux_gcc contains settings for the compilers + and other build tools to be used, as well as characteristics of the target diff --git a/dpdk/doc/guides/prog_guide/compressdev.rst b/dpdk/doc/guides/prog_guide/compressdev.rst index 07d1a62a63..2a59c434c1 100644 --- a/dpdk/doc/guides/prog_guide/compressdev.rst @@ -8658,6 +18093,93 @@ index 67b11e1563..3b4ef502b2 100644 } Rx event vectorization for SW Rx adapter +diff --git a/dpdk/doc/guides/prog_guide/event_timer_adapter.rst b/dpdk/doc/guides/prog_guide/event_timer_adapter.rst +index 7547059a05..7733424aac 100644 +--- a/dpdk/doc/guides/prog_guide/event_timer_adapter.rst ++++ b/dpdk/doc/guides/prog_guide/event_timer_adapter.rst +@@ -35,7 +35,7 @@ device upon timer expiration. + + The Event Timer Adapter API represents each event timer with a generic struct, + which contains an event and user metadata. The ``rte_event_timer`` struct is +-defined in ``lib/event/librte_event_timer_adapter.h``. ++defined in ``rte_event_timer_adapter.h``. + + .. _timer_expiry_event: + +@@ -107,18 +107,19 @@ to ``rte_event_timer_adapter_create()``. + + .. 
code-block:: c + +- #define NSECPERSEC 1E9 // No of ns in 1 sec ++ #define NSECPERSEC 1E9 + const struct rte_event_timer_adapter_conf adapter_config = { + .event_dev_id = event_dev_id, + .timer_adapter_id = 0, ++ .socket_id = rte_socket_id(), + .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, +- .timer_tick_ns = NSECPERSEC / 10, // 100 milliseconds +- .max_tmo_nsec = 180 * NSECPERSEC // 2 minutes ++ .timer_tick_ns = NSECPERSEC / 10, ++ .max_tmo_ns = 180 * NSECPERSEC, + .nb_timers = 40000, +- .timer_adapter_flags = 0, ++ .flags = 0, + }; + +- struct rte_event_timer_adapter *adapter = NULL; ++ struct rte_event_timer_adapter *adapter; + adapter = rte_event_timer_adapter_create(&adapter_config); + + if (adapter == NULL) { ... }; +@@ -145,9 +146,9 @@ to support timers of the respective type. A periodic timer expires at a fixed + time interval repeatedly till it is cancelled. A non-periodic timer expires only + once. The periodic capability flag, ``RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC``, + can be set for implementations that support periodic mode if desired. To +-configure an adapter in periodic mode, ``timer_adapter_flags`` of ++configure an adapter in periodic mode, ``flags`` of + ``rte_event_timer_adapter_conf`` is set to include the periodic flag +-``RTE_EVENT_TIMER_ADAPTER_F_PERIODIC``. Maximum timeout (``max_tmo_nsec``) does ++``RTE_EVENT_TIMER_ADAPTER_F_PERIODIC``. Maximum timeout (``max_tmo_ns``) does + not apply to periodic mode. + + Retrieve Event Timer Adapter Contextual Information +@@ -228,9 +229,7 @@ Note that it is necessary to initialize the event timer state to + RTE_EVENT_TIMER_NOT_ARMED. Also note that we have saved a pointer to the + ``conn`` object in the timer's event payload. This will allow us to locate + the connection object again once we dequeue the timer expiry event from the +-event device later. As a convenience, the application may specify no value for +-ev.event_ptr, and the adapter will by default set it to point at the event +-timer itself. ++event device later. + + Now we can arm the event timer with ``rte_event_timer_arm_burst()``: + +diff --git a/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst +index e605b86376..30d13bcc61 100644 +--- a/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst ++++ b/dpdk/doc/guides/prog_guide/generic_segmentation_offload_lib.rst +@@ -204,7 +204,7 @@ To segment an outgoing packet, an application must: + - a flag, that indicates whether the IPv4 headers of output segments should + contain fixed or incremental ID values. + +-2. Set the appropriate ol_flags in the mbuf. ++#. Set the appropriate ol_flags in the mbuf. + + - The GSO library use the value of an mbuf's ``ol_flags`` attribute to + determine how a packet should be segmented. It is the application's +diff --git a/dpdk/doc/guides/prog_guide/graph_lib.rst b/dpdk/doc/guides/prog_guide/graph_lib.rst +index 1cfdc86433..4ab0623f44 100644 +--- a/dpdk/doc/guides/prog_guide/graph_lib.rst ++++ b/dpdk/doc/guides/prog_guide/graph_lib.rst +@@ -173,7 +173,7 @@ Create the graph object + ~~~~~~~~~~~~~~~~~~~~~~~ + Now that the nodes are linked, Its time to create a graph by including + the required nodes. The application can provide a set of node patterns to +-form a graph object. The ``famish()`` API used underneath for the pattern ++form a graph object. The ``fnmatch()`` API used underneath for the pattern + matching to include the required nodes. 
After the graph create any changes to + nodes or graph is not allowed. + diff --git a/dpdk/doc/guides/prog_guide/img/flow_tru_droppper.png b/dpdk/doc/guides/prog_guide/img/flow_tru_dropper.png similarity index 100% rename from dpdk/doc/guides/prog_guide/img/flow_tru_droppper.png @@ -8688,6 +18210,38 @@ index e3708a9377..98a6b83983 100644 xml:space="preserve" style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144" x="16.351753" +diff --git a/dpdk/doc/guides/prog_guide/lto.rst b/dpdk/doc/guides/prog_guide/lto.rst +index f79c449598..ff9f47a8f4 100644 +--- a/dpdk/doc/guides/prog_guide/lto.rst ++++ b/dpdk/doc/guides/prog_guide/lto.rst +@@ -30,4 +30,4 @@ Link time optimization can be enabled by setting meson built-in 'b_lto' option: + + .. code-block:: console + +- meson build -Db_lto=true ++ meson setup build -Db_lto=true +diff --git a/dpdk/doc/guides/prog_guide/profile_app.rst b/dpdk/doc/guides/prog_guide/profile_app.rst +index bd6700ef85..14292d4c25 100644 +--- a/dpdk/doc/guides/prog_guide/profile_app.rst ++++ b/dpdk/doc/guides/prog_guide/profile_app.rst +@@ -42,7 +42,7 @@ and recompile the DPDK: + + .. code-block:: console + +- meson build ++ meson setup build + meson configure build -Dc_args=-DRTE_ETHDEV_PROFILE_WITH_VTUNE + ninja -C build + +@@ -103,7 +103,7 @@ Example: + + .. code-block:: console + +- meson --cross config/arm/arm64_armv8_linux_gcc -Dc_args='-DRTE_ARM_EAL_RDTSC_USE_PMU' build ++ meson setup --cross config/arm/arm64_armv8_linux_gcc -Dc_args='-DRTE_ARM_EAL_RDTSC_USE_PMU' build + + .. warning:: + diff --git a/dpdk/doc/guides/prog_guide/qos_framework.rst b/dpdk/doc/guides/prog_guide/qos_framework.rst index 89ea199529..22616117cb 100644 --- a/dpdk/doc/guides/prog_guide/qos_framework.rst @@ -8708,8 +18262,21 @@ index 89ea199529..22616117cb 100644 Flow Through the Dropper +diff --git a/dpdk/doc/guides/prog_guide/ring_lib.rst b/dpdk/doc/guides/prog_guide/ring_lib.rst +index 54e0bb4b68..515a715266 100644 +--- a/dpdk/doc/guides/prog_guide/ring_lib.rst ++++ b/dpdk/doc/guides/prog_guide/ring_lib.rst +@@ -172,7 +172,7 @@ If there are not enough objects in the ring (this is detected by checking prod_t + + .. figure:: img/ring-dequeue1.* + +- Dequeue last step ++ Dequeue first step + + + Dequeue Second Step diff --git a/dpdk/doc/guides/prog_guide/rte_flow.rst b/dpdk/doc/guides/prog_guide/rte_flow.rst -index c51ed88cfe..714769d0e4 100644 +index c51ed88cfe..91ace7ae7a 100644 --- a/dpdk/doc/guides/prog_guide/rte_flow.rst +++ b/dpdk/doc/guides/prog_guide/rte_flow.rst @@ -60,12 +60,12 @@ Flow rules can also be grouped, the flow rule priority is specific to the @@ -8728,7 +18295,36 @@ index c51ed88cfe..714769d0e4 100644 simultaneously available to applications. Considering that allowed pattern/actions combinations cannot be known in -@@ -1379,7 +1379,7 @@ Matches a network service header (RFC 8300). +@@ -148,14 +148,15 @@ Attribute: Group + Flow rules can be grouped by assigning them a common group number. Groups + allow a logical hierarchy of flow rule groups (tables) to be defined. These + groups can be supported virtually in the PMD or in the physical device. +-Group 0 is the default group and this is the only group which flows are +-guarantee to matched against, all subsequent groups can only be reached by +-way of the JUMP action from a matched flow rule. 
++Group 0 is the default group and is the only group that ++flows are guaranteed to be matched against. ++All subsequent groups can only be reached by using a JUMP action ++from a matched flow rule. + + Although optional, applications are encouraged to group similar rules as + much as possible to fully take advantage of hardware capabilities + (e.g. optimized matching) and work around limitations (e.g. a single pattern +-type possibly allowed in a given group), while being aware that the groups ++type possibly allowed in a given group), while being aware that the groups' + hierarchies must be programmed explicitly. + + Note that support for more than a single group is not guaranteed. +@@ -170,7 +171,7 @@ Priority levels are arbitrary and up to the application, they do + not need to be contiguous nor start from 0, however the maximum number + varies between devices and may be affected by existing flow rules. + +-A flow which matches multiple rules in the same group will always matched by ++A flow which matches multiple rules in the same group will always be matched by + the rule with the highest priority in that group. + + If a packet is matched by several rules of a given group for a given +@@ -1379,7 +1380,7 @@ Matches a network service header (RFC 8300). - ``ttl``: maximum SFF hopes (6 bits). - ``length``: total length in 4 bytes words (6 bits). - ``reserved1``: reserved1 bits (4 bits). @@ -8737,11 +18333,217 @@ index c51ed88cfe..714769d0e4 100644 - ``next_proto``: indicates protocol type of encap data (8 bits). - ``spi``: service path identifier (3 bytes). - ``sindex``: service index (1 byte). +@@ -1610,22 +1611,15 @@ rte_flow_flex_item_create() routine. + value and mask. + + Item: ``L2TPV2`` +-^^^^^^^^^^^^^^^^^^^ ++^^^^^^^^^^^^^^^^ + + Matches a L2TPv2 header. + +-- ``flags_version``: flags(12b), version(4b). +-- ``length``: total length of the message. +-- ``tunnel_id``: identifier for the control connection. +-- ``session_id``: identifier for a session within a tunnel. +-- ``ns``: sequence number for this date or control message. +-- ``nr``: sequence number expected in the next control message to be received. +-- ``offset_size``: offset of payload data. +-- ``offset_padding``: offset padding, variable length. ++- ``hdr``: header definition (``rte_l2tpv2.h``). + - Default ``mask`` matches flags_version only. + + Item: ``PPP`` +-^^^^^^^^^^^^^^^^^^^ ++^^^^^^^^^^^^^ + + Matches a PPP header. + +@@ -1838,12 +1832,12 @@ flow group/tables on the device, this action redirects the matched flow to + the specified group on that device. + + If a matched flow is redirected to a table which doesn't contain a matching +-rule for that flow then the behavior is undefined and the resulting behavior +-is up to the specific device. Best practice when using groups would be define ++rule for that flow, then the behavior is undefined and the resulting behavior ++is up to the specific device. Best practice when using groups would be to define + a default flow rule for each group which a defines the default actions in that + group so a consistent behavior is defined. + +-Defining an action for matched flow in a group to jump to a group which is ++Defining an action for a matched flow in a group to jump to a group which is + higher in the group hierarchy may not be supported by physical devices, + depending on how groups are mapped to the physical devices. 
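As an illustrative aside (not part of the patch itself): a minimal, hypothetical sketch of the practice described above, a catch-all rule in group 0 whose only action is to jump to group 1, where the specific rules would then be installed. ``port_id`` and the group numbers are placeholders; the items, actions and ``rte_flow_create()`` are standard rte_flow API.

.. code-block:: c

   #include <stdio.h>
   #include <rte_flow.h>

   /* Group 0 is the only group matched by default, so install a
    * catch-all rule there that jumps to group 1. */
   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
   struct rte_flow_item pattern[] = {
       { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* match any Ethernet frame */
       { .type = RTE_FLOW_ITEM_TYPE_END },
   };
   struct rte_flow_action_jump jump = { .group = 1 };
   struct rte_flow_action actions[] = {
       { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
       { .type = RTE_FLOW_ACTION_TYPE_END },
   };
   struct rte_flow_error error;

   if (rte_flow_create(port_id, &attr, pattern, actions, &error) == NULL)
       printf("jump rule rejected: %s\n",
              error.message ? error.message : "(no message)");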
In the + definitions of jump actions, applications should be aware that it may be +@@ -2015,8 +2009,8 @@ Also, regarding packet encapsulation ``level``: + level. + + - ``2`` and subsequent values request RSS to be performed on the specified +- inner packet encapsulation level, from outermost to innermost (lower to +- higher values). ++ inner packet encapsulation level, from outermost to innermost (lower to ++ higher values). + + Values other than ``0`` are not necessarily supported. + +@@ -3073,20 +3067,23 @@ The immediate value ``RTE_FLOW_FIELD_VALUE`` (or a pointer to it + ``RTE_FLOW_FIELD_START`` is used to point to the beginning of a packet. + See ``enum rte_flow_field_id`` for the list of supported fields. + +-``op`` selects the operation to perform on a destination field. ++``op`` selects the operation to perform on a destination field: ++ + - ``set`` copies the data from ``src`` field to ``dst`` field. + - ``add`` adds together ``dst`` and ``src`` and stores the result into ``dst``. +-- ``sub`` subtracts ``src`` from ``dst`` and stores the result into ``dst`` ++- ``sub`` subtracts ``src`` from ``dst`` and stores the result into ``dst``. + + ``width`` defines a number of bits to use from ``src`` field. + + ``level`` is used to access any packet field on any encapsulation level +-as well as any tag element in the tag array. +-- ``0`` means the default behaviour. Depending on the packet type, it can +-mean outermost, innermost or anything in between. ++as well as any tag element in the tag array: ++ ++- ``0`` means the default behaviour. Depending on the packet type, ++ it can mean outermost, innermost or anything in between. + - ``1`` requests access to the outermost packet encapsulation level. + - ``2`` and subsequent values requests access to the specified packet +-encapsulation level, from outermost to innermost (lower to higher values). ++ encapsulation level, from outermost to innermost (lower to higher values). ++ + For the tag array (in case of multiple tags are supported and present) + ``level`` translates directly into the array index. + +diff --git a/dpdk/doc/guides/prog_guide/rte_security.rst b/dpdk/doc/guides/prog_guide/rte_security.rst +index 72ca0bd330..8619757e84 100644 +--- a/dpdk/doc/guides/prog_guide/rte_security.rst ++++ b/dpdk/doc/guides/prog_guide/rte_security.rst +@@ -592,68 +592,27 @@ Security session configuration + + Security Session configuration structure is defined as ``rte_security_session_conf`` + +-.. code-block:: c +- +- struct rte_security_session_conf { +- enum rte_security_session_action_type action_type; +- /**< Type of action to be performed on the session */ +- enum rte_security_session_protocol protocol; +- /**< Security protocol to be configured */ +- union { +- struct rte_security_ipsec_xform ipsec; +- struct rte_security_macsec_xform macsec; +- struct rte_security_pdcp_xform pdcp; +- struct rte_security_docsis_xform docsis; +- }; +- /**< Configuration parameters for security session */ +- struct rte_crypto_sym_xform *crypto_xform; +- /**< Security Session Crypto Transformations */ +- void *userdata; +- /**< Application specific userdata to be saved with session */ +- }; ++.. literalinclude:: ../../../lib/security/rte_security.h ++ :language: c ++ :start-after: Structure rte_security_session_conf 8< ++ :end-before: >8 End of structure rte_security_session_conf. + + The configuration structure reuses the ``rte_crypto_sym_xform`` struct for crypto related + configuration. 
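As an illustrative aside (not part of the patch itself): a hedged sketch of how an application might fill ``rte_security_session_conf`` for a lookaside IPsec ESP session. All values are placeholders, and ``cipher_xform`` stands for a crypto transform chain prepared elsewhere; the enum and field names are those of the structure included above.

.. code-block:: c

   #include <rte_security.h>

   struct rte_crypto_sym_xform cipher_xform = { 0 };  /* prepared elsewhere */

   struct rte_security_session_conf conf = {
       .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
       .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
       .ipsec = {
           .spi = 42,  /* placeholder SPI */
           .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
           .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
           .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
       },
       .crypto_xform = &cipher_xform,
       .userdata = NULL,
   };

The session itself would then be created through the device's security context; the exact create call and its mempool arguments differ between DPDK releases, so they are omitted here.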
The ``rte_security_session_action_type`` struct is used to specify whether the + session is configured for Lookaside Protocol offload or Inline Crypto or Inline Protocol + Offload. + +-.. code-block:: c +- +- enum rte_security_session_action_type { +- RTE_SECURITY_ACTION_TYPE_NONE, +- /**< No security actions */ +- RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, +- /**< Crypto processing for security protocol is processed inline +- * during transmission +- */ +- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL, +- /**< All security protocol processing is performed inline during +- * transmission +- */ +- RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, +- /**< All security protocol processing including crypto is performed +- * on a lookaside accelerator +- */ +- RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO +- /**< Similar to ACTION_TYPE_NONE but crypto processing for security +- * protocol is processed synchronously by a CPU. +- */ +- }; ++.. literalinclude:: ../../../lib/security/rte_security.h ++ :language: c ++ :start-after: Enumeration of rte_security_session_action_type 8< ++ :end-before: >8 End enumeration of rte_security_session_action_type. + + The ``rte_security_session_protocol`` is defined as + +-.. code-block:: c +- +- enum rte_security_session_protocol { +- RTE_SECURITY_PROTOCOL_IPSEC = 1, +- /**< IPsec Protocol */ +- RTE_SECURITY_PROTOCOL_MACSEC, +- /**< MACSec Protocol */ +- RTE_SECURITY_PROTOCOL_PDCP, +- /**< PDCP Protocol */ +- RTE_SECURITY_PROTOCOL_DOCSIS, +- /**< DOCSIS Protocol */ +- }; ++.. literalinclude:: ../../../lib/security/rte_security.h ++ :language: c ++ :start-after: Enumeration of rte_security_session_protocol 8< ++ :end-before: >8 End enumeration of rte_security_session_protocol. + + Currently the library defines configuration parameters for IPsec and PDCP only. + For other protocols like MACSec, structures and enums are defined as place holders +diff --git a/dpdk/doc/guides/prog_guide/trace_lib.rst b/dpdk/doc/guides/prog_guide/trace_lib.rst +index fbadf9fde9..9a8f38073d 100644 +--- a/dpdk/doc/guides/prog_guide/trace_lib.rst ++++ b/dpdk/doc/guides/prog_guide/trace_lib.rst +@@ -271,10 +271,16 @@ Trace memory + The trace memory will be allocated through an internal function + ``__rte_trace_mem_per_thread_alloc()``. The trace memory will be allocated + per thread to enable lock less trace-emit function. +-The memory for the trace memory for DPDK lcores will be allocated on +-``rte_eal_init()`` if the trace is enabled through a EAL option. +-For non DPDK threads, on the first trace emission, the memory will be +-allocated. ++ ++For non-lcore threads, the trace memory is allocated on the first trace ++emission. ++ ++For lcore threads, if trace points are enabled through an EAL option, the trace ++memory is allocated when the threads are known to DPDK ++(``rte_eal_init`` for EAL lcores, ``rte_thread_register`` for non-EAL lcores). ++Otherwise, when trace points are enabled later in the life of the application, ++the behavior is the same as for non-lcore threads and the trace memory is allocated ++on the first trace emission.
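As an illustrative aside (not part of the patch itself): a minimal sketch of the allocation timing just described, assuming trace points were enabled at startup with the EAL ``--trace`` option. ``app_worker`` is a hypothetical non-EAL thread; ``rte_thread_register()`` and ``rte_thread_unregister()`` are the documented lcore API.

.. code-block:: c

   #include <stddef.h>
   #include <rte_lcore.h>

   /* Body of a thread created with pthread_create(), i.e. unknown to
    * EAL until it registers itself. */
   static void *app_worker(void *arg)
   {
       /* Registration makes the thread known to DPDK, so when tracing
        * was enabled at startup its per-thread trace memory is
        * allocated here rather than on the first trace emission. */
       if (rte_thread_register() != 0)
           return NULL;

       /* ... application work; trace points emit lock-free here ... */

       rte_thread_unregister();
       return NULL;
   }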
+ + Trace memory layout + ~~~~~~~~~~~~~~~~~~~ diff --git a/dpdk/doc/guides/prog_guide/vhost_lib.rst b/dpdk/doc/guides/prog_guide/vhost_lib.rst -index 76f5d303c9..8959568d8f 100644 +index 76f5d303c9..6ad1afa378 100644 --- a/dpdk/doc/guides/prog_guide/vhost_lib.rst +++ b/dpdk/doc/guides/prog_guide/vhost_lib.rst -@@ -331,7 +331,7 @@ vhost-user implementation has two options: +@@ -303,6 +303,12 @@ The following is an overview of some key Vhost API functions: + Clear inflight packets which are submitted to DMA engine in vhost async data + path. Completed packets are returned to applications through ``pkts``. + ++* ``rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx)`` ++ ++ Notify the guest that used descriptors have been added to the vring. This function ++ will return -EAGAIN when vq's access lock is held by other thread, user should try ++ again later. ++ + Vhost-user Implementations + -------------------------- + +@@ -331,7 +337,7 @@ vhost-user implementation has two options: * The vhost supported features must be exactly the same before and after the restart. For example, if TSO is disabled and then enabled, @@ -8750,7 +18552,7 @@ index 76f5d303c9..8959568d8f 100644 No matter which mode is used, once a connection is established, DPDK vhost-user will start receiving and processing vhost messages from QEMU. -@@ -362,12 +362,12 @@ Guest memory requirement +@@ -362,12 +368,12 @@ Guest memory requirement * Memory pre-allocation @@ -8769,7 +18571,7 @@ index 76f5d303c9..8959568d8f 100644 For async data path, we force the VM memory to be pre-allocated at vhost lib when mapping the guest memory; and also we need to lock the memory to -@@ -375,8 +375,8 @@ Guest memory requirement +@@ -375,8 +381,8 @@ Guest memory requirement * Memory sharing @@ -8793,6 +18595,66 @@ index 3cb2175688..522390bf1b 100644 BPHY CGX/RPM PMD ---------------- +diff --git a/dpdk/doc/guides/rawdevs/ntb.rst b/dpdk/doc/guides/rawdevs/ntb.rst +index 2bb115d13f..f8befc6594 100644 +--- a/dpdk/doc/guides/rawdevs/ntb.rst ++++ b/dpdk/doc/guides/rawdevs/ntb.rst +@@ -1,6 +1,8 @@ + .. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2018 Intel Corporation. + ++.. include:: <isonum.txt> ++ + NTB Rawdev Driver + ================= + +@@ -17,19 +19,23 @@ some information by using scratchpad registers. + BIOS setting on Intel Xeon + -------------------------- + +-Intel Non-transparent Bridge needs special BIOS setting. The reference for +-Skylake is https://www.intel.com/content/dam/support/us/en/documents/server-products/Intel_Xeon_Processor_Scalable_Family_BIOS_User_Guide.pdf +- +-- Set the needed PCIe port as NTB to NTB mode on both hosts. +-- Enable NTB bars and set bar size of bar 23 and bar 45 as 12-29 (4K-512M) +- on both hosts (for Ice Lake, bar size can be set as 12-51, namely 4K-128PB). +- Note that bar size on both hosts should be the same. +-- Disable split bars for both hosts. +-- Set crosslink control override as DSD/USP on one host, USD/DSP on +- another host. +-- Disable PCIe PII SSC (Spread Spectrum Clocking) for both hosts. This +- is a hardware requirement. +- ++Intel Non-transparent Bridge (NTB) needs special BIOS settings on both systems. ++Note that for 4th Generation Intel\ |reg| Xeon\ |reg| Scalable Processors, ++option ``Port Subsystem Mode`` should be changed from ``Gen5`` to ``Gen4 Only``, ++then reboot. ++ ++- Set ``Non-Transparent Bridge PCIe Port Definition`` for needed PCIe ports ++ as ``NTB to NTB`` mode, on both hosts. ++- Set ``Enable NTB BARs`` as ``Enabled``, on both hosts. 
++- Set ``Enable SPLIT BARs`` as ``Disabled``, on both hosts. ++- Set ``Imbar1 Size``, ``Imbar2 Size``, ``Embar1 Size`` and ``Embar2 Size``, ++ as 12-29 (i.e., 4K-512M) for 2nd Generation Intel\ |reg| Xeon\ |reg| Scalable Processors; ++ as 12-51 (i.e., 4K-128PB) for 3rd and 4th Generation Intel\ |reg| Xeon\ |reg| Scalable Processors. ++ Note that those bar sizes on both hosts should be the same. ++- Set ``Crosslink Control override`` as ``DSD/USP`` on one host, ++ ``USD/DSP`` on another host. ++- Set ``PCIe PLL SSC (Spread Spectrum Clocking)`` as ``Disabled``, on both hosts. ++ This is a hardware requirement when using Re-timer Cards. + + Device Setup + ------------ +@@ -145,4 +151,8 @@ like the following: + Limitation + ---------- + +-- This PMD only supports Intel Skylake and Ice Lake platforms. ++This PMD is only supported on Intel Xeon Platforms: ++ ++- 4th Generation Intel® Xeon® Scalable Processors. ++- 3rd Generation Intel® Xeon® Scalable Processors. ++- 2nd Generation Intel® Xeon® Scalable Processors. diff --git a/dpdk/doc/guides/regexdevs/features_overview.rst b/dpdk/doc/guides/regexdevs/features_overview.rst index c512bde592..3e7ab409bf 100644 --- a/dpdk/doc/guides/regexdevs/features_overview.rst @@ -8886,10 +18748,10 @@ index 25439dad45..1fd1755858 100644 * Parameters of ``rte_cryptodev_sym_session_create()`` were modified to accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``. diff --git a/dpdk/doc/guides/rel_notes/release_21_11.rst b/dpdk/doc/guides/rel_notes/release_21_11.rst -index db09ec01ea..69199a9583 100644 +index db09ec01ea..762db48e9b 100644 --- a/dpdk/doc/guides/rel_notes/release_21_11.rst +++ b/dpdk/doc/guides/rel_notes/release_21_11.rst -@@ -878,3 +878,975 @@ Tested Platforms +@@ -878,3 +878,2319 @@ Tested Platforms * Kernel version: 5.10 * Ubuntu 18.04 @@ -9865,6 +19727,1350 @@ index db09ec01ea..69199a9583 100644 + +* DPDK 21.11.2 contains fixes up to DPDK 22.07 as well as fixes for CVE-2022-28199 and CVE-2022-2132 +* Issues identified/fixed in DPDK main branch after DPDK 22.07 may be present in DPDK 21.11.2 ++ ++21.11.3 Release Notes ++--------------------- ++ ++ ++21.11.3 Fixes ++~~~~~~~~~~~~~ ++ ++* app/dumpcap: fix crash on cleanup ++* app/dumpcap: fix pathname for output file ++* app/eventdev: fix limits in error message ++* app/testpmd: fix build with clang 15 ++* app/testpmd: fix build with clang 15 in flow code ++* app/testpmd: fix MAC header in checksum forward engine ++* app/testpmd: make quit flag volatile ++* app/testpmd: remove jumbo offload ++* app/testpmd: restore ixgbe bypass commands ++* app/testpmd: skip port reset in secondary process ++* baseband/acc100: add LDPC encoder padding function ++* baseband/acc100: add null checks ++* baseband/acc100: check AQ availability ++* baseband/acc100: check turbo dec/enc input ++* baseband/acc100: enforce additional check on FCW ++* baseband/acc100: fix clearing PF IR outside handler ++* baseband/acc100: fix close cleanup ++* baseband/acc100: fix device minimum alignment ++* baseband/acc100: fix double MSI intr in TB mode ++* baseband/acc100: fix input length for CRC24B ++* baseband/acc100: fix memory leak ++* baseband/acc100: fix null HARQ input case ++* baseband/acc100: fix ring availability calculation ++* baseband/acc100: fix ring/queue allocation ++* build: enable developer mode for all working trees ++* buildtools: fix NUMA nodes count ++* bus/auxiliary: prevent device from being probed again ++* bus/dpaa: fix build with clang 15 ++* ci: bump versions of actions in GHA ++* ci: 
enable ABI check in GHA ++* ci: update to new API for step outputs in GHA ++* common/cnxk: fix log level during MCAM allocation ++* common/cnxk: fix missing flow counter reset ++* common/cnxk: fix printing disabled MKEX registers ++* common/cnxk: fix schedule weight update ++* common/iavf: avoid copy in async mode ++* common/mlx5: fix multi-process mempool registration ++* common/mlx5: fix shared mempool subscription ++* common/qat: fix VF to PF answer ++* common/sfc_efx/base: fix maximum Tx data count ++* common/sfc_efx/base: remove VQ index check during VQ start ++* cryptodev: fix missing SHA3 algorithm strings ++* cryptodev: fix unduly newlines in logs ++* crypto/qat: fix build with GCC 12 ++* crypto/qat: fix null hash algorithm digest size ++* devtools: fix checkpatch header retrieval from stdin ++* dma/idxd: check DSA device allocation ++* doc: add LRO size limitation in mlx5 guide ++* doc: add Rx buffer split capability for mlx5 ++* doc: avoid meson deprecation in setup ++* doc: document device dump in procinfo guide ++* doc: fix application name in procinfo guide ++* doc: fix colons in testpmd aged flow rules ++* doc: fix dumpcap interface parameter option ++* doc: fix event timer adapter guide ++* doc: fix maximum packet size of virtio driver ++* doc: fix reference to dma application example ++* doc: fix support table for Ethernet/VLAN flow items ++* doc: fix typo depreciated instead of deprecated ++* doc: fix underlines in testpmd guide ++* drivers: fix typos found by Lintian ++* drivers: remove unused build variable ++* eal: fix data race in multi-process support ++* eal: fix doxygen comments for UUID ++* eal: fix side effect in some pointer arithmetic macros ++* event/cnxk: fix mbuf offset calculation ++* event/cnxk: fix missing mempool cookie marking ++* event/cnxk: fix missing xstats operations ++* eventdev/crypto: fix multi-process ++* eventdev/eth_tx: add spinlock for adapter start/stop ++* eventdev/eth_tx: fix adapter stop ++* eventdev/eth_tx: fix queue delete ++* eventdev: fix name of Rx conf type in documentation ++* event/dlb2: handle enqueuing more than maximum depth ++* event/dsw: fix flow migration ++* event/sw: fix device name in dump ++* event/sw: fix flow ID init in self test ++* event/sw: fix log in self test ++* examples/fips_validation: fix typo in error log ++* examples/ipsec-secgw: fix Tx checksum offload flag ++* examples/ipsec-secgw: fix Tx checksum offload flag ++* examples/ipsec-secgw: use Tx checksum offload conditionally ++* examples/l2fwd-crypto: fix typo in error message ++* examples/l3fwd: fix MTU configuration with event mode ++* examples/qos_sched: fix number of subport profiles ++* examples/vhost: fix use after free ++* examples/vm_power_manager: use safe list iterator ++* graph: fix node objects allocation ++* gro: check payload length after trim ++* gro: trim tail padding bytes ++* hash: fix RCU configuration memory leak ++* ipsec: fix build with GCC 12 ++* lib: remove empty return types from doxygen comments ++* malloc: fix storage size for some allocations ++* mem: fix API doc about allocation on secondary processes ++* mempool/cnxk: fix destroying empty pool ++* mempool: make event callbacks process-private ++* net: accept unaligned data in checksum routines ++* net/atlantic: fix build with clang 15 ++* net/axgbe: clear buffer on scattered Rx chaining failure ++* net/axgbe: fix checksum and RSS in scattered Rx ++* net/axgbe: fix length of each segment in scattered Rx ++* net/axgbe: fix mbuf lengths in scattered Rx ++* net/axgbe: fix scattered 
Rx ++* net/axgbe: optimise scattered Rx ++* net/axgbe: remove freeing buffer in scattered Rx ++* net/axgbe: reset end of packet in scattered Rx ++* net/axgbe: save segment data in scattered Rx ++* net/bnxt: fix build with GCC 13 ++* net/bnxt: fix error code during MTU change ++* net/bnxt: fix null pointer dereference in LED config ++* net/bnxt: fix representor info freeing ++* net/bnxt: remove unnecessary check ++* net/bonding: fix array overflow in Rx burst ++* net/bonding: fix descriptor limit reporting ++* net/bonding: fix double slave link status query ++* net/bonding: fix dropping valid MAC packets ++* net/bonding: fix flow flush order on close ++* net/bonding: fix mbuf fast free handling ++* net/bonding: fix slave device Rx/Tx offload configuration ++* net/bonding: fix Tx hash for TCP ++* net/bonding: set initial value of descriptor count alignment ++* net/cnxk: fix DF bit in vector mode ++* net/cnxk: fix later skip to include mbuf private data ++* net/dpaa2: fix buffer freeing on SG Tx ++* net/dpaa2: fix build with clang 15 ++* net/dpaa2: fix DPDMUX error behaviour ++* net/dpaa2: use internal mempool for SG table ++* net/dpaa: fix buffer freeing in slow path ++* net/dpaa: fix buffer freeing on SG Tx ++* net/dpaa: fix jumbo packet Rx in case of VSP ++* net/dpaa: use internal mempool for SG table ++* net/enetfec: fix buffer leak ++* net/enetfec: fix restart ++* net/failsafe: fix interrupt handle leak ++* net/hns3: add L3 and L4 RSS types ++* net/hns3: delete unused markup ++* net/hns3: extract functions to create RSS and FDIR flow rule ++* net/hns3: fix clearing hardware MAC statistics ++* net/hns3: fix crash in SVE Tx ++* net/hns3: fix crash when secondary process access FW ++* net/hns3: fix IPv4 and IPv6 RSS ++* net/hns3: fix IPv4 RSS ++* net/hns3: fix lock protection of RSS flow rule ++* net/hns3: fix minimum Tx frame length ++* net/hns3: fix next-to-use overflow in simple Tx ++* net/hns3: fix next-to-use overflow in SVE Tx ++* net/hns3: fix packet type for GENEVE ++* net/hns3: fix restore filter function input ++* net/hns3: fix RSS filter restore ++* net/hns3: fix RSS flow rule restore ++* net/hns3: fix RSS rule restore ++* net/hns3: fix Rx with PTP ++* net/hns3: fix typos in IPv6 SCTP fields ++* net/hns3: fix VF mailbox message handling ++* net/hns3: move flow direction rule recovery ++* net/hns3: revert fix mailbox communication with HW ++* net/hns3: revert Tx performance optimization ++* net/i40e: fix build with MinGW GCC 12 ++* net/i40e: fix jumbo frame Rx with X722 ++* net/i40e: fix pctype configuration for X722 ++* net/i40e: fix VF representor release ++* net/iavf: add thread for event callbacks ++* net/iavf: check illegal packet sizes ++* net/iavf: fix IPsec flow create error check ++* net/iavf: fix L3 checksum Tx offload flag ++* net/iavf: fix outer checksum flags ++* net/iavf: fix pattern check for flow director parser ++* net/iavf: fix processing VLAN TCI in SSE path ++* net/iavf: fix queue stop for large VF ++* net/iavf: fix SPI check ++* net/iavf: fix Tx done descriptors cleanup ++* net/iavf: fix VLAN insertion ++* net/iavf: fix VLAN offload ++* net/iavf: revert VLAN insertion fix ++* net/iavf: update IPsec ESN values when updating session ++* net/ice/base: fix 100M speed capability ++* net/ice/base: fix add MAC rule ++* net/ice/base: fix array overflow in add switch recipe ++* net/ice/base: fix bit finding range over ptype bitmap ++* net/ice/base: fix division during E822 PTP init ++* net/ice/base: fix double VLAN in promiscuous mode ++* net/ice/base: fix DSCP PFC TLV 
creation ++* net/ice/base: fix duplicate flow rules ++* net/ice/base: fix endian format ++* net/ice/base: fix function descriptions for parser ++* net/ice/base: fix inner symmetric RSS hash in raw flow ++* net/ice/base: fix input set of GTPoGRE ++* net/ice/base: fix media type of PHY 10G SFI C2C ++* net/ice/base: ignore promiscuous already exist ++* net/ice: check illegal packet sizes ++* net/ice: fix interrupt handler unregister ++* net/ice: fix null function pointer call ++* net/ice: fix RSS hash update ++* net/ice: fix scalar Rx path segment ++* net/ice: fix scalar Tx path segment ++* net/ice: support VXLAN-GPE tunnel offload ++* net/ionic: fix adapter name for logging ++* net/ionic: fix endianness for RSS ++* net/ionic: fix endianness for Rx and Tx ++* net/ionic: fix reported error stats ++* net/ionic: fix Rx filter save ++* net/ixgbe: fix broadcast Rx on VF after promisc removal ++* net/ixgbe: fix unexpected VLAN Rx in promisc mode on VF ++* net/ixgbevf: fix promiscuous and allmulti ++* net/memif: fix crash with different number of Rx/Tx queues ++* net/mlx4: fix Verbs FD leak in secondary process ++* net/mlx5: fix action flag data type ++* net/mlx5: fix assert when creating meter policy ++* net/mlx5: fix build with recent compilers ++* net/mlx5: fix check for orphan wait descriptor ++* net/mlx5: fix drop action validation ++* net/mlx5: fix first segment inline length ++* net/mlx5: fix hairpin split with set VLAN VID action ++* net/mlx5: fix indexed pool local cache crash ++* net/mlx5: fix inline length exceeding descriptor limit ++* net/mlx5: fix maximum LRO message size ++* net/mlx5: fix meter profile delete after disable ++* net/mlx5: fix mirror flow validation with ASO action ++* net/mlx5: fix modify action with tunnel decapsulation ++* net/mlx5: fix null check in devargs parsing ++* net/mlx5: fix port event cleaning order ++* net/mlx5: fix port initialization with small LRO ++* net/mlx5: fix race condition in counter pool resizing ++* net/mlx5: fix RSS expansion buffer size ++* net/mlx5: fix shared Rx queue config reuse ++* net/mlx5: fix single not inline packet storing ++* net/mlx5: fix source port checking in sample flow rule ++* net/mlx5: fix thread termination check on Windows ++* net/mlx5: fix thread workspace memory leak ++* net/mlx5: fix tunnel header with IPIP offload ++* net/mlx5: fix Tx check for hardware descriptor length ++* net/mlx5: fix Verbs FD leak in secondary process ++* net/mvneta: fix build with GCC 12 ++* net/nfp: compose firmware file name with new hwinfo ++* net/nfp: fix internal buffer size and MTU check ++* net/nfp: fix memory leak in Rx ++* net/nfp: fix Rx descriptor DMA address ++* net/nfp: improve HW info header log readability ++* net/ngbe: fix maximum frame size ++* net/ngbe: remove semaphore between SW/FW ++* net/ngbe: rename some extended statistics ++* net/qede/base: fix 32-bit build with GCC 12 ++* net/tap: fix overflow of network interface index ++* net/txgbe: fix IPv6 flow rule ++* net/txgbe: remove semaphore between SW/FW ++* net/txgbe: rename some extended statistics ++* net/virtio: fix crash when configured twice ++* node: check Rx element allocation ++* pcapng: fix write more packets than IOV_MAX limit ++* pdump: do not allow enable/disable in primary process ++* power: fix some doxygen comments ++* Revert "cryptodev: fix missing SHA3 algorithm strings" ++* Revert "net/i40e: enable maximum frame size at port level" ++* Revert "net/i40e: fix jumbo frame Rx with X722" ++* Revert "net/i40e: fix max frame size config at port level" ++* Revert 
"net/iavf: add thread for event callbacks" ++* ring: fix description ++* ring: remove leftover comment about watermark ++* ring: squash gcc 12.2.1 warnings ++* sched: fix subport profile configuration ++* service: fix build with clang 15 ++* service: fix early move to inactive status ++* test/crypto: fix bitwise operator in a SNOW3G case ++* test/crypto: fix debug messages ++* test/crypto: fix PDCP vectors ++* test/crypto: fix wireless auth digest segment ++* test/efd: fix build with clang 15 ++* test/event: fix build with clang 15 ++* test/hash: fix bulk lookup check ++* test/hash: remove dead code in extendable bucket test ++* test/ipsec: fix build with GCC 12 ++* test/ipsec: skip if no compatible device ++* test/member: fix build with clang 15 ++* timer: fix stopping all timers ++* trace: fix dynamically enabling trace points ++* trace: fix leak with regexp ++* trace: fix metadata dump ++* trace: fix mode change ++* trace: fix mode for new trace point ++* trace: fix race in debug dump ++* vdpa/ifc: handle data path update failure ++* version: 21.11.3-rc1 ++* vhost: add non-blocking API for posting interrupt ++* vhost: fix build with clang 15 ++* vhost: fix build with GCC 12 ++* vhost: fix doxygen warnings ++* vhost: fix virtqueue use after free on NUMA reallocation ++ ++21.11.3 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* `Ubuntu Testing <https://mails.dpdk.org/archives/stable/2022-December/041641.html>`__ ++ ++ * Physical NIC tests ++ * Virtual NIC tests ++ * OVS-DPDK VUC tests ++ ++ ++* `Intel(R) Testing <https://mails.dpdk.org/archives/stable/2022-December/041659.html>`__ ++ ++ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing ++ ++ * PF (i40e) ++ * PF (ixgbe) ++ * PF (ice) ++ * VF (i40e) ++ * VF (ixgbe) ++ * VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * Power and IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++ ++* `Nvidia(R) Testing <https://mails.dpdk.org/archives/stable/2022-December/041665.html>`__ ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Regex application ++ * Buffer Split ++ * Tx scheduling ++ ++ * Build tests ++ * ConnectX-6 Dx ++ * ConnectX-5 ++ * ConnectX-4 Lx ++ * BlueField-2 ++ ++ ++* `Red Hat(R) Testing <https://mails.dpdk.org/archives/stable/2022-December/041667.html>`__ ++ ++ * Platform ++ ++ * RHEL 8 ++ * Kernel 4.18 ++ * Qemu 6.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live 
migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++21.11.3 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* DPDK 21.11.3 contains fixes up to DPDK 22.11 ++* Issues identified/fixed in DPDK main branch after DPDK 22.11 may be present in DPDK 21.11.3 ++* Some i40e `patches <https://mails.dpdk.org/archives/stable/2022-December/041648.html>`__ from DPDK 21.11.2 reverted as causing issue and no fix available. ++* Some deprecation warnings for af_xdp driver with libbpf >= v0.7.0. See `mailing list <https://mails.dpdk.org/archives/dev/2022-December/257961.html>`__ for more details. ++ ++21.11.4 Release Notes ++--------------------- ++ ++ ++21.11.4 Fixes ++~~~~~~~~~~~~~ ++ ++* acl: fix crash on PPC64 with GCC 11 ++* app/bbdev: check statistics failure ++* app/compress-perf: fix remaining data for ops ++* app/compress-perf: fix some typos ++* app/compress-perf: fix testing single operation ++* app/crypto-perf: fix IPsec direction ++* app/crypto-perf: fix number of segments ++* app/crypto-perf: fix SPI zero ++* app/crypto-perf: fix test file memory leak ++* app/dumpcap: fix storing port identifier ++* app/flow-perf: fix division or module by zero ++* app/testpmd: fix crash on cleanup ++* app/testpmd: fix forwarding stats for Tx dropped ++* app/testpmd: fix interactive mode with no ports ++* app/testpmd: fix link check condition on port start ++* app/testpmd: fix packet count in IEEE 1588 engine ++* app/testpmd: fix packet transmission in noisy VNF engine ++* app/testpmd: fix secondary process packet forwarding ++* app/testpmd: fix Tx preparation in checksum engine ++* baseband/acc: fix acc100 iteration counter in TB ++* baseband/acc: fix memory leak on acc100 close ++* build: detect backtrace availability ++* build: fix dependencies lookup ++* build: fix toolchain definition ++* bus/ifpga: fix devargs handling ++* ci: switch to Ubuntu 20.04 ++* cmdline: handle EOF as quit ++* cmdline: make rdline status not private ++* common/cnxk: add memory clobber to steor and ldeor ++* common/cnxk: fix auth key length ++* common/cnxk: fix dual VLAN parsing ++* common/sfc_efx/base: add MAE mark reset action ++* compressdev: fix empty devargs parsing ++* compressdev: fix end of driver list ++* compress/mlx5: fix decompress xform validation ++* compress/mlx5: fix output Adler-32 checksum offset ++* compress/mlx5: fix queue setup for partial transformations ++* crypto/ccp: fix IOVA handling ++* crypto/ccp: remove some dead code for UIO ++* crypto/ccp: remove some printf ++* cryptodev: fix empty devargs parsing ++* cryptodev: fix telemetry data truncation ++* crypto/qat: fix stream cipher direction ++* devtools: fix escaped space in grep pattern ++* dma/ioat: fix device stop if no copies done ++* dma/ioat: fix error reporting on restart ++* dma/ioat: fix indexes after restart ++* dma/skeleton: fix empty devargs parsing ++* doc: add gpudev to the Doxygen index ++* doc: fix dependency setup in l2fwd-cat example guide ++* doc: fix description of L2TPV2 flow item ++* doc: fix LPM support in l3forward guide ++* doc: fix pipeline example path in user guide ++* doc: fix reference to event timer header ++* eal: cleanup alarm and hotplug before memory detach ++* eal/freebsd: fix lock in alarm callback ++* eal/linux: fix hugetlbfs sub-directories discovery ++* eal: use same atomic intrinsics for GCC and clang ++* eal/windows: fix pedantic build ++* ethdev: fix build with LTO ++* 
ethdev: fix telemetry data truncation ++* ethdev: remove telemetry Rx mbuf alloc failed field ++* event/cnxk: fix burst timer arm ++* event/cnxk: fix SSO cleanup ++* event/cnxk: fix timer operations in secondary process ++* event/cnxk: wait for CPT flow control on WQE path ++* eventdev/eth_tx: fix devices loop ++* eventdev/timer: fix overflow ++* examples/cmdline: fix build with GCC 12 ++* examples/ipsec-secgw: fix auth IV length ++* examples/qos_sched: fix config entries in wrong sections ++* examples/qos_sched: fix debug mode ++* examples/qos_sched: fix Tx port config when link down ++* fbarray: fix metadata dump ++* gpudev: fix deadlocks when registering callback ++* graph: fix node shrink ++* hash: fix GFNI implementation build with GCC 12 ++* kni: fix build on RHEL 9.1 ++* kni: fix possible starvation when mbufs are exhausted ++* kvargs: add API documentation for process callback ++* mailmap: add list of contributors ++* mem: fix heap ID in telemetry ++* mem: fix hugepage info mapping ++* mem: fix telemetry data truncation ++* mempool: fix telemetry data truncation ++* net/af_xdp: squash deprecated-declaration warnings ++* net/bnxt: fix link state change interrupt config ++* net/bnxt: fix RSS hash in mbuf ++* net/bnxt: fix Rx queue stats after queue stop and start ++* net/bnxt: fix Tx queue stats after queue stop and start ++* net/cnxk: fix LBK BPID usage ++* net/e1000: fix saving of stripped VLAN TCI ++* net/hns3: add debug info for Rx/Tx dummy function ++* net/hns3: add verification of RSS types ++* net/hns3: allow adding queue buffer size hash rule ++* net/hns3: declare flow rule keeping capability ++* net/hns3: extract common functions to set Rx/Tx ++* net/hns3: extract common function to query device ++* net/hns3: fix burst mode query with dummy function ++* net/hns3: fix clearing RSS configuration ++* net/hns3: fix config struct used for conversion ++* net/hns3: fix duplicate RSS rule check ++* net/hns3: fix empty devargs parsing ++* net/hns3: fix inaccurate RTC time to read ++* net/hns3: fix log about indirection table size ++* net/hns3: fix possible truncation of hash key when config ++* net/hns3: fix possible truncation of redirection table ++* net/hns3: fix RSS key size compatibility ++* net/hns3: fix warning on flush or destroy rule ++* net/hns3: make getting Tx function static ++* net/hns3: refactor set RSS hash algorithm and key interface ++* net/hns3: reimplement hash flow function ++* net/hns3: remove debug condition for Tx prepare ++* net/hns3: remove unused structures ++* net/hns3: remove useless code when destroy valid RSS rule ++* net/hns3: save hash algo to RSS filter list node ++* net/hns3: separate flow RSS config from RSS conf ++* net/hns3: separate setting and clearing RSS rule ++* net/hns3: separate setting hash algorithm ++* net/hns3: separate setting hash key ++* net/hns3: separate setting redirection table ++* net/hns3: separate setting RSS types ++* net/hns3: separate Tx prepare from getting Tx function ++* net/hns3: use hardware config to report hash key ++* net/hns3: use hardware config to report hash types ++* net/hns3: use hardware config to report redirection table ++* net/hns3: use new RSS rule to configure hardware ++* net/hns3: use RSS filter list to check duplicated rule ++* net/i40e: fix AVX512 fast-free path ++* net/i40e: fix MAC loopback on X722 ++* net/i40e: fix validation of flow transfer attribute ++* net/i40e: reduce interrupt interval in multi-driver mode ++* net/iavf: add lock for VF commands ++* net/iavf: fix building data desc ++* 
net/iavf: fix device stop during reset ++* net/iavf: fix VLAN offload with AVX2 ++* net/iavf: protect insertion in flow list ++* net/ice: fix validation of flow transfer attribute ++* net/ipn3ke: fix representor name ++* net/ipn3ke: fix thread exit ++* net/ixgbe: enable IPv6 mask in flow rules ++* net/ixgbe: fix firmware version consistency ++* net/ixgbe: fix IPv6 mask in flow director ++* net/mlx5: check compressed CQE opcode in vectorized Rx ++* net/mlx5: fix build with GCC 12 and ASan ++* net/mlx5: fix CQE dump for Tx ++* net/mlx5: fix error CQE dumping for vectorized Rx ++* net/mlx5: fix flow sample with ConnectX-5 ++* net/mlx5: fix hairpin Tx queue reference count ++* net/mlx5: fix sysfs port name translation ++* net/mlx5: fix Windows build with MinGW GCC 12 ++* net/mlx5: ignore non-critical syndromes for Rx queue ++* net/nfp: fix firmware name derived from PCI name ++* net/nfp: fix getting RSS configuration ++* net/nfp: fix MTU configuration order ++* net/ngbe: fix packet type to parse from offload flags ++* net/sfc: enforce fate action in transfer flow rules ++* net/sfc: export pick transfer proxy callback to representors ++* net/sfc: fix MAC address entry leak in transfer flow parsing ++* net/sfc: fix resetting mark in tunnel offload switch rules ++* net/sfc: invalidate switch port entry on representor unplug ++* net/txgbe: fix default signal quality value for KX/KX4 ++* net/txgbe: fix interrupt loss ++* net/txgbe: fix packet type to parse from offload flags ++* net/txgbe: fix Rx buffer size in config register ++* net/vhost: add missing newline in logs ++* net/vhost: fix leak in interrupt handle setup ++* net/vhost: fix Rx interrupt ++* net/virtio: deduce IP length for TSO checksum ++* net/virtio: fix empty devargs parsing ++* net/virtio: remove address width limit for modern devices ++* net/virtio-user: fix device starting failure handling ++* pdump: fix build with GCC 12 ++* raw/ifpga/base: fix init with multi-process ++* raw/skeleton: fix empty devargs parsing ++* raw/skeleton: fix selftest ++* regex/mlx5: fix doorbell record ++* regex/mlx5: utilize all available queue pairs ++* reorder: fix sequence number mbuf field register ++* reorder: invalidate buffer from ready queue in drain ++* sched: fix alignment of structs in subport ++* table: fix action selector group size log2 setting ++* telemetry: fix repeat display when callback don't init dict ++* telemetry: move include after guard ++* test/bbdev: extend HARQ tolerance ++* test/bbdev: fix crash for non supported HARQ length ++* test/bbdev: remove check for invalid opaque data ++* test/crypto: add missing MAC-I to PDCP vectors ++* test/crypto: fix capability check for ZUC cipher-auth ++* test/crypto: fix statistics error messages ++* test/crypto: fix typo in AES test ++* test/crypto: fix ZUC digest length in comparison ++* test: fix segment length in packet generator ++* test/mbuf: fix mbuf reset test ++* test/mbuf: fix test with mbuf debug enabled ++* test/reorder: fix double free of drained buffers ++* vdpa/ifc: fix argument compatibility check ++* vdpa/ifc: fix reconnection in SW-assisted live migration ++* version: 21.11.4-rc1 ++* vhost: decrease log level for unimplemented requests ++* vhost: fix net header settings in datapath ++* vhost: fix OOB access for invalid vhost ID ++* vhost: fix possible FD leaks ++* vhost: fix possible FD leaks on truncation ++ ++21.11.4 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* `Intel(R) Testing <https://mails.dpdk.org/archives/stable/2023-April/043590.html>`__ ++ ++ * Basic Intel(R) NIC(ixgbe, 
i40e and ice) testing ++ ++ * PF (i40e) ++ * PF (ixgbe) ++ * PF (ice) ++ * VF (i40e) ++ * VF (ixgbe) ++ * VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * Power and IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++ ++* `Nvidia(R) Testing <https://mails.dpdk.org/archives/stable/2023-April/043578.html>`__ ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Regex application ++ * Buffer Split ++ * Tx scheduling ++ ++ * Build tests ++ * ConnectX-6 Dx ++ * ConnectX-5 ++ * ConnectX-4 Lx ++ * BlueField-2 ++ ++ ++* `Red Hat(R) Testing <https://mails.dpdk.org/archives/stable/2023-April/043572.html>`__ ++ ++ * Platform ++ ++ * RHEL 9 ++ * Kernel 5.14 ++ * Qemu 6.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++21.11.4 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* DPDK 21.11.4 contains fixes up to DPDK 23.03 ++* Issues identified/fixed in DPDK main branch after DPDK 23.03 may be present in DPDK 21.11.4 ++* Intel validation team reported a performance issue for a specific test on a specific platform, Intel(R) Xeon(R) Platinum 8280M CPU @ 2.70GHz CPU. Other tests and other platforms do not have this performance issue. See `mailing list <https://mails.dpdk.org/archives/stable/2023-May/043729.html>`__ for more details. 
++ ++21.11.5 Release Notes ++--------------------- ++ ++ ++21.11.5 Fixes ++~~~~~~~~~~~~~ ++ ++* app/crypto-perf: fix socket ID default value ++* app/testpmd: fix checksum engine with GTP on 32-bit ++* app/testpmd: fix GTP L2 length in checksum engine ++* baseband/fpga_5gnr_fec: fix possible division by zero ++* baseband/fpga_5gnr_fec: fix starting unconfigured queue ++* build: fix case of project language name ++* ci: fix libabigail cache in GHA ++* common/cnxk: fix inline device VF identification ++* common/cnxk: fix IPsec IPv6 tunnel address byte swap ++* common/iavf: fix MAC type for 710 NIC ++* common/mlx5: adjust fork call with new kernel API ++* common/qat: detach crypto from compress build ++* common/sfc_efx/base: fix Rx queue without RSS hash prefix ++* crypto/ipsec_mb: fix enqueue counter for SNOW3G ++* crypto/ipsec_mb: optimize allocation in session ++* crypto/openssl: skip workaround at compilation time ++* crypto/scheduler: fix last element for valid args ++* doc: fix auth algos in cryptoperf app ++* doc: fix event timer adapter guide ++* doc: fix format in flow API guide ++* doc: fix kernel patch link in hns3 guide ++* doc: fix number of leading spaces in hns3 guide ++* doc: fix syntax in hns3 guide ++* doc: fix typo in cnxk platform guide ++* doc: fix typo in graph guide ++* doc: fix typos and wording in flow API guide ++* doc: remove warning with Doxygen 1.9.7 ++* doc: update BIOS settings and supported HW for NTB ++* eal: avoid calling cleanup twice ++* eal/linux: fix legacy mem init with many segments ++* eal/linux: fix secondary process crash for mp hotplug requests ++* ethdev: check that at least one FEC mode is specified ++* ethdev: fix indirect action conversion ++* ethdev: fix MAC address occupies two entries ++* ethdev: fix potential leak in PCI probing helper ++* ethdev: update documentation for API to get FEC ++* ethdev: update documentation for API to set FEC ++* event/cnxk: fix nanoseconds to ticks conversion ++* eventdev/timer: fix buffer flush ++* eventdev/timer: fix timeout event wait behavior ++* event/dsw: free rings on close ++* examples/fips_validation: fix digest length in AES-GCM ++* examples/ip_pipeline: fix build with GCC 13 ++* examples/ipsec-secgw: fix TAP default MAC address ++* examples/l2fwd-cat: fix external build ++* examples/ntb: fix build with GCC 13 ++* fib: fix adding default route ++* hash: fix reading unaligned bits in Toeplitz hash ++* ipc: fix file descriptor leakage with unhandled messages ++* ipsec: fix NAT-T header length ++* kernel/freebsd: fix function parameter list ++* kni: fix build with Linux 6.3 ++* kni: fix build with Linux 6.5 ++* mbuf: fix Doxygen comment of distributor metadata ++* mem: fix memsegs exhausted message ++* net/bonding: fix destroy dedicated queues flow ++* net/bonding: fix startup when NUMA is not supported ++* net/cnxk: fix cookies check with security offload ++* net/cnxk: fix flow queue index validation ++* net/cnxk: flush SQ before configuring MTU ++* net/dpaa2: fix checksum good flags ++* net/e1000: fix queue number initialization ++* net/e1000: fix Rx and Tx queue status ++* net/hns3: delete duplicate macro definition ++* net/hns3: extract PTP to its own header file ++* net/hns3: fix build warning ++* net/hns3: fix device start return value ++* net/hns3: fix FEC mode check ++* net/hns3: fix FEC mode for 200G ports ++* net/hns3: fix IMP reset trigger ++* net/hns3: fix inaccurate log ++* net/hns3: fix index to look up table in NEON Rx ++* net/hns3: fix mbuf leakage when RxQ started after reset ++* net/hns3: 
fix mbuf leakage when RxQ started during reset ++* net/hns3: fix missing FEC capability ++* net/hns3: fix never set MAC flow control ++* net/hns3: fix non-zero weight for disabled TC ++* net/hns3: fix redundant line break in log ++* net/hns3: fix RTC time after reset ++* net/hns3: fix RTC time on initialization ++* net/hns3: fix Rx multiple firmware reset interrupts ++* net/hns3: fix uninitialized variable ++* net/hns3: fix variable type mismatch ++* net/hns3: get FEC capability from firmware ++* net/hns3: uninitialize PTP ++* net/i40e: fix comments ++* net/i40e: fix Rx data buffer size ++* net/i40e: fix tunnel packet Tx descriptor ++* net/iavf: fix abnormal disable HW interrupt ++* net/iavf: fix Rx data buffer size ++* net/iavf: fix stop ordering ++* net/iavf: fix tunnel TSO path selection ++* net/iavf: fix VLAN insertion in vector path ++* net/iavf: fix VLAN offload with AVX512 ++* net/iavf: release large VF when closing device ++* net/ice: adjust timestamp mbuf register ++* net/ice/base: remove unreachable code ++* net/ice: fix 32-bit build ++* net/ice: fix DCF control thread crash ++* net/ice: fix DCF RSS initialization ++* net/ice: fix outer UDP checksum offload ++* net/ice: fix protocol agnostic offloading with big packets ++* net/ice: fix RSS hash key generation ++* net/ice: fix Rx data buffer size ++* net/ice: fix statistics ++* net/ice: fix timestamp enabling ++* net/ice: fix tunnel packet Tx descriptor ++* net/ice: fix VLAN mode parser ++* net/ice: initialize parser for double VLAN ++* net/igc: fix Rx and Tx queue status ++* net/ixgbe: add proper memory barriers in Rx ++* net/ixgbe: fix Rx and Tx queue status ++* net/mlx5: enhance error log for tunnel offloading ++* net/mlx5: fix device removal event handling ++* net/mlx5: fix drop action attribute validation ++* net/mlx5: fix drop action memory leak ++* net/mlx5: fix duplicated tag index matching in SWS ++* net/mlx5: fix flow dump for modify field ++* net/mlx5: fix flow workspace destruction ++* net/mlx5: fix LRO TCP checksum ++* net/mlx5: fix risk in NEON Rx descriptor read ++* net/mlx5: fix validation for conntrack indirect action ++* net/mlx5: forbid MPRQ restart ++* net/netvsc: fix sizeof calculation ++* net/nfp: fix address always related with PF ID 0 ++* net/nfp: fix offloading flows ++* net/ngbe: fix extended statistics ++* net/ngbe: fix RSS offload capability ++* net/qede: fix RSS indirection table initialization ++* net/sfc: invalidate dangling MAE flow action FW resource IDs ++* net/sfc: stop misuse of Rx ingress m-port metadata on EF100 ++* net/tap: set locally administered bit for fixed MAC address ++* net/txgbe/base: fix Tx with fiber hotplug ++* net/txgbe: fix extended statistics ++* net/txgbe: fix interrupt enable mask ++* net/txgbe: fix to set autoneg for 1G speed ++* net/txgbe: fix use-after-free on remove ++* net/virtio: fix initialization to return negative errno ++* net/virtio: propagate interrupt configuration error values ++* net/virtio-user: fix leak when initialisation fails ++* net/vmxnet3: fix drop of empty segments in Tx ++* net/vmxnet3: fix return code in initializing ++* pci: fix comment referencing renamed function ++* pipeline: fix double free for table stats ++* ring: fix dequeue parameter name ++* ring: fix use after free ++* telemetry: fix autotest on Alpine ++* test: add graph tests ++* test/bonding: fix include of standard header ++* test/crypto: fix PDCP-SDAP test vectors ++* test/crypto: fix return value for SNOW3G ++* test/crypto: fix session creation check ++* test/malloc: fix missing free 
++* test/malloc: fix statistics checks ++* test/mbuf: fix crash in a forked process ++* version: 21.11.5-rc1 ++* vfio: fix include with musl runtime ++* vhost: fix invalid call FD handling ++ ++21.11.5 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* `Red Hat(R) Testing <https://mails.dpdk.org/archives/stable/2023-August/045101.html>`__ ++ ++ * Platform ++ ++ * RHEL 9 ++ * Kernel 5.14 ++ * Qemu 6.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* `Nvidia(R) Testing <https://mails.dpdk.org/archives/stable/2023-August/045124.html>`__ ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Regex application ++ * Buffer Split ++ * Tx scheduling ++ ++ * Build tests ++ * ConnectX-6 Dx ++ * ConnectX-5 ++ * ConnectX-4 Lx ++ * BlueField-2 ++ ++ ++* `Intel(R) Testing <https://mails.dpdk.org/archives/stable/2023-August/045177.html>`__ ++ ++ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing ++ ++ * PF (i40e) ++ * PF (ixgbe) ++ * PF (ice) ++ * VF (i40e) ++ * VF (ixgbe) ++ * VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * Power and IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++21.11.5 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* DPDK 21.11.5 contains fixes up to DPDK 23.07 ++* Issues identified/fixed in DPDK main branch after DPDK 23.07 may be present in DPDK 21.11.5 ++ ++21.11.5 Fixes skipped and status unresolved ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++* c5b531d6ee app/crypto-perf: fix session freeing ++* 04dac73643 eventdev/crypto: fix enqueue count ++* 4b04134cbb eventdev/crypto: fix failed events ++* da73a2a0d1 eventdev/crypto: fix offset used while flushing events ++* f442c04001 eventdev/crypto: fix overflow in circular buffer ++* 5a0f64d84b net/cnxk: fix configuring large Rx/Tx queues ++* 59ceaa72d5 common/cnxk: fix part number for CN10K ++* 31a28a99fd net/ngbe: add spinlock protection on YT PHY ++* 5781638519 common/cnxk: fix RQ mask config for CN10KB chip ++* 3fe71706ab event/cnxk: fix stale data in workslot ++* 927cb43fe9 examples/l3fwd: fix port group mask with AltiVec ++* 0f044b6681 net/iavf: fix refine protocol header ++* 0b241667cc net/iavf: fix tainted scalar ++* b125c0e721 net/iavf: fix tainted scalar ++* 
cedb44dc87 common/mlx5: improve AES-XTS tweak capability check ++* 0fd1386c30 app/testpmd: cleanup cleanly from signal ++* f1d0993e03 app/testpmd: fix interactive mode on Windows ++* 7be74edb90 common/mlx5: use just sufficient barrier for Arm ++* 7bdf7a13ae app/testpmd: fix encap/decap size calculation ++* d2d7f0190b doc: fix code blocks in cryptodev guide ++* 7e7b6762ea eal: enhance NUMA affinity heuristic ++* e97738919c net/nfp: fix Tx descriptor free logic of NFD3 ++* ebc352c77f net/mlx5: fix matcher layout size calculation ++* ad4d51d277 net/mlx5: forbid duplicated tag index in pattern template ++* 6df1bc6b3b mempool/cnxk: avoid hang when counting batch allocs ++* 772e30281a common/cnxk: fix CPT backpressure disable on LBK ++* b37fe88a2c event/cnxk: fix LMTST write for single event mode ++* 92a16af450 net/iavf: fix virtchnl command called in interrupt ++* 12011b11a3 net/txgbe: adapt to MNG veto bit setting ++* 21f702d556 net/ngbe: fix link status in no LSC mode ++* 659cfce01e net/ngbe: remove redundant codes ++* 6fd3a7a618 net/ice/base: fix internal etype in switch filter ++* 9749dffe23 net/ice: fix MAC type of E822 and E823 ++* 1c7a4d37e7 common/cnxk: fix mailbox timeout due to deadlock ++* 5e170dd8b6 net/txgbe: fix blocking system events ++* 37ca457d3f common/mlx5: fix obtaining IB device in LAG mode ++* 8c047e823a net/bnxt: fix multi-root card support ++* 8b4618a7b4 crypto/qat: fix null algorithm digest placement ++* 9a518054b5 Ahmad examples/l3fwd: fix duplicate expression for default nexthop ++* e6479f009f net/mlx5: fix MPRQ stride size for headroom ++ ++21.11.6 Release Notes ++--------------------- ++ ++ ++21.11.6 Fixes ++~~~~~~~~~~~~~ ++ ++* app/bbdev: fix link with NXP LA12XX ++* app/dumpcap: allow multiple invocations ++* app/dumpcap: fix mbuf pool ring type ++* app/pipeline: add sigint handler ++* app/procinfo: adjust format of RSS info ++* app/procinfo: fix RSS info ++* app/procinfo: remove unnecessary rte_malloc ++* app/testpmd: add explicit check for tunnel TSO ++* app/testpmd: fix help string ++* app/testpmd: fix primary process not polling all queues ++* app/testpmd: fix tunnel TSO capability check ++* app/testpmd: fix tunnel TSO configuration ++* app/testpmd: remove useless check in TSO command ++* baseband/acc: fix ACC100 HARQ input alignment ++* bus/dpaa: fix build with asserts for GCC 13 ++* bus/pci: fix device ID log ++* common/cnxk: fix aura disable handling ++* common/cnxk: fix default flow action setting ++* common/cnxk: fix different size bit operations ++* common/cnxk: fix DPI memzone name ++* common/cnxk: fix pool buffer size in opaque mode ++* common/cnxk: fix xstats for different packet sizes ++* common/cnxk: remove dead Meson code ++* common/mlx5: fix controller index parsing ++* config/arm: fix aarch32 build with GCC 13 ++* cryptodev: add missing doc for security context ++* crypto/ipsec_mb: add dependency check for cross build ++* crypto/nitrox: fix panic with high number of segments ++* crypto/qat: fix NULL algorithm digest placement ++* crypto/qat: fix raw API null algorithm digest ++* dma/cnxk: fix device state ++* doc: fix hns3 build option about max queue number ++* doc: fix RSS flow description in hns3 guide ++* doc: fix some ordered lists ++* doc: remove number of commands in vDPA guide ++* doc: remove restriction on ixgbe vector support ++* doc: replace code blocks with includes in security guide ++* doc: update features in hns3 guide ++* doc: update versions recommendations for i40e and ice ++* eal/unix: fix firmware reading with external xz 
helper ++* eal/windows: fix build with recent MinGW ++* ethdev: account for smaller MTU when setting default ++* ethdev: fix 32-bit build with GCC 13 ++* ethdev: fix ESP packet type description ++* ethdev: fix function name in comment ++* event/cnxk: fix getwork mode devargs parsing ++* event/cnxk: fix return values for capability API ++* eventdev/eth_rx: fix timestamp field register in mbuf ++* eventdev: fix device pointer for vdev-based devices ++* eventdev: fix missing driver names in info struct ++* eventdev: fix symbol export for port maintenance ++* event/dlb2: fix disable PASID ++* event/dlb2: fix missing queue ordering capability flag ++* event/dlb2: fix name check in self-test ++* event/sw: fix ordering corruption with op release ++* event/sw: remove obsolete comment ++* examples/ethtool: fix pause configuration ++* examples/ipsec-secgw: fix partial overflow ++* fib6: fix adding default route as first route ++* fib: fix adding default route overwriting entire table ++* hash: align SSE lookup to scalar implementation ++* malloc: remove return from void functions ++* mempool: clarify enqueue/dequeue ops documentation ++* mempool/cnxk: fix free from non-EAL threads ++* mempool: fix default ops for an empty mempool ++* mempool: fix get function documentation ++* meter: fix RFC4115 trTCM API Doxygen ++* net/af_packet: fix Rx and Tx queue state ++* net/af_xdp: fix Rx and Tx queue state ++* net/af_xdp: make compatible with libbpf 0.8.0 ++* net/avp: fix Rx and Tx queue state ++* net/bnx2x: fix Rx and Tx queue state ++* net/bnxt: fix Rx and Tx queue state ++* net/bonding: fix header for C++ ++* net/bonding: fix link status callback stop ++* net/bonding: fix possible overrun ++* net/bonding: fix Rx and Tx queue state ++* net/cnxk: fix uninitialized variable ++* net/cnxk: fix uninitialized variable ++* net/cxgbe: fix Rx and Tx queue state ++* net/dpaa2: fix Rx and Tx queue state ++* net/dpaa: fix Rx and Tx queue state ++* net/e1000: fix Rx and Tx queue state ++* net/ena: fix Rx and Tx queue state ++* net/enetc: fix Rx and Tx queue state ++* net/enic: avoid extra unlock in MTU set ++* net/enic: fix Rx and Tx queue state ++* net/hinic: fix Rx and Tx queue state ++* net/hns3: extract common function to obtain revision ID ++* net/hns3: fix crash for NEON and SVE ++* net/hns3: fix double stats for IMP and global reset ++* net/hns3: fix error code for multicast resource ++* net/hns3: fix flushing multicast MAC address ++* net/hns3: fix ignored reset event ++* net/hns3: fix IMP or global reset ++* net/hns3: fix LRO offload to report ++* net/hns3: fix mailbox sync ++* net/hns3: fix multiple reset detected log ++* net/hns3: fix order in NEON Rx ++* net/hns3: fix reset event status ++* net/hns3: fix setting DCB capability ++* net/hns3: fix some error logs ++* net/hns3: fix some return values ++* net/hns3: fix traffic management thread safety ++* net/hns3: fix typo in function name ++* net/hns3: fix unchecked Rx free threshold ++* net/hns3: fix uninitialized hash algo value ++* net/hns3: fix VF default MAC modified when set failed ++* net/hns3: fix VF reset handler interruption ++* net/hns3: keep set/get algo key functions local ++* net/hns3: refactor interrupt state query ++* net/hns3: remove reset log in secondary ++* net/i40e: fix buffer leak on Rx reconfiguration ++* net/i40e: fix FDIR queue receives broadcast packets ++* net/iavf: fix checksum offloading ++* net/iavf: fix ESN session update ++* net/iavf: fix indent in Tx path ++* net/iavf: fix port stats clearing ++* net/iavf: fix TSO with big 
segments ++* net/iavf: fix Tx debug ++* net/iavf: fix Tx offload flags check ++* net/iavf: fix Tx offload mask ++* net/iavf: fix Tx preparation ++* net/iavf: fix VLAN offload strip flag ++* net/iavf: remove log from Tx prepare function ++* net/iavf: unregister interrupt handler before FD close ++* net/ice: fix crash on closing representor ports ++* net/ice: fix DCF port statistics ++* net/ice: fix initial link status ++* net/ice: fix L1 check interval ++* net/ice: fix TM configuration clearing ++* net/ice: fix TSO with big segments ++* net/ice: fix Tx preparation ++* net/ice: remove log from Tx prepare function ++* net/ice: write timestamp to first segment in scattered Rx ++* net/ipn3ke: fix Rx and Tx queue state ++* net/memif: fix Rx and Tx queue state ++* net/mlx4: fix Rx and Tx queue state ++* net/mlx5: fix decap action checking in sample flow ++* net/mlx5: fix E-Switch mirror flow rule validation ++* net/mlx5: fix hairpin queue states ++* net/mlx5: fix hairpin queue unbind ++* net/mlx5: fix leak in sysfs port name translation ++* net/mlx5: fix matcher layout size calculation ++* net/mlx5: fix MPRQ stride size to accommodate the headroom ++* net/mlx5: fix multi-segment Tx inline data length ++* net/mlx5: fix shared Rx queue list management ++* net/mlx5: fix use after free on Rx queue start ++* net/mlx5: fix validation of sample encap flow action ++* net/mlx5: zero UDP checksum over IPv4 in encapsulation ++* net/mvneta: fix Rx and Tx queue state ++* net/mvpp2: fix Rx and Tx queue state ++* net/netvsc: increase VSP response timeout to 60 seconds ++* net/nfp: fix DMA error after abnormal exit ++* net/nfp: fix link status interrupt ++* net/nfp: fix reconfigure logic in PF initialization ++* net/nfp: fix reconfigure logic in VF initialization ++* net/nfp: fix reconfigure logic of set MAC address ++* net/nfp: fix Tx descriptor free logic of NFD3 ++* net/ngbe: check process type in close operation ++* net/ngbe: fix flow control ++* net/ngbe: fix Rx and Tx queue state ++* net/ngbe: keep link down after device close ++* net/ngbe: reconfigure MAC Rx when link update ++* net/null: fix Rx and Tx queue state ++* net/octeon_ep: fix Rx and Tx queue state ++* net/pfe: fix Rx and Tx queue state ++* net/ring: fix Rx and Tx queue state ++* net/sfc: account for data offset on Tx ++* net/sfc: add missing error code indication to MAE init path ++* net/sfc: fix Rx and Tx queue state ++* net/sfc: remove null dereference in log ++* net/sfc: set max Rx packet length for representors ++* net/softnic: fix Rx and Tx queue state ++* net/tap: fix IPv4 checksum offloading ++* net/tap: fix L4 checksum offloading ++* net/tap: fix RSS for fragmented packets ++* net/tap: use MAC address parse API instead of local parser ++* net/txgbe: add Tx queue maximum limit ++* net/txgbe: check process type in close operation ++* net/txgbe: fix GRE tunnel packet checksum ++* net/txgbe: fix out of bound access ++* net/txgbe: fix Rx and Tx queue state ++* net/txgbe: keep link down after device close ++* net/txgbe: reconfigure MAC Rx when link update ++* net/vhost: fix Rx and Tx queue state ++* net/virtio: fix descriptor addresses in 32-bit build ++* net/virtio: fix link state interrupt vector setting ++* net/virtio: fix missing next flag in Tx packed ring ++* net/virtio: fix Rx and Tx queue state ++* net/vmxnet3: fix Rx and Tx queue state ++* pdump: fix error number on IPC response ++* random: initialize state for unregistered non-EAL threads ++* rawdev: fix device class in log message ++* Revert "net/iavf: fix abnormal disable HW 
interrupt" ++* Revert "net/iavf: fix tunnel TSO path selection" ++* test/bbdev: assert failed test for queue configure ++* test/bbdev: fix Python script subprocess ++* test/bonding: add missing check ++* test/bonding: fix uninitialized RSS configuration ++* test/bonding: remove unreachable statement ++* test/crypto: fix IV in some vectors ++* test/crypto: fix typo in asym tests ++* test/crypto: skip some synchronous tests with CPU crypto ++* test/event: fix crypto null device creation ++* test/hash: fix creation error log ++* version: 21.11.6-rc1 ++* vhost: fix check on virtqueue access in async registration ++* vhost: fix check on virtqueue access in in-flight getter ++* vhost: fix missing check on virtqueue access ++* vhost: fix missing lock protection in power monitor API ++* vhost: fix missing vring call check on virtqueue access ++ ++21.11.6 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* `Red Hat(R) Testing <https://mails.dpdk.org/archives/stable/2023-December/046925.html>`__ ++ ++ * Platform ++ ++ * RHEL 9 ++ * Kernel 5.14 ++ * Qemu 7.2 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* `Nvidia(R) Testing <https://mails.dpdk.org/archives/stable/2024-January/046939.html>`__ ++ ++ * Basic functionality with testpmd ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Regex application ++ * Buffer Split ++ * Tx scheduling ++ ++ * Build tests ++ * ConnectX-6 Dx ++ * ConnectX-7 ++ * BlueField-2 ++ ++ ++* `Intel(R) Testing <https://mails.dpdk.org/archives/stable/2024-January/047001.html>`__ ++ ++ * Basic Intel(R) NIC(ixgbe, i40e and ice) testing ++ ++ * PF (i40e, ixgbe) ++ * VF (i40e, ixgbe) ++ * PF/VF (ice) ++ * Compile Testing ++ * Intel NIC single core/NIC performance ++ * IPsec ++ ++ * Basic cryptodev and virtio testing ++ ++ * vhost/virtio basic loopback, PVP and performance test ++ * cryptodev Function/Performance ++ ++21.11.6 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* DPDK 21.11.6 contains fixes up to DPDK 23.11 ++* Issues identified/fixed in DPDK main branch after DPDK 23.11 may be present in DPDK 21.11.6 ++ ++21.11.6 Fixes skipped and status unresolved ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++ ++* c5b531d6ee app/crypto-perf: fix session freeing ++* 61b52e7edb app/test: fix reference to master in bonding test ++* 0fd1386c30 app/testpmd: cleanup cleanly from signal ++* 
a996cd04ae app/testpmd: fix early exit from signal ++* 7bdf7a13ae app/testpmd: fix encap/decap size calculation ++* f1d0993e03 app/testpmd: fix interactive mode on Windows ++* 5d8c1f6253 common/cnxk: check error in MAC address set ++* a6f639e079 common/cnxk: fix BP threshold calculation ++* 772e30281a common/cnxk: fix CPT backpressure disable on LBK ++* 2e9863fbb6 common/cnxk: fix different size bit operations ++* 4f6f36ce23 common/cnxk: fix leak in error path ++* 1c7a4d37e7 common/cnxk: fix mailbox timeout due to deadlock ++* 59ceaa72d5 common/cnxk: fix part number for CN10K ++* 5781638519 common/cnxk: fix RQ mask config for CN10KB chip ++* 56fa6f92e9 common/cnxk: fix RSS key configuration ++* 37ca457d3f common/mlx5: fix obtaining IB device in LAG mode ++* cedb44dc87 common/mlx5: improve AES-XTS tweak capability check ++* 7be74edb90 common/mlx5: use just sufficient barrier for Arm ++* 9d91c3047d crypto/openssl: fix memory leaks in asym operations ++* 3dd3115078 dma/cnxk: fix chunk buffer failure return code ++* ba39a261a7 dma/cnxk: fix completion ring tail wrap ++* 95a955e3e0 dma/cnxk: fix device reconfigure ++* 694e8e643d event/cnxk: fix CASP usage for clang ++* b37fe88a2c event/cnxk: fix LMTST write for single event mode ++* 3fe71706ab event/cnxk: fix stale data in workslot ++* 04dac73643 eventdev/crypto: fix enqueue count ++* 4b04134cbb eventdev/crypto: fix failed events ++* da73a2a0d1 eventdev/crypto: fix offset used while flushing events ++* f442c04001 eventdev/crypto: fix overflow in circular buffer ++* 9a518054b5 examples/l3fwd: fix duplicate expression for default nexthop ++* 927cb43fe9 examples/l3fwd: fix port group mask with AltiVec ++* 6df1bc6b3b mempool/cnxk: avoid hang when counting batch allocs ++* b3ddd649ad mempool/cnxk: fix alloc from non-EAL threads ++* 0236016c02 net/axgbe: fix Rx and Tx queue state ++* 5a0f64d84b net/cnxk: fix configuring large Rx/Tx queues ++* 7752f14026 net/cnxk: fix Rx flush on CN10k ++* 0f044b6681 net/iavf: fix refine protocol header ++* 0b241667cc net/iavf: fix tainted scalar ++* b125c0e721 net/iavf: fix tainted scalar ++* 92a16af450 net/iavf: fix virtchnl command called in interrupt ++* 6fd3a7a618 net/ice/base: fix internal etype in switch filter ++* 9749dffe23 net/ice: fix MAC type of E822 and E823 ++* 2ece3b7186 net/mlx5: fix flow workspace double free in Windows ++* c46216e77a net/nfp: fix Rx and Tx queue state ++* a74c5001e9 net/ngbe: add proper memory barriers in Rx ++* 31a28a99fd net/ngbe: add spinlock protection on YT PHY ++* 21f702d556 net/ngbe: fix link status in no LSC mode ++* 659cfce01e net/ngbe: remove redundant codes ++* 12011b11a3 net/txgbe: adapt to MNG veto bit setting ++* 5e170dd8b6 net/txgbe: fix blocking system events ++* 166591931b pcapng: modify timestamp calculation ++* 63bf81a617 test: fix named test macro diff --git a/dpdk/doc/guides/rel_notes/release_2_1.rst b/dpdk/doc/guides/rel_notes/release_2_1.rst index 35e6c88884..d0ad99ebce 100644 --- a/dpdk/doc/guides/rel_notes/release_2_1.rst @@ -9878,6 +21084,19 @@ index 35e6c88884..d0ad99ebce 100644 * **ixgbe/base: Fix SFP probing.** +diff --git a/dpdk/doc/guides/sample_app_ug/dma.rst b/dpdk/doc/guides/sample_app_ug/dma.rst +index 5cd7db650a..2765895564 100644 +--- a/dpdk/doc/guides/sample_app_ug/dma.rst ++++ b/dpdk/doc/guides/sample_app_ug/dma.rst +@@ -45,7 +45,7 @@ The application requires a number of command line options: + + .. 
code-block:: console + +- ./<build_dir>/examples/dpdk-ioat [EAL options] -- [-p MASK] [-q NQ] [-s RS] [-c <sw|hw>] ++ ./<build_dir>/examples/dpdk-dma [EAL options] -- [-p MASK] [-q NQ] [-s RS] [-c <sw|hw>] + [--[no-]mac-updating] [-b BS] [-f FS] [-i SI] + + where, diff --git a/dpdk/doc/guides/sample_app_ug/fips_validation.rst b/dpdk/doc/guides/sample_app_ug/fips_validation.rst index 56df434215..39baea3346 100644 --- a/dpdk/doc/guides/sample_app_ug/fips_validation.rst @@ -9934,10 +21153,28 @@ index c53ee7c386..468a977478 100644 ``<auth_key>`` diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst -index 440642ef7c..3ada3575ba 100644 +index 440642ef7c..51621b692f 100644 --- a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst +++ b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst -@@ -176,7 +176,7 @@ function. The value returned is the number of parsed arguments: +@@ -50,13 +50,12 @@ Compiling the Application + * https://github.com/01org/intel-cmt-cat + + +-#. To compile the application export the path to PQoS lib +- and the DPDK source tree and go to the example directory: ++To compile the application, export the path to PQoS lib: + +- .. code-block:: console +- +- export PQOS_INSTALL_PATH=/path/to/libpqos ++.. code-block:: console + ++ export CFLAGS=-I/path/to/intel-cmt-cat/include ++ export LDFLAGS=-L/path/to/intel-cmt-cat/lib + + To compile the sample application see :doc:`compiling`. + +@@ -176,7 +175,7 @@ function. The value returned is the number of parsed arguments: .. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c :language: c :start-after: Initialize the Environment Abstraction Layer (EAL). 8< @@ -10153,6 +21390,34 @@ index 1b4444b7d8..ce49eab96f 100644 and filled: .. literalinclude:: ../../../examples/l2fwd-crypto/main.c +diff --git a/dpdk/doc/guides/sample_app_ug/l3_forward.rst b/dpdk/doc/guides/sample_app_ug/l3_forward.rst +index 6d7d7c5cc1..9fb44e2c23 100644 +--- a/dpdk/doc/guides/sample_app_ug/l3_forward.rst ++++ b/dpdk/doc/guides/sample_app_ug/l3_forward.rst +@@ -44,9 +44,8 @@ returned by the LPM or FIB lookup. + The set of LPM and FIB rules used by the application is statically configured + and loaded into the LPM or FIB object at initialization time. + +-In the sample application, hash-based and FIB-based forwarding supports ++In the sample application, hash-based, LPM-based, FIB-based and ACL-based forwarding supports + both IPv4 and IPv6. +-LPM-based forwarding supports IPv4 only. + + Compiling the Application + ------------------------- +diff --git a/dpdk/doc/guides/sample_app_ug/pipeline.rst b/dpdk/doc/guides/sample_app_ug/pipeline.rst +index 49d50136bc..7c86bf484a 100644 +--- a/dpdk/doc/guides/sample_app_ug/pipeline.rst ++++ b/dpdk/doc/guides/sample_app_ug/pipeline.rst +@@ -58,7 +58,7 @@ The following is an example command to run the application configured for the VX + + .. 
code-block:: console + +- $ ./<build_dir>/examples/dpdk-pipeline -c 0x3 -- -s examples/vxlan.cli ++ $ ./<build_dir>/examples/dpdk-pipeline -c 0x3 -- -s examples/pipeline/examples/vxlan.cli + + The application should start successfully and display as follows: + diff --git a/dpdk/doc/guides/sample_app_ug/server_node_efd.rst b/dpdk/doc/guides/sample_app_ug/server_node_efd.rst index 605eb09a61..c6cbc3def6 100644 --- a/dpdk/doc/guides/sample_app_ug/server_node_efd.rst @@ -10179,10 +21444,42 @@ index 6d0de64401..08ddd7aa59 100644 :dedent: 1 +diff --git a/dpdk/doc/guides/sample_app_ug/vdpa.rst b/dpdk/doc/guides/sample_app_ug/vdpa.rst +index cb9c4f2169..51e69fc20d 100644 +--- a/dpdk/doc/guides/sample_app_ug/vdpa.rst ++++ b/dpdk/doc/guides/sample_app_ug/vdpa.rst +@@ -38,8 +38,7 @@ where + * --iface specifies the path prefix of the UNIX domain socket file, e.g. + /tmp/vhost-user-, then the socket files will be named as /tmp/vhost-user-<n> + (n starts from 0). +-* --interactive means run the vdpa sample in interactive mode, currently 4 +- internal cmds are supported: ++* --interactive means run the vDPA sample in interactive mode: + + 1. help: show help message + 2. list: list all available vdpa devices diff --git a/dpdk/doc/guides/sample_app_ug/vm_power_management.rst b/dpdk/doc/guides/sample_app_ug/vm_power_management.rst -index 7160b6a63a..9ce87956c9 100644 +index 7160b6a63a..e0af729e66 100644 --- a/dpdk/doc/guides/sample_app_ug/vm_power_management.rst +++ b/dpdk/doc/guides/sample_app_ug/vm_power_management.rst +@@ -255,7 +255,7 @@ To build just the ``vm_power_manager`` application using ``meson``/``ninja``: + .. code-block:: console + + cd dpdk +- meson build ++ meson setup build + cd build + ninja + meson configure -Dexamples=vm_power_manager +@@ -494,7 +494,7 @@ To build just the ``vm_power_manager`` application using ``meson``/``ninja``: + .. code-block:: console + + cd dpdk +- meson build ++ meson setup build + cd build + ninja + meson configure -Dexamples=vm_power_manager/guest_cli @@ -681,7 +681,7 @@ The following is an example JSON string for a power management request. "resource_id": 10 }} @@ -10192,10 +21489,40 @@ index 7160b6a63a..9ce87956c9 100644 Where {core_num} is the lcore to query. Before using this command, please enable responses via the set_query command on the host. +diff --git a/dpdk/doc/guides/testpmd_app_ug/run_app.rst b/dpdk/doc/guides/testpmd_app_ug/run_app.rst +index 30edef07ea..ccc1bd6ddb 100644 +--- a/dpdk/doc/guides/testpmd_app_ug/run_app.rst ++++ b/dpdk/doc/guides/testpmd_app_ug/run_app.rst +@@ -621,6 +621,7 @@ as follows: + - ``dev_configure`` + - ``dev_start`` + - ``dev_stop`` ++- ``dev_reset`` + - ``rx_queue_setup`` + - ``tx_queue_setup`` + - ``rx_queue_release`` diff --git a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst -index 44228cd7d2..94792d88cc 100644 +index 44228cd7d2..3a522a80a6 100644 --- a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +++ b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +@@ -1767,7 +1767,7 @@ Enable or disable a per port Rx offloading on all Rx queues of a port:: + * ``offloading``: can be any of these offloading capability: + vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro, + qinq_strip, outer_ipv4_cksum, macsec_strip, +- header_split, vlan_filter, vlan_extend, jumbo_frame, ++ header_split, vlan_filter, vlan_extend, + scatter, timestamp, security, keep_crc, rss_hash + + This command should be run when the port is stopped, or else it will fail. 
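(Editorial aside, not part of the patch hunks: the per-port Rx offload
switch documented in the hunk above is issued from the testpmd prompt.
A minimal session, assuming port ID 0 and the vlan_strip capability from
the list above, might look like:

   testpmd> port stop 0
   testpmd> port config 0 rx_offload vlan_strip on
   testpmd> port start 0

Stopping the port first matters, since the command fails on a started port.)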
+@@ -1782,7 +1782,7 @@ Enable or disable a per queue Rx offloading only on a specific Rx queue:: + * ``offloading``: can be any of these offloading capability: + vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro, + qinq_strip, outer_ipv4_cksum, macsec_strip, +- header_split, vlan_filter, vlan_extend, jumbo_frame, ++ header_split, vlan_filter, vlan_extend, + scatter, timestamp, security, keep_crc + + This command should be run when the port is stopped, or else it will fail. @@ -3510,7 +3510,7 @@ Tunnel offload Indicate tunnel offload rule type @@ -10205,6 +21532,147 @@ index 44228cd7d2..94792d88cc 100644 Matching pattern ^^^^^^^^^^^^^^^^ +@@ -4342,7 +4342,7 @@ Disabling isolated mode:: + testpmd> + + Dumping HW internal information +-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ``flow dump`` dumps the hardware's internal representation information of + all flows. It is bound to ``rte_flow_dev_dump()``:: +@@ -4358,10 +4358,10 @@ Otherwise, it will complain error occurred:: + Caught error type [...] ([...]): [...] + + Listing and destroying aged flow rules +-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ``flow aged`` simply lists aged flow rules be get from api ``rte_flow_get_aged_flows``, +-and ``destroy`` parameter can be used to destroy those flow rules in PMD. ++and ``destroy`` parameter can be used to destroy those flow rules in PMD:: + + flow aged {port_id} [destroy] + +@@ -4396,7 +4396,7 @@ will be ID 3, ID 1, ID 0:: + 1 0 0 i-- + 0 0 0 i-- + +-If attach ``destroy`` parameter, the command will destroy all the list aged flow rules. ++If attach ``destroy`` parameter, the command will destroy all the list aged flow rules:: + + testpmd> flow aged 0 destroy + Port 0 total aged flows: 4 +diff --git a/dpdk/doc/guides/tools/cryptoperf.rst b/dpdk/doc/guides/tools/cryptoperf.rst +index ce93483291..1e63638932 100644 +--- a/dpdk/doc/guides/tools/cryptoperf.rst ++++ b/dpdk/doc/guides/tools/cryptoperf.rst +@@ -233,7 +233,6 @@ The following are the application command-line options: + Set authentication algorithm name, where ``name`` is one + of the following:: + +- 3des-cbc + aes-cbc-mac + aes-cmac + aes-gmac +diff --git a/dpdk/doc/guides/tools/dumpcap.rst b/dpdk/doc/guides/tools/dumpcap.rst +index 6fa284380c..d8a137b1cd 100644 +--- a/dpdk/doc/guides/tools/dumpcap.rst ++++ b/dpdk/doc/guides/tools/dumpcap.rst +@@ -43,7 +43,7 @@ To list interfaces available for capture, use ``--list-interfaces``. + + To filter packets in style of *tshark*, use the ``-f`` flag. + +-To capture on multiple interfaces at once, use multiple ``-I`` flags. ++To capture on multiple interfaces at once, use multiple ``-i`` flags. + + + Example +@@ -55,7 +55,7 @@ Example + 0. 000:00:03.0 + 1. 000:00:03.1 + +- # <build_dir>/app/dpdk-dumpcap -I 0000:00:03.0 -c 6 -w /tmp/sample.pcapng ++ # <build_dir>/app/dpdk-dumpcap -i 0000:00:03.0 -c 6 -w /tmp/sample.pcapng + Packets captured: 6 + Packets received/dropped on interface '0000:00:03.0' 6/0 + +diff --git a/dpdk/doc/guides/tools/proc_info.rst b/dpdk/doc/guides/tools/proc_info.rst +index 9772d97ef0..5dd6f9ecae 100644 +--- a/dpdk/doc/guides/tools/proc_info.rst ++++ b/dpdk/doc/guides/tools/proc_info.rst +@@ -1,10 +1,10 @@ + .. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2015 Intel Corporation. 
+ +-dpdk-procinfo Application +-========================= ++dpdk-proc-info Application ++========================== + +-The dpdk-procinfo application is a Data Plane Development Kit (DPDK) application ++The dpdk-proc-info application is a Data Plane Development Kit (DPDK) application + that runs as a DPDK secondary process and is capable of retrieving port + statistics, resetting port statistics, printing DPDK memory information and + displaying debug information for port. +@@ -17,9 +17,10 @@ The application has a number of command line options: + + .. code-block:: console + +- ./<build_dir>/app/dpdk-procinfo -- -m | [-p PORTMASK] [--stats | --xstats | ++ ./<build_dir>/app/dpdk-proc-info -- -m | [-p PORTMASK] [--stats | --xstats | + --stats-reset | --xstats-reset] [ --show-port | --show-tm | --show-crypto | +- --show-ring[=name] | --show-mempool[=name] | --iter-mempool=name ] ++ --show-ring[=name] | --show-mempool[=name] | --iter-mempool=name | ++ --show-port-private ] + + Parameters + ~~~~~~~~~~ +@@ -69,17 +70,20 @@ mempool. For invalid or no mempool name, whole list is dump. + The iter-mempool parameter iterates and displays mempool elements specified + by name. For invalid or no mempool name no elements are displayed. + ++**--show-port-private** ++The show-port-private parameter displays ports private information. ++ + Limitations + ----------- + +-* dpdk-procinfo should run alongside primary process with same DPDK version. ++* dpdk-proc-info should run alongside primary process with same DPDK version. + +-* When running ``dpdk-procinfo`` with shared library mode, it is required to ++* When running ``dpdk-proc-info`` with shared library mode, it is required to + pass the same NIC PMD libraries as used for the primary application. Any + mismatch in PMD library arguments can lead to undefined behavior and results + affecting primary application too. + +-* Stats retrieval using ``dpdk-procinfo`` is not supported for virtual devices like PCAP and TAP. ++* Stats retrieval using ``dpdk-proc-info`` is not supported for virtual devices like PCAP and TAP. + +-* Since default DPDK EAL arguments for ``dpdk-procinfo`` are ``-c1, -n4 & --proc-type=secondary``, ++* Since default DPDK EAL arguments for ``dpdk-proc-info`` are ``-c1, -n4 & --proc-type=secondary``, + It is not expected that the user passes any EAL arguments. +diff --git a/dpdk/doc/guides/windows_gsg/build_dpdk.rst b/dpdk/doc/guides/windows_gsg/build_dpdk.rst +index 38b3068d7b..29f2b38feb 100644 +--- a/dpdk/doc/guides/windows_gsg/build_dpdk.rst ++++ b/dpdk/doc/guides/windows_gsg/build_dpdk.rst +@@ -104,7 +104,7 @@ To compile the examples, the flag ``-Dexamples`` is required. + .. code-block:: console + + cd C:\Users\me\dpdk +- meson -Dexamples=helloworld build ++ meson setup -Dexamples=helloworld build + ninja -C build + + Option 2. Cross-Compile with MinGW-w64 +@@ -115,5 +115,5 @@ Depending on the distribution, paths in this file may need adjustments. + + .. 
code-block:: console + +- meson --cross-file config/x86/cross-mingw -Dexamples=helloworld build ++ meson setup --cross-file config/x86/cross-mingw -Dexamples=helloworld build + ninja -C build diff --git a/dpdk/drivers/baseband/acc100/acc100_pf_enum.h b/dpdk/drivers/baseband/acc100/acc100_pf_enum.h index a1ee416d26..2fba667627 100644 --- a/dpdk/drivers/baseband/acc100/acc100_pf_enum.h @@ -10235,7 +21703,7 @@ index a1ee416d26..2fba667627 100644 HWPfFecUl5gI2MThreshReg = 0x00BC0004, HWPfFecUl5gVersionReg = 0x00BC0100, diff --git a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c -index 1c6080f2f8..6cdc6e65f7 100644 +index 1c6080f2f8..d84a8839e0 100644 --- a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c +++ b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.c @@ -141,8 +141,8 @@ aqDepth(int qg_idx, struct rte_acc100_conf *acc100_conf) @@ -10249,7 +21717,123 @@ index 1c6080f2f8..6cdc6e65f7 100644 } /* Return the AQ depth for a Queue Group Index */ -@@ -1236,6 +1236,8 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index) +@@ -423,11 +423,12 @@ acc100_check_ir(struct acc100_device *acc100_dev) + while (ring_data->valid) { + if ((ring_data->int_nb < ACC100_PF_INT_DMA_DL_DESC_IRQ) || ( + ring_data->int_nb > +- ACC100_PF_INT_DMA_DL5G_DESC_IRQ)) ++ ACC100_PF_INT_DMA_DL5G_DESC_IRQ)) { + rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x", + ring_data->int_nb, ring_data->detailed_info); +- /* Initialize Info Ring entry and move forward */ +- ring_data->val = 0; ++ /* Initialize Info Ring entry and move forward */ ++ ring_data->val = 0; ++ } + info_ring_head++; + ring_data = acc100_dev->info_ring + + (info_ring_head & ACC100_INFO_RING_MASK); +@@ -660,7 +661,8 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) + acc100_reg_write(d, reg_addr->ring_size, value); + + /* Configure tail pointer for use when SDONE enabled */ +- d->tail_ptrs = rte_zmalloc_socket( ++ if (d->tail_ptrs == NULL) ++ d->tail_ptrs = rte_zmalloc_socket( + dev->device->driver->name, + ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t), + RTE_CACHE_LINE_SIZE, socket_id); +@@ -668,8 +670,8 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) + rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u", + dev->device->driver->name, + dev->data->dev_id); +- rte_free(d->sw_rings); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto free_sw_rings; + } + d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs); + +@@ -692,15 +694,16 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) + /* Continue */ + } + +- d->harq_layout = rte_zmalloc_socket("HARQ Layout", ++ if (d->harq_layout == NULL) ++ d->harq_layout = rte_zmalloc_socket("HARQ Layout", + ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout), + RTE_CACHE_LINE_SIZE, dev->data->socket_id); + if (d->harq_layout == NULL) { + rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u", + dev->device->driver->name, + dev->data->dev_id); +- rte_free(d->sw_rings); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto free_tail_ptrs; + } + + /* Mark as configured properly */ +@@ -711,6 +714,15 @@ acc100_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id) + PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova); + + return 0; ++ ++free_tail_ptrs: ++ rte_free(d->tail_ptrs); ++ d->tail_ptrs = NULL; ++free_sw_rings: ++ rte_free(d->sw_rings_base); ++ d->sw_rings = NULL; ++ ++ return ret; + } + + static int +@@ -767,7 +779,11 @@ acc100_dev_close(struct rte_bbdev 
*dev) + rte_free(d->tail_ptrs); + rte_free(d->info_ring); + rte_free(d->sw_rings_base); ++ rte_free(d->harq_layout); + d->sw_rings_base = NULL; ++ d->tail_ptrs = NULL; ++ d->info_ring = NULL; ++ d->harq_layout = NULL; + } + /* Ensure all in flight HW transactions are completed */ + usleep(ACC100_LONG_WAIT); +@@ -824,6 +840,10 @@ acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, + struct acc100_queue *q; + int16_t q_idx; + ++ if (d == NULL) { ++ rte_bbdev_log(ERR, "Undefined device"); ++ return -ENODEV; ++ } + /* Allocate the queue data structure. */ + q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q), + RTE_CACHE_LINE_SIZE, conf->socket); +@@ -831,10 +851,6 @@ acc100_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, + rte_bbdev_log(ERR, "Failed to allocate queue memory"); + return -ENOMEM; + } +- if (d == NULL) { +- rte_bbdev_log(ERR, "Undefined device"); +- return -ENODEV; +- } + + q->d = d; + q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id)); +@@ -1082,7 +1098,7 @@ acc100_dev_info_get(struct rte_bbdev *dev, + d->acc100_conf.q_ul_4g.num_qgroups - 1; + dev_info->default_queue_conf = default_queue_conf; + dev_info->cpu_flag_reqs = NULL; +- dev_info->min_alignment = 64; ++ dev_info->min_alignment = 1; + dev_info->capabilities = bbdev_capabilities; + #ifdef ACC100_EXT_MEM + dev_info->harq_buffer_size = d->ddr_size; +@@ -1236,6 +1252,8 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index) return (bg == 1 ? ACC100_K0_3_1 : ACC100_K0_3_2) * z_c; } /* LBRM case - includes a division by N */ @@ -10258,7 +21842,164 @@ index 1c6080f2f8..6cdc6e65f7 100644 if (rv_index == 1) return (((bg == 1 ? ACC100_K0_1_1 : ACC100_K0_1_2) * n_cb) / n) * z_c; -@@ -1460,8 +1462,7 @@ acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc, +@@ -1288,13 +1306,14 @@ acc100_fcw_td_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_td *fcw) + + /* Fill in a frame control word for LDPC decoding. */ + static inline void +-acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, ++acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + union acc100_harq_layout_data *harq_layout) + { + uint16_t harq_out_length, harq_in_length, ncb_p, k0_p, parity_offset; + uint16_t harq_index; + uint32_t l; + bool harq_prun = false; ++ uint32_t max_hc_in; + + fcw->qm = op->ldpc_dec.q_m; + fcw->nfiller = op->ldpc_dec.n_filler; +@@ -1311,6 +1330,14 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + op->ldpc_dec.tb_params.ea : + op->ldpc_dec.tb_params.eb; + ++ if (unlikely(check_bit(op->ldpc_dec.op_flags, ++ RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) && ++ (op->ldpc_dec.harq_combined_input.length == 0))) { ++ rte_bbdev_log(WARNING, "Null HARQ input size provided"); ++ /* Disable HARQ input in that case to carry forward. 
*/ ++ op->ldpc_dec.op_flags ^= RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE; ++ } ++ + fcw->hcin_en = check_bit(op->ldpc_dec.op_flags, + RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE); + fcw->hcout_en = check_bit(op->ldpc_dec.op_flags, +@@ -1344,13 +1371,21 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + harq_in_length = op->ldpc_dec.harq_combined_input.length; + if (fcw->hcin_decomp_mode > 0) + harq_in_length = harq_in_length * 8 / 6; +- harq_in_length = RTE_ALIGN(harq_in_length, 64); +- if ((harq_layout[harq_index].offset > 0) & harq_prun) { ++ harq_in_length = RTE_MIN(harq_in_length, op->ldpc_dec.n_cb ++ - op->ldpc_dec.n_filler); ++ ++ /* Alignment on next 64B - Already enforced from HC output */ ++ harq_in_length = RTE_ALIGN_CEIL(harq_in_length, ACC100_HARQ_ALIGN_64B); ++ ++ /* Stronger alignment requirement when in decompression mode */ ++ if (fcw->hcin_decomp_mode > 0) ++ harq_in_length = RTE_ALIGN_FLOOR(harq_in_length, ACC100_HARQ_ALIGN_COMP); ++ ++ if ((harq_layout[harq_index].offset > 0) && harq_prun) { + rte_bbdev_log_debug("HARQ IN offset unexpected for now\n"); + fcw->hcin_size0 = harq_layout[harq_index].size0; + fcw->hcin_offset = harq_layout[harq_index].offset; +- fcw->hcin_size1 = harq_in_length - +- harq_layout[harq_index].offset; ++ fcw->hcin_size1 = harq_in_length - harq_layout[harq_index].offset; + } else { + fcw->hcin_size0 = harq_in_length; + fcw->hcin_offset = 0; +@@ -1362,6 +1397,21 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + fcw->hcin_size1 = 0; + } + ++ /* Enforce additional check on FCW validity */ ++ max_hc_in = RTE_ALIGN_CEIL(fcw->ncb - fcw->nfiller, ACC100_HARQ_ALIGN_64B); ++ if ((fcw->hcin_size0 > max_hc_in) || ++ (fcw->hcin_size1 + fcw->hcin_offset > max_hc_in) || ++ ((fcw->hcin_size0 > fcw->hcin_offset) && ++ (fcw->hcin_size1 != 0))) { ++ rte_bbdev_log(ERR, " Invalid FCW : HCIn %d %d %d, Ncb %d F %d", ++ fcw->hcin_size0, fcw->hcin_size1, ++ fcw->hcin_offset, ++ fcw->ncb, fcw->nfiller); ++ /* Disable HARQ input in that case to carry forward */ ++ op->ldpc_dec.op_flags ^= RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE; ++ fcw->hcin_en = 0; ++ } ++ + fcw->itmax = op->ldpc_dec.iter_max; + fcw->itstop = check_bit(op->ldpc_dec.op_flags, + RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE); +@@ -1386,15 +1436,27 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + if (fcw->hcout_en > 0) { + parity_offset = (op->ldpc_dec.basegraph == 1 ? 20 : 8) + * op->ldpc_dec.z_c - op->ldpc_dec.n_filler; +- k0_p = (fcw->k0 > parity_offset) ? +- fcw->k0 - op->ldpc_dec.n_filler : fcw->k0; ++ k0_p = (fcw->k0 > parity_offset) ? 
fcw->k0 - op->ldpc_dec.n_filler : fcw->k0; + ncb_p = fcw->ncb - op->ldpc_dec.n_filler; +- l = k0_p + fcw->rm_e; ++ l = RTE_MIN(k0_p + fcw->rm_e, INT16_MAX); + harq_out_length = (uint16_t) fcw->hcin_size0; +- harq_out_length = RTE_MIN(RTE_MAX(harq_out_length, l), ncb_p); +- harq_out_length = (harq_out_length + 0x3F) & 0xFFC0; +- if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) && +- harq_prun) { ++ harq_out_length = RTE_MAX(harq_out_length, l); ++ ++ /* Stronger alignment when in compression mode */ ++ if (fcw->hcout_comp_mode > 0) ++ harq_out_length = RTE_ALIGN_CEIL(harq_out_length, ACC100_HARQ_ALIGN_COMP); ++ ++ /* Cannot exceed the pruned Ncb circular buffer */ ++ harq_out_length = RTE_MIN(harq_out_length, ncb_p); ++ ++ /* Alignment on next 64B */ ++ harq_out_length = RTE_ALIGN_CEIL(harq_out_length, ACC100_HARQ_ALIGN_64B); ++ ++ /* Stronger alignment when in compression mode enforced again */ ++ if (fcw->hcout_comp_mode > 0) ++ harq_out_length = RTE_ALIGN_FLOOR(harq_out_length, ACC100_HARQ_ALIGN_COMP); ++ ++ if ((k0_p > fcw->hcin_size0 + ACC100_HARQ_OFFSET_THRESHOLD) && harq_prun) { + fcw->hcout_size0 = (uint16_t) fcw->hcin_size0; + fcw->hcout_offset = k0_p & 0xFFC0; + fcw->hcout_size1 = harq_out_length - fcw->hcout_offset; +@@ -1403,6 +1465,14 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + fcw->hcout_size1 = 0; + fcw->hcout_offset = 0; + } ++ ++ if (fcw->hcout_size0 == 0) { ++ rte_bbdev_log(ERR, " Invalid FCW : HCout %d", ++ fcw->hcout_size0); ++ op->ldpc_dec.op_flags ^= RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE; ++ fcw->hcout_en = 0; ++ } ++ + harq_layout[harq_index].offset = fcw->hcout_offset; + harq_layout[harq_index].size0 = fcw->hcout_size0; + } else { +@@ -1432,6 +1502,8 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + * Store information about device capabilities + * @param next_triplet + * Index for ACC100 DMA Descriptor triplet ++ * @param scattergather ++ * Flag to support scatter-gather for the mbuf + * + * @return + * Returns index of next triplet on success, other value if lengths of +@@ -1441,12 +1513,16 @@ acc100_fcw_ld_fill(const struct rte_bbdev_dec_op *op, struct acc100_fcw_ld *fcw, + static inline int + acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc, + struct rte_mbuf **input, uint32_t *offset, uint32_t cb_len, +- uint32_t *seg_total_left, int next_triplet) ++ uint32_t *seg_total_left, int next_triplet, ++ bool scattergather) + { + uint32_t part_len; + struct rte_mbuf *m = *input; + +- part_len = (*seg_total_left < cb_len) ? *seg_total_left : cb_len; ++ if (scattergather) ++ part_len = (*seg_total_left < cb_len) ? 
*seg_total_left : cb_len; ++ else ++ part_len = cb_len; + cb_len -= part_len; + *seg_total_left -= part_len; + +@@ -1460,8 +1536,7 @@ acc100_dma_fill_blk_type_in(struct acc100_dma_req_desc *desc, next_triplet++; while (cb_len > 0) { @@ -10268,7 +22009,69 @@ index 1c6080f2f8..6cdc6e65f7 100644 m = m->next; *seg_total_left = rte_pktmbuf_data_len(m); -@@ -1765,6 +1766,10 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op, +@@ -1583,7 +1658,9 @@ acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op, + } + + next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, +- length, seg_total_left, next_triplet); ++ length, seg_total_left, next_triplet, ++ check_bit(op->turbo_enc.op_flags, ++ RTE_BBDEV_TURBO_ENC_SCATTER_GATHER)); + if (unlikely(next_triplet < 0)) { + rte_bbdev_log(ERR, + "Mismatch between data to process and mbuf data length in bbdev_op: %p", +@@ -1619,6 +1696,19 @@ acc100_dma_desc_te_fill(struct rte_bbdev_enc_op *op, + return 0; + } + ++/* May need to pad LDPC Encoder input to avoid small beat for ACC100. */ ++static inline uint16_t ++pad_le_in(uint16_t blen) ++{ ++ uint16_t last_beat; ++ ++ last_beat = blen % 64; ++ if ((last_beat > 0) && (last_beat <= 8)) ++ blen += 8; ++ ++ return blen; ++} ++ + static inline int + acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op, + struct acc100_dma_req_desc *desc, struct rte_mbuf **input, +@@ -1634,8 +1724,7 @@ acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op, + + K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c; + in_length_in_bits = K - enc->n_filler; +- if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) || +- (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)) ++ if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH) + in_length_in_bits -= 24; + in_length_in_bytes = in_length_in_bits >> 3; + +@@ -1648,8 +1737,7 @@ acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op, + } + + next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, +- in_length_in_bytes, +- seg_total_left, next_triplet); ++ pad_le_in(in_length_in_bytes), seg_total_left, next_triplet, false); + if (unlikely(next_triplet < 0)) { + rte_bbdev_log(ERR, + "Mismatch between data to process and mbuf data length in bbdev_op: %p", +@@ -1737,7 +1825,9 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op, + } + + next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, kw, +- seg_total_left, next_triplet); ++ seg_total_left, next_triplet, ++ check_bit(op->turbo_dec.op_flags, ++ RTE_BBDEV_TURBO_DEC_SCATTER_GATHER)); + if (unlikely(next_triplet < 0)) { + rte_bbdev_log(ERR, + "Mismatch between data to process and mbuf data length in bbdev_op: %p", +@@ -1765,6 +1855,10 @@ acc100_dma_desc_td_fill(struct rte_bbdev_dec_op *op, /* Soft output */ if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_SOFT_OUTPUT)) { @@ -10279,7 +22082,496 @@ index 1c6080f2f8..6cdc6e65f7 100644 if (check_bit(op->turbo_dec.op_flags, RTE_BBDEV_TURBO_EQUALIZER)) *s_out_length = e; -@@ -4413,7 +4418,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -1835,7 +1929,9 @@ acc100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op, + + next_triplet = acc100_dma_fill_blk_type_in(desc, input, + in_offset, input_length, +- seg_total_left, next_triplet); ++ seg_total_left, next_triplet, ++ check_bit(op->ldpc_dec.op_flags, ++ RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)); + + if (unlikely(next_triplet < 0)) { + rte_bbdev_log(ERR, +@@ -1978,6 +2074,7 @@ acc100_dma_enqueue(struct acc100_queue *q, uint16_t n, + struct rte_bbdev_stats *queue_stats) + { + union acc100_enqueue_reg_fmt 
enq_req; ++ union acc100_dma_desc *desc; + #ifdef RTE_BBDEV_OFFLOAD_COST + uint64_t start_time = 0; + queue_stats->acc_offload_cycles = 0; +@@ -1985,13 +2082,17 @@ acc100_dma_enqueue(struct acc100_queue *q, uint16_t n, + RTE_SET_USED(queue_stats); + #endif + ++ /* Set Sdone and IRQ enable bit on last descriptor. */ ++ desc = q->ring_addr + ((q->sw_ring_head + n - 1) & q->sw_ring_wrap_mask); ++ desc->req.sdone_enable = 1; ++ desc->req.irq_enable = q->irq_enable; ++ + enq_req.val = 0; + /* Setting offset, 100b for 256 DMA Desc */ + enq_req.addr_offset = ACC100_DESC_OFFSET; + + /* Split ops into batches */ + do { +- union acc100_dma_desc *desc; + uint16_t enq_batch_size; + uint64_t offset; + rte_iova_t req_elem_addr; +@@ -2081,6 +2182,11 @@ validate_enc_op(struct rte_bbdev_enc_op *op) + return -1; + } + ++ if (unlikely(turbo_enc->input.length == 0)) { ++ rte_bbdev_log(ERR, "input length null"); ++ return -1; ++ } ++ + if (turbo_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) { + tb = &turbo_enc->tb_params; + if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE +@@ -2100,11 +2206,12 @@ validate_enc_op(struct rte_bbdev_enc_op *op) + RTE_BBDEV_TURBO_MAX_CB_SIZE); + return -1; + } +- if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1)) ++ if (unlikely(tb->c_neg > 0)) { + rte_bbdev_log(ERR, +- "c_neg (%u) is out of range 0 <= value <= %u", +- tb->c_neg, +- RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1); ++ "c_neg (%u) expected to be null", ++ tb->c_neg); ++ return -1; ++ } + if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) { + rte_bbdev_log(ERR, + "c (%u) is out of range 1 <= value <= %u", +@@ -2364,7 +2471,7 @@ enqueue_ldpc_enc_n_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ops, + acc100_header_init(&desc->req); + desc->req.numCBs = num; + +- in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len; ++ in_length_in_bytes = pad_le_in(ops[0]->ldpc_enc.input.data->data_len); + out_length = (enc->cb_params.e + 7) >> 3; + desc->req.m2dlen = 1 + num; + desc->req.d2mlen = num; +@@ -2491,6 +2598,10 @@ enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op, + r = op->turbo_enc.tb_params.r; + + while (mbuf_total_left > 0 && r < c) { ++ if (unlikely(input == NULL)) { ++ rte_bbdev_log(ERR, "Not enough input segment"); ++ return -EINVAL; ++ } + seg_total_left = rte_pktmbuf_data_len(input) - in_offset; + /* Set up DMA descriptor */ + desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs) +@@ -2533,7 +2644,6 @@ enqueue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op *op, + + /* Set SDone on last CB descriptor for TB mode. 
*/ + desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; + + return current_enqueued_cbs; + } +@@ -2596,6 +2706,11 @@ validate_dec_op(struct rte_bbdev_dec_op *op) + return -1; + } + ++ if (unlikely(turbo_dec->input.length == 0)) { ++ rte_bbdev_log(ERR, "input length null"); ++ return -1; ++ } ++ + if (turbo_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) { + tb = &turbo_dec->tb_params; + if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE +@@ -2616,11 +2731,13 @@ validate_dec_op(struct rte_bbdev_dec_op *op) + RTE_BBDEV_TURBO_MAX_CB_SIZE); + return -1; + } +- if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1)) ++ if (unlikely(tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))) { + rte_bbdev_log(ERR, + "c_neg (%u) is out of range 0 <= value <= %u", + tb->c_neg, + RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1); ++ return -1; ++ } + if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) { + rte_bbdev_log(ERR, + "c (%u) is out of range 1 <= value <= %u", +@@ -2963,10 +3080,9 @@ enqueue_ldpc_dec_one_op_cb(struct acc100_queue *q, struct rte_bbdev_dec_op *op, + fcw = &desc->req.fcw_ld; + acc100_fcw_ld_fill(op, fcw, harq_layout); + +- /* Special handling when overusing mbuf */ +- if (fcw->rm_e < ACC100_MAX_E_MBUF) +- seg_total_left = rte_pktmbuf_data_len(input) +- - in_offset; ++ /* Special handling when using mbuf or not */ ++ if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)) ++ seg_total_left = rte_pktmbuf_data_len(input) - in_offset; + else + seg_total_left = fcw->rm_e; + +@@ -3041,7 +3157,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op, + + while (mbuf_total_left > 0 && r < c) { + +- seg_total_left = rte_pktmbuf_data_len(input) - in_offset; ++ if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)) ++ seg_total_left = rte_pktmbuf_data_len(input) - in_offset; ++ else ++ seg_total_left = op->ldpc_dec.input.length; + + /* Set up DMA descriptor */ + desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs) +@@ -3068,7 +3187,9 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op, + rte_memdump(stderr, "Req Desc.", desc, sizeof(*desc)); + #endif + +- if (seg_total_left == 0) { ++ if (check_bit(op->ldpc_dec.op_flags, ++ RTE_BBDEV_LDPC_DEC_SCATTER_GATHER) ++ && (seg_total_left == 0)) { + /* Go to the next mbuf */ + input = input->next; + in_offset = 0; +@@ -3086,7 +3207,6 @@ enqueue_ldpc_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op, + #endif + /* Set SDone on last CB descriptor for TB mode */ + desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; + + return current_enqueued_cbs; + } +@@ -3188,7 +3308,6 @@ enqueue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op *op, + #endif + /* Set SDone on last CB descriptor for TB mode */ + desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; + + return current_enqueued_cbs; + } +@@ -3269,15 +3388,28 @@ get_num_cbs_in_tb_ldpc_dec(struct rte_bbdev_op_ldpc_dec *ldpc_dec) + return cbs_in_tb; + } + ++/* Number of available descriptor in ring to enqueue */ ++static inline uint32_t ++acc100_ring_avail_enq(struct acc100_queue *q) ++{ ++ return (q->sw_ring_depth - 1 + q->sw_ring_tail - q->sw_ring_head) & q->sw_ring_wrap_mask; ++} ++ ++/* Number of available descriptor in ring to dequeue */ ++static inline uint32_t ++acc100_ring_avail_deq(struct acc100_queue *q) ++{ ++ return (q->sw_ring_depth + q->sw_ring_head - q->sw_ring_tail) & q->sw_ring_wrap_mask; ++} ++ + /* Enqueue encode operations 
for ACC100 device in CB mode. */ + static uint16_t + acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i; +- union acc100_dma_desc *desc; + int ret; + + for (i = 0; i < num; ++i) { +@@ -3294,12 +3426,6 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data, + if (unlikely(i == 0)) + return 0; /* Nothing to enqueue */ + +- /* Set SDone in last CB in enqueued ops for CB mode*/ +- desc = q->ring_addr + ((q->sw_ring_head + i - 1) +- & q->sw_ring_wrap_mask); +- desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; +- + acc100_dma_enqueue(q, i, &q_data->queue_stats); + + /* Update stats */ +@@ -3331,9 +3457,8 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i = 0; +- union acc100_dma_desc *desc; + int ret, desc_idx = 0; + int16_t enq, left = num; + +@@ -3361,12 +3486,6 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data, + if (unlikely(i == 0)) + return 0; /* Nothing to enqueue */ + +- /* Set SDone in last CB in enqueued ops for CB mode*/ +- desc = q->ring_addr + ((q->sw_ring_head + desc_idx - 1) +- & q->sw_ring_wrap_mask); +- desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; +- + acc100_dma_enqueue(q, desc_idx, &q_data->queue_stats); + + /* Update stats */ +@@ -3382,7 +3501,7 @@ acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i, enqueued_cbs = 0; + uint8_t cbs_in_tb; + int ret; +@@ -3411,12 +3530,27 @@ acc100_enqueue_enc_tb(struct rte_bbdev_queue_data *q_data, + return i; + } + ++/* Check room in AQ for the enqueues batches into Qmgr */ ++static inline int32_t ++acc100_aq_avail(struct rte_bbdev_queue_data *q_data, uint16_t num_ops) ++{ ++ struct acc100_queue *q = q_data->queue_private; ++ int32_t aq_avail = q->aq_depth - ++ ((q->aq_enqueued - q->aq_dequeued + ++ ACC100_MAX_QUEUE_DEPTH) % ACC100_MAX_QUEUE_DEPTH) ++ - (num_ops >> 7); ++ ++ return aq_avail; ++} ++ + /* Enqueue encode operations for ACC100 device. 
*/ + static uint16_t + acc100_enqueue_enc(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t num) + { +- if (unlikely(num == 0)) ++ int32_t aq_avail = acc100_aq_avail(q_data, num); ++ ++ if (unlikely((aq_avail <= 0) || (num == 0))) + return 0; + if (ops[0]->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) + return acc100_enqueue_enc_tb(q_data, ops, num); +@@ -3429,7 +3563,9 @@ static uint16_t + acc100_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t num) + { +- if (unlikely(num == 0)) ++ int32_t aq_avail = acc100_aq_avail(q_data, num); ++ ++ if (unlikely((aq_avail <= 0) || (num == 0))) + return 0; + if (ops[0]->ldpc_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) + return acc100_enqueue_enc_tb(q_data, ops, num); +@@ -3444,9 +3580,8 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i; +- union acc100_dma_desc *desc; + int ret; + + for (i = 0; i < num; ++i) { +@@ -3463,12 +3598,6 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data, + if (unlikely(i == 0)) + return 0; /* Nothing to enqueue */ + +- /* Set SDone in last CB in enqueued ops for CB mode*/ +- desc = q->ring_addr + ((q->sw_ring_head + i - 1) +- & q->sw_ring_wrap_mask); +- desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; +- + acc100_dma_enqueue(q, i, &q_data->queue_stats); + + /* Update stats */ +@@ -3497,7 +3626,7 @@ acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i, enqueued_cbs = 0; + uint8_t cbs_in_tb; + int ret; +@@ -3515,6 +3644,8 @@ acc100_enqueue_ldpc_dec_tb(struct rte_bbdev_queue_data *q_data, + break; + enqueued_cbs += ret; + } ++ if (unlikely(enqueued_cbs == 0)) ++ return 0; /* Nothing to enqueue */ + + acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats); + +@@ -3530,9 +3661,8 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i; +- union acc100_dma_desc *desc; + int ret; + bool same_op = false; + for (i = 0; i < num; ++i) { +@@ -3558,13 +3688,6 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data, + if (unlikely(i == 0)) + return 0; /* Nothing to enqueue */ + +- /* Set SDone in last CB in enqueued ops for CB mode*/ +- desc = q->ring_addr + ((q->sw_ring_head + i - 1) +- & q->sw_ring_wrap_mask); +- +- desc->req.sdone_enable = 1; +- desc->req.irq_enable = q->irq_enable; +- + acc100_dma_enqueue(q, i, &q_data->queue_stats); + + /* Update stats */ +@@ -3580,7 +3703,7 @@ acc100_enqueue_dec_tb(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- int32_t avail = q->sw_ring_depth + q->sw_ring_tail - q->sw_ring_head; ++ int32_t avail = acc100_ring_avail_enq(q); + uint16_t i, enqueued_cbs = 0; + uint8_t cbs_in_tb; + int ret; +@@ -3612,7 +3735,9 @@ static uint16_t + acc100_enqueue_dec(struct 
rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t num) + { +- if (unlikely(num == 0)) ++ int32_t aq_avail = acc100_aq_avail(q_data, num); ++ ++ if (unlikely((aq_avail <= 0) || (num == 0))) + return 0; + if (ops[0]->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) + return acc100_enqueue_dec_tb(q_data, ops, num); +@@ -3625,11 +3750,9 @@ static uint16_t + acc100_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_dec_op **ops, uint16_t num) + { +- struct acc100_queue *q = q_data->queue_private; +- int32_t aq_avail = q->aq_depth + +- (q->aq_dequeued - q->aq_enqueued) / 128; ++ int32_t aq_avail = acc100_aq_avail(q_data, num); + +- if (unlikely((aq_avail == 0) || (num == 0))) ++ if (unlikely((aq_avail <= 0) || (num == 0))) + return 0; + + if (ops[0]->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) +@@ -3667,8 +3790,6 @@ dequeue_enc_one_op_cb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op, + /* Clearing status, it will be set based on response */ + op->status = 0; + +- op->status |= ((rsp.input_err) +- ? (1 << RTE_BBDEV_DATA_ERROR) : 0); + op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + +@@ -3739,8 +3860,6 @@ dequeue_enc_one_op_tb(struct acc100_queue *q, struct rte_bbdev_enc_op **ref_op, + rte_bbdev_log_debug("Resp. desc %p: %x", desc, + rsp.val); + +- op->status |= ((rsp.input_err) +- ? (1 << RTE_BBDEV_DATA_ERROR) : 0); + op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + +@@ -3920,16 +4039,19 @@ dequeue_dec_one_op_tb(struct acc100_queue *q, struct rte_bbdev_dec_op **ref_op, + rte_bbdev_log_debug("Resp. desc %p: %x", desc, + rsp.val); + +- op->status |= ((rsp.input_err) +- ? (1 << RTE_BBDEV_DATA_ERROR) : 0); ++ op->status |= ((rsp.input_err) ? (1 << RTE_BBDEV_DATA_ERROR) : 0); + op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); + op->status |= ((rsp.fcw_err) ? 
(1 << RTE_BBDEV_DRV_ERROR) : 0); + + /* CRC invalid if error exists */ + if (!op->status) + op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR; +- op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, +- op->turbo_dec.iter_count); ++ if (q->op_type == RTE_BBDEV_OP_LDPC_DEC) ++ op->ldpc_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, ++ op->ldpc_dec.iter_count); ++ else ++ op->turbo_dec.iter_count = RTE_MAX((uint8_t) rsp.iter_cnt, ++ op->turbo_dec.iter_count); + + /* Check if this is the last desc in batch (Atomic Queue) */ + if (desc->req.last_desc_in_batch) { +@@ -3955,7 +4077,7 @@ acc100_dequeue_enc(struct rte_bbdev_queue_data *q_data, + { + struct acc100_queue *q = q_data->queue_private; + uint16_t dequeue_num; +- uint32_t avail = q->sw_ring_head - q->sw_ring_tail; ++ uint32_t avail = acc100_ring_avail_deq(q); + uint32_t aq_dequeued = 0; + uint16_t i, dequeued_cbs = 0; + struct rte_bbdev_enc_op *op; +@@ -4000,7 +4122,7 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, + struct rte_bbdev_enc_op **ops, uint16_t num) + { + struct acc100_queue *q = q_data->queue_private; +- uint32_t avail = q->sw_ring_head - q->sw_ring_tail; ++ uint32_t avail = acc100_ring_avail_deq(q); + uint32_t aq_dequeued = 0; + uint16_t dequeue_num, i, dequeued_cbs = 0, dequeued_descs = 0; + int ret; +@@ -4040,7 +4162,7 @@ acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data, + { + struct acc100_queue *q = q_data->queue_private; + uint16_t dequeue_num; +- uint32_t avail = q->sw_ring_head - q->sw_ring_tail; ++ uint32_t avail = acc100_ring_avail_deq(q); + uint32_t aq_dequeued = 0; + uint16_t i; + uint16_t dequeued_cbs = 0; +@@ -4057,6 +4179,8 @@ acc100_dequeue_dec(struct rte_bbdev_queue_data *q_data, + for (i = 0; i < dequeue_num; ++i) { + op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) + & q->sw_ring_wrap_mask))->req.op_addr; ++ if (unlikely(op == NULL)) ++ break; + if (op->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) + ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs, + &aq_dequeued); +@@ -4085,7 +4209,7 @@ acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data, + { + struct acc100_queue *q = q_data->queue_private; + uint16_t dequeue_num; +- uint32_t avail = q->sw_ring_head - q->sw_ring_tail; ++ uint32_t avail = acc100_ring_avail_deq(q); + uint32_t aq_dequeued = 0; + uint16_t i; + uint16_t dequeued_cbs = 0; +@@ -4102,6 +4226,8 @@ acc100_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data, + for (i = 0; i < dequeue_num; ++i) { + op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) + & q->sw_ring_wrap_mask))->req.op_addr; ++ if (unlikely(op == NULL)) ++ break; + if (op->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) + ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs, + &aq_dequeued); +@@ -4413,7 +4539,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) { rte_bbdev_log(INFO, "rte_acc100_configure"); uint32_t value, address, status; @@ -10288,7 +22580,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name); /* Compile time checks */ -@@ -4433,6 +4438,9 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4433,6 +4559,9 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) /* Store configuration */ rte_memcpy(&d->acc100_conf, conf, sizeof(d->acc100_conf)); @@ -10298,7 +22590,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 /* PCIe Bridge configuration */ acc100_reg_write(d, HwPfPcieGpexBridgeControl, ACC100_CFG_PCI_BRIDGE); for (i = 1; i < 
ACC100_GPEX_AXIMAP_NUM; i++) -@@ -4453,20 +4461,9 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4453,20 +4582,9 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) value = 1; acc100_reg_write(d, address, value); @@ -10322,7 +22614,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 acc100_reg_write(d, address, value); /* Set default descriptor signature */ -@@ -4484,6 +4481,17 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4484,6 +4602,17 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) address = HWPfDmaAxcacheReg; acc100_reg_write(d, address, value); @@ -10340,7 +22632,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 /* Default DMA Configuration (Qmgr Enabled) */ address = HWPfDmaConfig0Reg; value = 0; -@@ -4502,6 +4510,11 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4502,6 +4631,11 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) value = HWPfQmgrEgressQueuesTemplate; acc100_reg_write(d, address, value); @@ -10352,7 +22644,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 /* ===== Qmgr Configuration ===== */ /* Configuration of the AQueue Depth QMGR_GRP_0_DEPTH_LOG2 for UL */ int totalQgs = conf->q_ul_4g.num_qgroups + -@@ -4520,22 +4533,17 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4520,22 +4654,17 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) } /* Template Priority in incremental order */ @@ -10380,7 +22672,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 value = ACC100_TMPL_PRI_3; acc100_reg_write(d, address, value); } -@@ -4586,9 +4594,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4586,9 +4715,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) numEngines++; } else acc100_reg_write(d, address, 0); @@ -10390,7 +22682,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 } printf("Number of 5GUL engines %d\n", numEngines); /* 4GDL */ -@@ -4603,9 +4608,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4603,9 +4729,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) address = HWPfQmgrGrpTmplateReg4Indx + ACC100_BYTES_IN_WORD * template_idx; acc100_reg_write(d, address, value); @@ -10400,7 +22692,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 } /* 5GDL */ numQqsAcc += numQgs; -@@ -4619,13 +4621,10 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4619,13 +4742,10 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) address = HWPfQmgrGrpTmplateReg4Indx + ACC100_BYTES_IN_WORD * template_idx; acc100_reg_write(d, address, value); @@ -10415,7 +22707,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 address = HWPfQmgrGrpFunction0; value = 0; for (qg_idx = 0; qg_idx < 8; qg_idx++) { -@@ -4656,7 +4655,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4656,7 +4776,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) } } @@ -10424,7 +22716,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 uint32_t aram_address = 0; for (qg_idx = 0; qg_idx < totalQgs; qg_idx++) { for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) { -@@ -4681,6 +4680,11 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4681,6 +4801,11 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) /* ==== HI Configuration ==== */ @@ -10436,7 +22728,7 @@ index 1c6080f2f8..6cdc6e65f7 
100644 /* Prevent Block on Transmit Error */ address = HWPfHiBlockTransmitOnErrorEn; value = 0; -@@ -4693,10 +4697,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4693,10 +4818,6 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) address = HWPfHiPfMode; value = (conf->pf_mode_en) ? ACC100_PF_VAL : 0; acc100_reg_write(d, address, value); @@ -10447,7 +22739,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 /* QoS overflow init */ value = 1; -@@ -4706,7 +4706,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4706,7 +4827,7 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) acc100_reg_write(d, address, value); /* HARQ DDR Configuration */ @@ -10456,7 +22748,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 for (vf_idx = 0; vf_idx < conf->num_vf_bundles; vf_idx++) { address = HWPfDmaVfDdrBaseRw + vf_idx * 0x10; -@@ -4720,6 +4720,88 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) +@@ -4720,6 +4841,88 @@ rte_acc100_configure(const char *dev_name, struct rte_acc100_conf *conf) if (numEngines < (ACC100_SIG_UL_5G_LAST + 1)) poweron_cleanup(bbdev, d, conf); @@ -10546,7 +22838,7 @@ index 1c6080f2f8..6cdc6e65f7 100644 return 0; } diff --git a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h -index 03ed0b3e1a..071b37cf9d 100644 +index 03ed0b3e1a..9dcdf4653b 100644 --- a/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h +++ b/dpdk/drivers/baseband/acc100/rte_acc100_pmd.h @@ -31,11 +31,6 @@ @@ -10582,7 +22874,7 @@ index 03ed0b3e1a..071b37cf9d 100644 #define ACC100_ENGINE_OFFSET 0x1000 #define ACC100_RESET_HI 0x20100 #define ACC100_RESET_LO 0x20000 -@@ -159,6 +161,15 @@ +@@ -159,6 +161,17 @@ #define ACC100_ENGINES_MAX 9 #define ACC100_LONG_WAIT 1000 #define ACC100_GPEX_AXIMAP_NUM 17 @@ -10595,9 +22887,55 @@ index 03ed0b3e1a..071b37cf9d 100644 +#define ACC100_PRQ_DDR_VER 0x10092020 +#define ACC100_MS_IN_US (1000) +#define ACC100_DDR_TRAINING_MAX (5000) ++#define ACC100_HARQ_ALIGN_COMP 256 ++#define ACC100_HARQ_ALIGN_64B 64 /* ACC100 DMA Descriptor triplet */ struct acc100_dma_triplet { +diff --git a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +index 15d23d6269..6931884901 100644 +--- a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c ++++ b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +@@ -564,17 +564,21 @@ static int + fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id) + { + struct fpga_5gnr_fec_device *d = dev->data->dev_private; ++ struct fpga_queue *q = dev->data->queues[queue_id].queue_private; ++ uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS + ++ (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx); ++ uint8_t enable = 0x01; ++ uint16_t zero = 0x0000; + #ifdef RTE_LIBRTE_BBDEV_DEBUG + if (d == NULL) { + rte_bbdev_log(ERR, "Invalid device pointer"); + return -1; + } + #endif +- struct fpga_queue *q = dev->data->queues[queue_id].queue_private; +- uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS + +- (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx); +- uint8_t enable = 0x01; +- uint16_t zero = 0x0000; ++ if (dev->data->queues[queue_id].queue_private == NULL) { ++ rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id); ++ return -1; ++ } + + /* Clear queue head and tail variables */ + q->tail = q->head_free_desc = 0; +@@ -879,9 +883,11 @@ check_desc_error(uint32_t error_code) { + static inline uint16_t + get_k0(uint16_t n_cb, uint16_t z_c, uint8_t 
bg, uint8_t rv_index) + { ++ uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c; + if (rv_index == 0) + return 0; +- uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c; ++ if (z_c == 0) ++ return 0; + if (n_cb == n) { + if (rv_index == 1) + return (bg == 1 ? K0_1_1 : K0_1_2) * z_c; diff --git a/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c b/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c index 92decc3e05..21d35292a3 100644 --- a/dpdk/drivers/baseband/fpga_lte_fec/fpga_lte_fec.c @@ -10664,6 +23002,70 @@ index b234bb751a..c6b1eb8679 100644 #define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues" #define TURBO_SW_SOCKET_ID_ARG "socket_id" +diff --git a/dpdk/drivers/baseband/turbo_sw/meson.build b/dpdk/drivers/baseband/turbo_sw/meson.build +index 477b8b371f..05950d7478 100644 +--- a/dpdk/drivers/baseband/turbo_sw/meson.build ++++ b/dpdk/drivers/baseband/turbo_sw/meson.build +@@ -4,17 +4,17 @@ + path = get_option('flexran_sdk') + + # check for FlexRAN SDK libraries for AVX2 +-lib4g = cc.find_library('libturbo', dirs: [path + '/lib_turbo'], required: false) ++lib4g = cc.find_library('turbo', dirs: [path + '/lib_turbo'], required: false) + if lib4g.found() +- ext_deps += cc.find_library('libturbo', dirs: [path + '/lib_turbo'], required: true) +- ext_deps += cc.find_library('libcrc', dirs: [path + '/lib_crc'], required: true) +- ext_deps += cc.find_library('librate_matching', dirs: [path + '/lib_rate_matching'], required: true) +- ext_deps += cc.find_library('libcommon', dirs: [path + '/lib_common'], required: true) +- ext_deps += cc.find_library('libstdc++', required: true) +- ext_deps += cc.find_library('libirc', required: true) +- ext_deps += cc.find_library('libimf', required: true) +- ext_deps += cc.find_library('libipps', required: true) +- ext_deps += cc.find_library('libsvml', required: true) ++ ext_deps += cc.find_library('turbo', dirs: [path + '/lib_turbo'], required: true) ++ ext_deps += cc.find_library('crc', dirs: [path + '/lib_crc'], required: true) ++ ext_deps += cc.find_library('rate_matching', dirs: [path + '/lib_rate_matching'], required: true) ++ ext_deps += cc.find_library('common', dirs: [path + '/lib_common'], required: true) ++ ext_deps += cc.find_library('stdc++', required: true) ++ ext_deps += cc.find_library('irc', required: true) ++ ext_deps += cc.find_library('imf', required: true) ++ ext_deps += cc.find_library('ipps', required: true) ++ ext_deps += cc.find_library('svml', required: true) + includes += include_directories(path + '/lib_turbo') + includes += include_directories(path + '/lib_crc') + includes += include_directories(path + '/lib_rate_matching') +@@ -25,10 +25,10 @@ endif + # check for FlexRAN SDK libraries for AVX512 + lib5g = cc.find_library('libldpc_decoder_5gnr', dirs: [path + '/lib_ldpc_decoder_5gnr'], required: false) + if lib5g.found() +- ext_deps += cc.find_library('libldpc_encoder_5gnr', dirs: [path + '/lib_ldpc_encoder_5gnr'], required: true) +- ext_deps += cc.find_library('libldpc_decoder_5gnr', dirs: [path + '/lib_ldpc_decoder_5gnr'], required: true) +- ext_deps += cc.find_library('libLDPC_ratematch_5gnr', dirs: [path + '/lib_LDPC_ratematch_5gnr'], required: true) +- ext_deps += cc.find_library('librate_dematching_5gnr', dirs: [path + '/lib_rate_dematching_5gnr'], required: true) ++ ext_deps += cc.find_library('ldpc_encoder_5gnr', dirs: [path + '/lib_ldpc_encoder_5gnr'], required: true) ++ ext_deps += cc.find_library('ldpc_decoder_5gnr', dirs: [path + '/lib_ldpc_decoder_5gnr'], required: true) ++ ext_deps += cc.find_library('LDPC_ratematch_5gnr', dirs: 
[path + '/lib_LDPC_ratematch_5gnr'], required: true) ++ ext_deps += cc.find_library('rate_dematching_5gnr', dirs: [path + '/lib_rate_dematching_5gnr'], required: true) + includes += include_directories(path + '/lib_ldpc_encoder_5gnr') + includes += include_directories(path + '/lib_ldpc_decoder_5gnr') + includes += include_directories(path + '/lib_LDPC_ratematch_5gnr') +diff --git a/dpdk/drivers/bus/auxiliary/auxiliary_common.c b/dpdk/drivers/bus/auxiliary/auxiliary_common.c +index 2cf8fe672d..a6b4b3de84 100644 +--- a/dpdk/drivers/bus/auxiliary/auxiliary_common.c ++++ b/dpdk/drivers/bus/auxiliary/auxiliary_common.c +@@ -113,6 +113,12 @@ rte_auxiliary_probe_one_driver(struct rte_auxiliary_driver *drv, + dev->device.numa_node = 0; + } + ++ if (rte_dev_is_probed(&dev->device)) { ++ RTE_LOG(DEBUG, EAL, "Device %s is already probed on auxiliary bus\n", ++ dev->device.name); ++ return -EEXIST; ++ } ++ + iova_mode = rte_eal_iova_mode(); + if ((drv->drv_flags & RTE_AUXILIARY_DRV_NEED_IOVA_AS_VA) > 0 && + iova_mode != RTE_IOVA_VA) { diff --git a/dpdk/drivers/bus/auxiliary/version.map b/dpdk/drivers/bus/auxiliary/version.map index a52260657c..dc993e84ff 100644 --- a/dpdk/drivers/bus/auxiliary/version.map @@ -10675,6 +23077,58 @@ index a52260657c..dc993e84ff 100644 + + local: *; }; +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/bman.h b/dpdk/drivers/bus/dpaa/base/qbman/bman.h +index 21a6bee778..b2aa93e046 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/bman.h ++++ b/dpdk/drivers/bus/dpaa/base/qbman/bman.h +@@ -519,7 +519,6 @@ static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid) + struct bm_mc_command *bm_cmd; + struct bm_mc_result *bm_res; + +- int aq_count = 0; + bool stop = false; + + while (!stop) { +@@ -532,8 +531,7 @@ static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid) + if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { + /* Pool is empty */ + stop = true; +- } else +- ++aq_count; ++ } + }; + return 0; + } +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/dpdk/drivers/bus/dpaa/base/qbman/qman.c +index 447c091770..aa8da96627 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/qman.c ++++ b/dpdk/drivers/bus/dpaa/base/qbman/qman.c +@@ -1,7 +1,7 @@ + /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2008-2016 Freescale Semiconductor Inc. 
+- * Copyright 2017,2019 NXP ++ * Copyright 2017,2019-2023 NXP + * + */ + +@@ -897,7 +897,7 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is) + /* Lookup in the retirement table */ + fq = table_find_fq(p, + be32_to_cpu(msg->fq.fqid)); +- DPAA_BUG_ON(!fq); ++ DPAA_BUG_ON(fq != NULL); + fq_state_change(p, fq, &swapped_msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, &swapped_msg); +@@ -909,6 +909,7 @@ static u32 __poll_portal_slow(struct qman_portal *p, u32 is) + #else + fq = (void *)(uintptr_t)msg->fq.contextB; + #endif ++ DPAA_BUG_ON(fq != NULL); + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, &swapped_msg); diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c index 737ac8d8c5..5546a9cb8d 100644 --- a/dpdk/drivers/bus/dpaa/dpaa_bus.c @@ -10894,7 +23348,7 @@ index eb68c9cab5..5375ea386d 100644 * changed to XOFF after it had already become truly-scheduled to a channel, and * a pull dequeue of that channel occurs that selects that FQ for dequeuing, diff --git a/dpdk/drivers/bus/ifpga/ifpga_bus.c b/dpdk/drivers/bus/ifpga/ifpga_bus.c -index cbc6809284..c5c8bbd572 100644 +index cbc6809284..f82b93af65 100644 --- a/dpdk/drivers/bus/ifpga/ifpga_bus.c +++ b/dpdk/drivers/bus/ifpga/ifpga_bus.c @@ -64,8 +64,7 @@ ifpga_find_afu_dev(const struct rte_rawdev *rdev, @@ -10917,11 +23371,36 @@ index cbc6809284..c5c8bbd572 100644 return afu_dev; } return NULL; +@@ -138,6 +136,8 @@ ifpga_scan_one(struct rte_rawdev *rawdev, + goto end; + } + afu_pr_conf.pr_enable = 1; ++ strlcpy(afu_pr_conf.bs_path, path, ++ sizeof(afu_pr_conf.bs_path)); + } else { + afu_pr_conf.pr_enable = 0; + } +@@ -177,7 +177,6 @@ ifpga_scan_one(struct rte_rawdev *rawdev, + rawdev->dev_ops->dev_start(rawdev)) + goto end; + +- strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path)); + if (rawdev->dev_ops && + rawdev->dev_ops->firmware_load && + rawdev->dev_ops->firmware_load(rawdev, diff --git a/dpdk/drivers/bus/pci/linux/pci_vfio.c b/dpdk/drivers/bus/pci/linux/pci_vfio.c -index 1a5e7c2d2a..cd0d0b1670 100644 +index 1a5e7c2d2a..822aa41f9e 100644 --- a/dpdk/drivers/bus/pci/linux/pci_vfio.c +++ b/dpdk/drivers/bus/pci/linux/pci_vfio.c -@@ -815,7 +815,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev) +@@ -2,6 +2,7 @@ + * Copyright(c) 2010-2014 Intel Corporation + */ + ++#include <unistd.h> + #include <string.h> + #include <fcntl.h> + #include <linux/pci_regs.h> +@@ -815,7 +816,7 @@ pci_vfio_map_resource_primary(struct rte_pci_device *dev) continue; } @@ -10931,7 +23410,7 @@ index 1a5e7c2d2a..cd0d0b1670 100644 free(reg); continue; diff --git a/dpdk/drivers/bus/pci/pci_common.c b/dpdk/drivers/bus/pci/pci_common.c -index 4a3a87f24f..def372b67e 100644 +index 4a3a87f24f..6ff136f083 100644 --- a/dpdk/drivers/bus/pci/pci_common.c +++ b/dpdk/drivers/bus/pci/pci_common.c @@ -247,9 +247,12 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr, @@ -10947,7 +23426,7 @@ index 4a3a87f24f..def372b67e 100644 rte_intr_instance_free(dev->vfio_req_intr_handle); dev->vfio_req_intr_handle = NULL; rte_intr_instance_free(dev->intr_handle); -@@ -257,8 +260,6 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr, +@@ -257,11 +260,9 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr, return ret; } } @@ -10955,7 +23434,11 @@ index 4a3a87f24f..def372b67e 100644 - dev->driver = dr; } - RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n", +- RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n", ++ RTE_LOG(INFO, EAL, 
"Probe PCI driver: %s (%x:%04x) device: "PCI_PRI_FMT" (socket %i)\n", + dr->driver.name, dev->id.vendor_id, dev->id.device_id, + loc->domain, loc->bus, loc->devid, loc->function, + dev->device.numa_node); diff --git a/dpdk/drivers/bus/vdev/rte_bus_vdev.h b/dpdk/drivers/bus/vdev/rte_bus_vdev.h index 2856799953..5af6be009f 100644 --- a/dpdk/drivers/bus/vdev/rte_bus_vdev.h @@ -11062,10 +23545,47 @@ index 519ca9c6fe..367727367e 100644 * and 1 if no driver found for this device. */ diff --git a/dpdk/drivers/common/cnxk/cnxk_security.c b/dpdk/drivers/common/cnxk/cnxk_security.c -index 30562b46e3..787138b059 100644 +index 30562b46e3..7f4f0a643b 100644 --- a/dpdk/drivers/common/cnxk/cnxk_security.c +++ b/dpdk/drivers/common/cnxk/cnxk_security.c -@@ -444,10 +444,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa, +@@ -201,6 +201,14 @@ ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa) + return size; + } + ++static void ++ot_ipsec_update_ipv6_addr_endianness(uint64_t *addr) ++{ ++ *addr = rte_be_to_cpu_64(*addr); ++ addr++; ++ *addr = rte_be_to_cpu_64(*addr); ++} ++ + static int + ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa, + struct rte_security_ipsec_xform *ipsec_xfrm) +@@ -237,6 +245,10 @@ ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa, + memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr, + sizeof(struct in6_addr)); + ++ /* IP Source and Dest are in LE/CPU endian */ ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr); ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr); ++ + break; + default: + return -EINVAL; +@@ -421,6 +433,10 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa, + memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr, + sizeof(struct in6_addr)); + ++ /* IP Source and Dest are in LE/CPU endian */ ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr); ++ ot_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr); ++ + /* Outer header flow label source */ + if (!ipsec_xfrm->options.copy_flabel) { + sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = +@@ -444,10 +460,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa, return -EINVAL; } @@ -11076,6 +23596,19 @@ index 30562b46e3..787138b059 100644 skip_tunnel_info: /* ESN */ sa->w0.s.esn_en = !!ipsec_xfrm->options.esn; +diff --git a/dpdk/drivers/common/cnxk/cnxk_security_ar.h b/dpdk/drivers/common/cnxk/cnxk_security_ar.h +index 3ec4c296c2..b4c51bd50e 100644 +--- a/dpdk/drivers/common/cnxk/cnxk_security_ar.h ++++ b/dpdk/drivers/common/cnxk/cnxk_security_ar.h +@@ -17,7 +17,7 @@ + BITS_PER_LONG_LONG) + + #define WORD_SHIFT 6 +-#define WORD_SIZE (1 << WORD_SHIFT) ++#define WORD_SIZE (1ULL << WORD_SHIFT) + #define WORD_MASK (WORD_SIZE - 1) + + #define IPSEC_ANTI_REPLAY_FAILED (-1) diff --git a/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c b/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c index df6458039d..4119e9ee4f 100644 --- a/dpdk/drivers/common/cnxk/cnxk_telemetry_nix.c @@ -11124,6 +23657,30 @@ index 919f8420f0..3ade4dc0c9 100644 }; } w1; }; +diff --git a/dpdk/drivers/common/cnxk/hw/nix.h b/dpdk/drivers/common/cnxk/hw/nix.h +index dd2ebecc6a..8231c94f0f 100644 +--- a/dpdk/drivers/common/cnxk/hw/nix.h ++++ b/dpdk/drivers/common/cnxk/hw/nix.h +@@ -616,6 +616,7 @@ + #define NIX_RX_ACTIONOP_RSS (0x4ull) + #define NIX_RX_ACTIONOP_PF_FUNC_DROP (0x5ull) + #define NIX_RX_ACTIONOP_MIRROR (0x6ull) ++#define NIX_RX_ACTIONOP_DEFAULT (0xfull) + + #define 
NIX_RX_VTAGACTION_VTAG0_RELPTR (0x0ull) + #define NIX_RX_VTAGACTION_VTAG1_RELPTR (0x4ull) +diff --git a/dpdk/drivers/common/cnxk/meson.build b/dpdk/drivers/common/cnxk/meson.build +index 4928f7e549..9e7ccd285a 100644 +--- a/dpdk/drivers/common/cnxk/meson.build ++++ b/dpdk/drivers/common/cnxk/meson.build +@@ -8,7 +8,6 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + subdir_done() + endif + +-config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON' + deps = ['eal', 'pci', 'bus_pci', 'mbuf', 'security'] + sources = files( + 'roc_ae.c', diff --git a/dpdk/drivers/common/cnxk/roc_bphy_cgx.c b/dpdk/drivers/common/cnxk/roc_bphy_cgx.c index 7449cbe77a..0cd7dff655 100644 --- a/dpdk/drivers/common/cnxk/roc_bphy_cgx.c @@ -11210,7 +23767,7 @@ index 847d969268..be6ddb56aa 100644 } diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c -index 926a916e44..9a869698c4 100644 +index 926a916e44..0a9c722db3 100644 --- a/dpdk/drivers/common/cnxk/roc_dev.c +++ b/dpdk/drivers/common/cnxk/roc_dev.c @@ -57,7 +57,7 @@ pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp) @@ -11247,7 +23804,15 @@ index 926a916e44..9a869698c4 100644 mbox_rsp_init(msg->id, rsp); /* PF/VF function ID */ -@@ -988,6 +999,9 @@ dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova) +@@ -940,6 +951,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) + case PCI_DEVID_CNXK_RVU_AF_VF: + case PCI_DEVID_CNXK_RVU_VF: + case PCI_DEVID_CNXK_RVU_SDP_VF: ++ case PCI_DEVID_CNXK_RVU_NIX_INL_VF: + dev->hwcap |= DEV_HWCAP_F_VF; + break; + } +@@ -988,6 +1000,9 @@ dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova) struct lmtst_tbl_setup_req *req; req = mbox_alloc_msg_lmtst_tbl_setup(mbox); @@ -11257,7 +23822,7 @@ index 926a916e44..9a869698c4 100644 /* This pcifunc is defined with primary pcifunc whose LMT address * will be shared. If call contains valid IOVA, following pcifunc * field is of no use. 
-@@ -1061,6 +1075,11 @@ dev_lmt_setup(struct dev *dev) +@@ -1061,6 +1076,11 @@ dev_lmt_setup(struct dev *dev) */ if (!dev->disable_shared_lmt) { idev = idev_get_cfg(); @@ -11269,6 +23834,68 @@ index 926a916e44..9a869698c4 100644 if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) { idev->lmt_base_addr = dev->lmt_base; idev->lmt_pf_func = dev->pf_func; +diff --git a/dpdk/drivers/common/cnxk/roc_dpi.c b/dpdk/drivers/common/cnxk/roc_dpi.c +index 23b2cc41a4..1666fe5fad 100644 +--- a/dpdk/drivers/common/cnxk/roc_dpi.c ++++ b/dpdk/drivers/common/cnxk/roc_dpi.c +@@ -81,10 +81,10 @@ roc_dpi_configure(struct roc_dpi *roc_dpi) + return rc; + } + +- snprintf(name, sizeof(name), "dpimem%d", roc_dpi->vfid); ++ snprintf(name, sizeof(name), "dpimem%d:%d:%d:%d", pci_dev->addr.domain, pci_dev->addr.bus, ++ pci_dev->addr.devid, pci_dev->addr.function); + buflen = DPI_CMD_QUEUE_SIZE * DPI_CMD_QUEUE_BUFS; +- dpi_mz = plt_memzone_reserve_aligned(name, buflen, 0, +- DPI_CMD_QUEUE_SIZE); ++ dpi_mz = plt_memzone_reserve_aligned(name, buflen, 0, DPI_CMD_QUEUE_SIZE); + if (dpi_mz == NULL) { + plt_err("dpi memzone reserve failed"); + rc = -ENOMEM; +diff --git a/dpdk/drivers/common/cnxk/roc_io.h b/dpdk/drivers/common/cnxk/roc_io.h +index fe5f7f46d0..d533d6cda6 100644 +--- a/dpdk/drivers/common/cnxk/roc_io.h ++++ b/dpdk/drivers/common/cnxk/roc_io.h +@@ -111,7 +111,8 @@ roc_lmt_submit_ldeor(plt_iova_t io_address) + + asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]" + : [rf] "=r"(result) +- : [rs] "r"(io_address)); ++ : [rs] "r"(io_address) ++ : "memory"); + return result; + } + +@@ -122,7 +123,8 @@ roc_lmt_submit_ldeorl(plt_iova_t io_address) + + asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr,%x[rf],[%[rs]]" + : [rf] "=r"(result) +- : [rs] "r"(io_address)); ++ : [rs] "r"(io_address) ++ : "memory"); + return result; + } + +@@ -131,7 +133,8 @@ roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address) + { + asm volatile(PLT_CPU_FEATURE_PREAMBLE + "steor %x[d], [%[rs]]" ::[d] "r"(data), +- [rs] "r"(io_address)); ++ [rs] "r"(io_address) ++ : "memory"); + } + + static __plt_always_inline void +@@ -139,7 +142,8 @@ roc_lmt_submit_steorl(uint64_t data, plt_iova_t io_address) + { + asm volatile(PLT_CPU_FEATURE_PREAMBLE + "steorl %x[d], [%[rs]]" ::[d] "r"(data), +- [rs] "r"(io_address)); ++ [rs] "r"(io_address) ++ : "memory"); + } + + static __plt_always_inline void diff --git a/dpdk/drivers/common/cnxk/roc_irq.c b/dpdk/drivers/common/cnxk/roc_irq.c index 7a24297d72..010b121176 100644 --- a/dpdk/drivers/common/cnxk/roc_irq.c @@ -11286,7 +23913,7 @@ index 7a24297d72..010b121176 100644 vec : (uint32_t)plt_intr_nb_efd_get(intr_handle); plt_intr_nb_efd_set(intr_handle, nb_efd); diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h -index b63fe108c9..9a8ae6b216 100644 +index b63fe108c9..564bf29bc2 100644 --- a/dpdk/drivers/common/cnxk/roc_mbox.h +++ b/dpdk/drivers/common/cnxk/roc_mbox.h @@ -114,7 +114,7 @@ struct mbox_msghdr { @@ -11318,6 +23945,15 @@ index b63fe108c9..9a8ae6b216 100644 /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ +@@ -449,7 +453,7 @@ struct lmtst_tbl_setup_req { + + struct cgx_stats_rsp { + struct mbox_msghdr hdr; +-#define CGX_RX_STATS_COUNT 13 ++#define CGX_RX_STATS_COUNT 9 + #define CGX_TX_STATS_COUNT 18 + uint64_t __io rx_stats[CGX_RX_STATS_COUNT]; + uint64_t __io tx_stats[CGX_TX_STATS_COUNT]; @@ -1240,6 +1244,33 @@ struct ssow_lf_free_req { uint16_t __io hws; }; @@ -11959,7 +24595,7 @@ index 
b3d8ebd3c2..506cb066ce 100644 aq->ctype = NIX_AQ_CTYPE_SQ; aq->op = NIX_AQ_INSTOP_WRITE; diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c b/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c -index 3257fa67c7..3d81247a12 100644 +index 3257fa67c7..191bb20558 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c +++ b/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c @@ -107,7 +107,7 @@ nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile) @@ -11971,6 +24607,86 @@ index 3257fa67c7..3d81247a12 100644 * considers each unit as Byte, so we need convert * user pps to bps */ +@@ -880,19 +880,29 @@ roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id, + TAILQ_FOREACH(sibling, list, node) { + if (sibling->parent != node->parent) + continue; +- k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k], +- &req->regval[k]); ++ k += nix_tm_sw_xoff_prep(sibling, true, &req->reg[k], &req->regval[k]); ++ if (k >= MAX_REGS_PER_MBOX_MSG) { ++ req->num_regs = k; ++ rc = mbox_process(mbox); ++ if (rc) ++ return rc; ++ k = 0; ++ req = mbox_alloc_msg_nix_txschq_cfg(mbox); ++ req->lvl = node->hw_lvl; ++ } ++ } ++ ++ if (k) { ++ req->num_regs = k; ++ rc = mbox_process(mbox); ++ if (rc) ++ return rc; ++ /* Update new weight for current node */ ++ req = mbox_alloc_msg_nix_txschq_cfg(mbox); + } +- req->num_regs = k; +- rc = mbox_process(mbox); +- if (rc) +- return rc; + +- /* Update new weight for current node */ +- req = mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = node->hw_lvl; +- req->num_regs = +- nix_tm_sched_reg_prep(nix, node, req->reg, req->regval); ++ req->num_regs = nix_tm_sched_reg_prep(nix, node, req->reg, req->regval); + rc = mbox_process(mbox); + if (rc) + return rc; +@@ -905,19 +915,29 @@ roc_nix_tm_node_parent_update(struct roc_nix *roc_nix, uint32_t node_id, + TAILQ_FOREACH(sibling, list, node) { + if (sibling->parent != node->parent) + continue; +- k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k], +- &req->regval[k]); ++ k += nix_tm_sw_xoff_prep(sibling, false, &req->reg[k], &req->regval[k]); ++ if (k >= MAX_REGS_PER_MBOX_MSG) { ++ req->num_regs = k; ++ rc = mbox_process(mbox); ++ if (rc) ++ return rc; ++ k = 0; ++ req = mbox_alloc_msg_nix_txschq_cfg(mbox); ++ req->lvl = node->hw_lvl; ++ } ++ } ++ ++ if (k) { ++ req->num_regs = k; ++ rc = mbox_process(mbox); ++ if (rc) ++ return rc; ++ /* XON Parent node */ ++ req = mbox_alloc_msg_nix_txschq_cfg(mbox); + } +- req->num_regs = k; +- rc = mbox_process(mbox); +- if (rc) +- return rc; + +- /* XON Parent node */ +- req = mbox_alloc_msg_nix_txschq_cfg(mbox); + req->lvl = node->parent->hw_lvl; +- req->num_regs = nix_tm_sw_xoff_prep(node->parent, false, +- req->reg, req->regval); ++ req->num_regs = nix_tm_sw_xoff_prep(node->parent, false, req->reg, req->regval); + rc = mbox_process(mbox); + if (rc) + return rc; diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c b/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c index 543adf9e56..9e80c2a5fe 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c @@ -11992,11 +24708,68 @@ index 543adf9e56..9e80c2a5fe 100644 k++; break; +diff --git a/dpdk/drivers/common/cnxk/roc_npa.c b/dpdk/drivers/common/cnxk/roc_npa.c +index efcb7582eb..ddd66c62ed 100644 +--- a/dpdk/drivers/common/cnxk/roc_npa.c ++++ b/dpdk/drivers/common/cnxk/roc_npa.c +@@ -115,6 +115,8 @@ npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle) + aura_req->op = NPA_AQ_INSTOP_WRITE; + aura_req->aura.ena = 0; + aura_req->aura_mask.ena = ~aura_req->aura_mask.ena; ++ aura_req->aura.bp_ena = 0; ++ 
aura_req->aura_mask.bp_ena = ~aura_req->aura_mask.bp_ena; + + rc = mbox_process(mbox); + if (rc < 0) +@@ -305,7 +307,11 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size, + /* Update pool fields */ + pool->stack_base = mz->iova; + pool->ena = 1; +- pool->buf_size = block_size / ROC_ALIGN; ++ /* In opaque mode buffer size must be 0 */ ++ if (!pool->nat_align) ++ pool->buf_size = 0; ++ else ++ pool->buf_size = block_size / ROC_ALIGN; + pool->stack_max_pages = stack_size; + pool->shift = plt_log2_u32(block_count); + pool->shift = pool->shift < 8 ? 0 : pool->shift - 8; diff --git a/dpdk/drivers/common/cnxk/roc_npc.c b/dpdk/drivers/common/cnxk/roc_npc.c -index 503c74748f..5ee7ff5e41 100644 +index 503c74748f..9422a42457 100644 --- a/dpdk/drivers/common/cnxk/roc_npc.c +++ b/dpdk/drivers/common/cnxk/roc_npc.c -@@ -969,14 +969,14 @@ npc_vtag_insert_action_configure(struct mbox *mbox, struct roc_npc_flow *flow, +@@ -516,11 +516,15 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, + if (req_act == ROC_NPC_ACTION_TYPE_VLAN_STRIP) { + /* Only VLAN action is provided */ + flow->npc_action = NIX_RX_ACTIONOP_UCAST; +- } else if (req_act & +- (ROC_NPC_ACTION_TYPE_PF | ROC_NPC_ACTION_TYPE_VF)) { +- flow->npc_action = NIX_RX_ACTIONOP_UCAST; +- if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) +- flow->npc_action |= (uint64_t)rq << 20; ++ } else if (req_act & (ROC_NPC_ACTION_TYPE_PF | ROC_NPC_ACTION_TYPE_VF)) { ++ /* Check if any other action is set */ ++ if ((req_act == ROC_NPC_ACTION_TYPE_PF) || (req_act == ROC_NPC_ACTION_TYPE_VF)) { ++ flow->npc_action = NIX_RX_ACTIONOP_DEFAULT; ++ } else { ++ flow->npc_action = NIX_RX_ACTIONOP_UCAST; ++ if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) ++ flow->npc_action |= (uint64_t)rq << 20; ++ } + } else if (req_act & ROC_NPC_ACTION_TYPE_DROP) { + flow->npc_action = NIX_RX_ACTIONOP_DROP; + } else if (req_act & ROC_NPC_ACTION_TYPE_QUEUE) { +@@ -531,8 +535,7 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, + } else if (req_act & ROC_NPC_ACTION_TYPE_SEC) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC; + flow->npc_action |= (uint64_t)rq << 20; +- } else if (req_act & +- (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) { ++ } else if (req_act & (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) { + flow->npc_action = NIX_RX_ACTIONOP_UCAST; + } else if (req_act & ROC_NPC_ACTION_TYPE_COUNT) { + /* Keep ROC_NPC_ACTION_TYPE_COUNT_ACT always at the end +@@ -969,14 +972,14 @@ npc_vtag_insert_action_configure(struct mbox *mbox, struct roc_npc_flow *flow, vtag_cfg->cfg_type = VTAG_TX; vtag_cfg->vtag_size = NIX_VTAGSIZE_T4; vtag_cfg->tx.vtag0 = @@ -12013,7 +24786,7 @@ index 503c74748f..5ee7ff5e41 100644 (vlan_info[1].vlan_pcp << 13) | vlan_info[1].vlan_id); vtag_cfg->tx.cfg_vtag1 = 1; -@@ -1246,6 +1246,16 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow) +@@ -1246,6 +1249,16 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow) return rc; } @@ -12030,8 +24803,38 @@ index 503c74748f..5ee7ff5e41 100644 rc = npc_mcam_free_entry(npc, flow->mcam_id); if (rc != 0) return rc; +diff --git a/dpdk/drivers/common/cnxk/roc_npc.h b/dpdk/drivers/common/cnxk/roc_npc.h +index e13d557136..1b3aadf34a 100644 +--- a/dpdk/drivers/common/cnxk/roc_npc.h ++++ b/dpdk/drivers/common/cnxk/roc_npc.h +@@ -58,6 +58,25 @@ struct roc_npc_flow_item_raw { + const uint8_t *pattern; /**< Byte string to look for. 
*/ + }; + ++struct roc_vlan_hdr { ++ uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */ ++ uint16_t eth_proto; /**< Ethernet type of encapsulated frame. */ ++} __plt_packed; ++ ++PLT_STD_C11 ++struct roc_npc_flow_item_vlan { ++ union { ++ struct { ++ uint16_t tci; /**< Tag control information. */ ++ uint16_t inner_type; /**< Inner EtherType or TPID. */ ++ }; ++ struct roc_vlan_hdr hdr; ++ }; ++ uint32_t has_more_vlan : 1; ++ /**< Packet header contains at least one more VLAN, after this VLAN. */ ++ uint32_t reserved : 31; /**< Reserved, must be zero. */ ++}; ++ + #define ROC_NPC_MAX_ACTION_COUNT 19 + + enum roc_npc_action_type { diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam.c b/dpdk/drivers/common/cnxk/roc_npc_mcam.c -index ba7f89b45b..a16ba3f7be 100644 +index ba7f89b45b..eb0a982589 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_mcam.c +++ b/dpdk/drivers/common/cnxk/roc_npc_mcam.c @@ -234,7 +234,7 @@ npc_get_kex_capability(struct npc *npc) @@ -12052,8 +24855,20 @@ index ba7f89b45b..a16ba3f7be 100644 (void)mbox_alloc_msg_npc_read_base_steer_rule(npc->mbox); rc = mbox_process_msg(npc->mbox, (void *)&base_rule_rsp); if (rc) { +@@ -711,8 +711,10 @@ npc_flow_free_all_resources(struct npc *npc) + for (idx = 0; idx < npc->flow_max_priority; idx++) { + while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) { + npc_rss_group_free(npc, flow); +- if (flow->ctr_id != NPC_COUNTER_NONE) ++ if (flow->ctr_id != NPC_COUNTER_NONE) { ++ rc |= npc_mcam_clear_counter(npc, flow->ctr_id); + rc |= npc_mcam_free_counter(npc, flow->ctr_id); ++ } + + npc_delete_prio_list_entry(npc, flow); + diff --git a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c -index 19b4901a52..278056591e 100644 +index 19b4901a52..1ba3d8f2cf 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c +++ b/dpdk/drivers/common/cnxk/roc_npc_mcam_dump.c @@ -159,6 +159,12 @@ npc_flow_print_parse_nibbles(FILE *file, struct roc_npc_flow *flow, @@ -12166,8 +24981,32 @@ index 19b4901a52..278056591e 100644 } static void +@@ -308,8 +308,10 @@ npc_flow_print_item(FILE *file, struct npc *npc, struct npc_xtract_info *xinfo, + for (i = 0; i < NPC_MAX_LFL; i++) { + lflags_info = npc->prx_fxcfg[intf][ld][i].xtract; + +- npc_flow_print_xtractinfo(file, lflags_info, flow, lid, +- lt); ++ if (!lflags_info->enable) ++ continue; ++ ++ npc_flow_print_xtractinfo(file, lflags_info, flow, lid, lt); + } + } + } +@@ -440,6 +442,10 @@ npc_flow_dump_rx_action(FILE *file, uint64_t npc_action) + plt_strlcpy(index_name, "Multicast/mirror table index", + NPC_MAX_FIELD_NAME_SIZE); + break; ++ case NIX_RX_ACTIONOP_DEFAULT: ++ fprintf(file, "NIX_RX_ACTIONOP_DEFAULT (%" PRIu64 ")\n", ++ (uint64_t)NIX_RX_ACTIONOP_DEFAULT); ++ break; + default: + plt_err("Unknown NIX_RX_ACTIONOP found"); + return; diff --git a/dpdk/drivers/common/cnxk/roc_npc_parse.c b/dpdk/drivers/common/cnxk/roc_npc_parse.c -index 8125035dd8..9742ac90f7 100644 +index 8125035dd8..849fadb662 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_parse.c +++ b/dpdk/drivers/common/cnxk/roc_npc_parse.c @@ -38,6 +38,7 @@ npc_parse_cpt_hdr(struct npc_parse_state *pst) @@ -12194,15 +25033,247 @@ index 8125035dd8..9742ac90f7 100644 info.hw_mask = &hw_mask; info.len = pst->pattern->size; npc_get_hw_supp_mask(pst, &info, lid, lt); -@@ -179,6 +182,7 @@ npc_parse_lb(struct npc_parse_state *pst) - int nr_vlans = 0; +@@ -165,6 +168,184 @@ npc_flow_raw_item_prepare(const struct roc_npc_flow_item_raw *raw_spec, + return 0; + } + ++#define NPC_MAX_SUPPORTED_VLANS 3 
++ ++static int ++npc_parse_vlan_count(const struct roc_npc_item_info *pattern, ++ const struct roc_npc_item_info **pattern_list, ++ const struct roc_npc_flow_item_vlan **vlan_items, int *vlan_count) ++{ ++ *vlan_count = 0; ++ while (pattern->type == ROC_NPC_ITEM_TYPE_VLAN) { ++ if (*vlan_count > NPC_MAX_SUPPORTED_VLANS - 1) ++ return NPC_ERR_PATTERN_NOTSUP; ++ ++ /* Don't support ranges */ ++ if (pattern->last != NULL) ++ return NPC_ERR_INVALID_RANGE; ++ ++ /* If spec is NULL, both mask and last must be NULL, this ++ * makes it to match ANY value (eq to mask = 0). ++ * Setting either mask or last without spec is an error ++ */ ++ if (pattern->spec == NULL) { ++ if (pattern->last != NULL && pattern->mask != NULL) ++ return NPC_ERR_INVALID_SPEC; ++ } ++ ++ pattern_list[*vlan_count] = pattern; ++ vlan_items[*vlan_count] = pattern->spec; ++ (*vlan_count)++; ++ ++ pattern++; ++ pattern = npc_parse_skip_void_and_any_items(pattern); ++ } ++ ++ return 0; ++} ++ ++static int ++npc_parse_vlan_ltype_get(struct npc_parse_state *pst, ++ const struct roc_npc_flow_item_vlan **vlan_item, int vlan_count, ++ int *ltype, int *lflags) ++{ ++ switch (vlan_count) { ++ case 1: ++ *ltype = NPC_LT_LB_CTAG; ++ if (vlan_item[0] && vlan_item[0]->has_more_vlan) ++ *ltype = NPC_LT_LB_STAG_QINQ; ++ break; ++ case 2: ++ if (vlan_item[1] && vlan_item[1]->has_more_vlan) { ++ if (!(pst->npc->keyx_supp_nmask[pst->nix_intf] & ++ 0x3ULL << NPC_LFLAG_LB_OFFSET)) ++ return NPC_ERR_PATTERN_NOTSUP; ++ ++ /* This lflag value will match either one of ++ * NPC_F_LB_L_WITH_STAG_STAG, ++ * NPC_F_LB_L_WITH_QINQ_CTAG, ++ * NPC_F_LB_L_WITH_QINQ_QINQ and ++ * NPC_F_LB_L_WITH_ITAG (0b0100 to 0b0111). For ++ * NPC_F_LB_L_WITH_ITAG, ltype is NPC_LT_LB_ETAG ++ * hence will not match. ++ */ ++ ++ *lflags = NPC_F_LB_L_WITH_QINQ_CTAG & NPC_F_LB_L_WITH_QINQ_QINQ & ++ NPC_F_LB_L_WITH_STAG_STAG; ++ } ++ *ltype = NPC_LT_LB_STAG_QINQ; ++ break; ++ case 3: ++ if (vlan_item[2] && vlan_item[2]->has_more_vlan) ++ return NPC_ERR_PATTERN_NOTSUP; ++ if (!(pst->npc->keyx_supp_nmask[pst->nix_intf] & 0x3ULL << NPC_LFLAG_LB_OFFSET)) ++ return NPC_ERR_PATTERN_NOTSUP; ++ *ltype = NPC_LT_LB_STAG_QINQ; ++ *lflags = NPC_F_STAG_STAG_CTAG; ++ break; ++ default: ++ return NPC_ERR_PATTERN_NOTSUP; ++ } ++ ++ return 0; ++} ++ ++static int ++npc_update_vlan_parse_state(struct npc_parse_state *pst, const struct roc_npc_item_info *pattern, ++ int lid, int lt, uint8_t lflags, int vlan_count) ++{ ++ uint8_t vlan_spec[NPC_MAX_SUPPORTED_VLANS * sizeof(struct roc_vlan_hdr)]; ++ uint8_t vlan_mask[NPC_MAX_SUPPORTED_VLANS * sizeof(struct roc_vlan_hdr)]; ++ int rc = 0, i, offset = NPC_TPID_LENGTH; ++ struct npc_parse_item_info parse_info; ++ char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; ++ ++ memset(vlan_spec, 0, sizeof(struct roc_vlan_hdr) * NPC_MAX_SUPPORTED_VLANS); ++ memset(vlan_mask, 0, sizeof(struct roc_vlan_hdr) * NPC_MAX_SUPPORTED_VLANS); ++ memset(&parse_info, 0, sizeof(parse_info)); ++ ++ if (vlan_count > 2) ++ vlan_count = 2; ++ ++ for (i = 0; i < vlan_count; i++) { ++ if (pattern[i].spec) ++ memcpy(vlan_spec + offset, pattern[i].spec, sizeof(struct roc_vlan_hdr)); ++ if (pattern[i].mask) ++ memcpy(vlan_mask + offset, pattern[i].mask, sizeof(struct roc_vlan_hdr)); ++ ++ offset += 4; ++ } ++ ++ parse_info.def_mask = NULL; ++ parse_info.spec = vlan_spec; ++ parse_info.mask = vlan_mask; ++ parse_info.def_mask = NULL; ++ parse_info.hw_hdr_len = 0; ++ ++ lid = NPC_LID_LB; ++ parse_info.hw_mask = hw_mask; ++ ++ if (lt == NPC_LT_LB_CTAG) ++ parse_info.len = sizeof(struct roc_vlan_hdr) + 
NPC_TPID_LENGTH; ++ ++ if (lt == NPC_LT_LB_STAG_QINQ) ++ parse_info.len = sizeof(struct roc_vlan_hdr) * 2 + NPC_TPID_LENGTH; ++ ++ memset(hw_mask, 0, sizeof(hw_mask)); ++ ++ parse_info.hw_mask = &hw_mask; ++ npc_get_hw_supp_mask(pst, &parse_info, lid, lt); ++ ++ rc = npc_mask_is_supported(parse_info.mask, parse_info.hw_mask, parse_info.len); ++ if (!rc) ++ return NPC_ERR_INVALID_MASK; ++ ++ /* Point pattern to last item consumed */ ++ pst->pattern = pattern; ++ return npc_update_parse_state(pst, &parse_info, lid, lt, lflags); ++} ++ ++static int ++npc_parse_lb_vlan(struct npc_parse_state *pst) ++{ ++ const struct roc_npc_flow_item_vlan *vlan_items[NPC_MAX_SUPPORTED_VLANS]; ++ const struct roc_npc_item_info *pattern_list[NPC_MAX_SUPPORTED_VLANS]; ++ const struct roc_npc_item_info *last_pattern; ++ int vlan_count = 0, rc = 0; ++ int lid, lt, lflags; ++ ++ lid = NPC_LID_LB; ++ lflags = 0; ++ last_pattern = pst->pattern; ++ ++ rc = npc_parse_vlan_count(pst->pattern, pattern_list, vlan_items, &vlan_count); ++ if (rc) ++ return rc; ++ ++ rc = npc_parse_vlan_ltype_get(pst, vlan_items, vlan_count, <, &lflags); ++ if (rc) ++ return rc; ++ ++ if (vlan_count == 3) { ++ if (pattern_list[2]->spec != NULL && pattern_list[2]->mask != NULL && ++ pattern_list[2]->last != NULL) ++ return NPC_ERR_PATTERN_NOTSUP; ++ ++ /* Matching can be done only for two tags. */ ++ vlan_count = 2; ++ last_pattern++; ++ } ++ ++ rc = npc_update_vlan_parse_state(pst, pattern_list[0], lid, lt, lflags, vlan_count); ++ if (rc) ++ return rc; ++ ++ if (vlan_count > 1) ++ pst->pattern = last_pattern + vlan_count; ++ ++ return 0; ++} ++ + int + npc_parse_lb(struct npc_parse_state *pst) + { +@@ -176,9 +357,9 @@ npc_parse_lb(struct npc_parse_state *pst) + char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; + struct npc_parse_item_info info; + int lid, lt, lflags, len = 0; +- int nr_vlans = 0; int rc; + info.def_mask = NULL; info.spec = NULL; info.mask = NULL; info.def_mask = NULL; -@@ -307,12 +311,12 @@ npc_parse_mpls_label_stack(struct npc_parse_state *pst, int *flag) +@@ -192,41 +373,10 @@ npc_parse_lb(struct npc_parse_state *pst) + /* RTE vlan is either 802.1q or 802.1ad, + * this maps to either CTAG/STAG. We need to decide + * based on number of VLANS present. Matching is +- * supported on first tag only. ++ * supported on first two tags. + */ +- info.hw_mask = NULL; +- info.len = pst->pattern->size; +- +- pattern = pst->pattern; +- while (pattern->type == ROC_NPC_ITEM_TYPE_VLAN) { +- nr_vlans++; +- +- /* Basic validation of Second/Third vlan item */ +- if (nr_vlans > 1) { +- rc = npc_parse_item_basic(pattern, &info); +- if (rc != 0) +- return rc; +- } +- last_pattern = pattern; +- pattern++; +- pattern = npc_parse_skip_void_and_any_items(pattern); +- } + +- switch (nr_vlans) { +- case 1: +- lt = NPC_LT_LB_CTAG; +- break; +- case 2: +- lt = NPC_LT_LB_STAG_QINQ; +- lflags = NPC_F_STAG_CTAG; +- break; +- case 3: +- lt = NPC_LT_LB_STAG_QINQ; +- lflags = NPC_F_STAG_STAG_CTAG; +- break; +- default: +- return NPC_ERR_PATTERN_NOTSUP; +- } ++ return npc_parse_lb_vlan(pst); + } else if (pst->pattern->type == ROC_NPC_ITEM_TYPE_E_TAG) { + /* we can support ETAG and match a subsequent CTAG + * without any matching support. +@@ -307,12 +457,12 @@ npc_parse_mpls_label_stack(struct npc_parse_state *pst, int *flag) * pst->pattern points to first MPLS label. We only check * that subsequent labels do not have anything to match. 
*/ @@ -12216,7 +25287,7 @@ index 8125035dd8..9742ac90f7 100644 while (pattern->type == ROC_NPC_ITEM_TYPE_MPLS) { nr_labels++; -@@ -358,6 +362,7 @@ npc_parse_mpls(struct npc_parse_state *pst, int lid) +@@ -358,6 +508,7 @@ npc_parse_mpls(struct npc_parse_state *pst, int lid) info.len = pst->pattern->size; info.spec = NULL; info.mask = NULL; @@ -12224,7 +25295,7 @@ index 8125035dd8..9742ac90f7 100644 info.hw_hdr_len = 0; npc_get_hw_supp_mask(pst, &info, lid, lt); -@@ -405,6 +410,7 @@ npc_parse_lc(struct npc_parse_state *pst) +@@ -405,6 +556,7 @@ npc_parse_lc(struct npc_parse_state *pst) if (pst->pattern->type == ROC_NPC_ITEM_TYPE_MPLS) return npc_parse_mpls(pst, NPC_LID_LC); @@ -12232,7 +25303,7 @@ index 8125035dd8..9742ac90f7 100644 info.hw_mask = &hw_mask; info.spec = NULL; info.mask = NULL; -@@ -492,10 +498,10 @@ npc_parse_ld(struct npc_parse_state *pst) +@@ -492,10 +644,10 @@ npc_parse_ld(struct npc_parse_state *pst) return npc_parse_mpls(pst, NPC_LID_LD); return 0; } @@ -12244,7 +25315,7 @@ index 8125035dd8..9742ac90f7 100644 info.len = 0; info.hw_hdr_len = 0; -@@ -529,11 +535,13 @@ npc_parse_ld(struct npc_parse_state *pst) +@@ -529,11 +681,13 @@ npc_parse_ld(struct npc_parse_state *pst) case ROC_NPC_ITEM_TYPE_GRE: lt = NPC_LT_LD_GRE; info.len = pst->pattern->size; @@ -12258,7 +25329,7 @@ index 8125035dd8..9742ac90f7 100644 break; case ROC_NPC_ITEM_TYPE_NVGRE: lt = NPC_LT_LD_NVGRE; -@@ -651,6 +659,7 @@ npc_parse_lf(struct npc_parse_state *pst) +@@ -651,6 +805,7 @@ npc_parse_lf(struct npc_parse_state *pst) lflags = 0; /* No match support for vlan tags */ @@ -12266,7 +25337,7 @@ index 8125035dd8..9742ac90f7 100644 info.hw_mask = NULL; info.len = pst->pattern->size; info.spec = NULL; -@@ -709,6 +718,7 @@ npc_parse_lg(struct npc_parse_state *pst) +@@ -709,6 +864,7 @@ npc_parse_lg(struct npc_parse_state *pst) if (!pst->tunnel) return 0; @@ -12274,7 +25345,7 @@ index 8125035dd8..9742ac90f7 100644 info.hw_mask = &hw_mask; info.spec = NULL; info.mask = NULL; -@@ -745,6 +755,7 @@ npc_parse_lh(struct npc_parse_state *pst) +@@ -745,6 +901,7 @@ npc_parse_lh(struct npc_parse_state *pst) if (!pst->tunnel) return 0; @@ -12283,10 +25354,22 @@ index 8125035dd8..9742ac90f7 100644 info.spec = NULL; info.mask = NULL; diff --git a/dpdk/drivers/common/cnxk/roc_npc_priv.h b/dpdk/drivers/common/cnxk/roc_npc_priv.h -index 712302bc5c..74e0fb2ece 100644 +index 712302bc5c..54e96157cc 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_priv.h +++ b/dpdk/drivers/common/cnxk/roc_npc_priv.h -@@ -363,7 +363,7 @@ struct npc { +@@ -67,6 +67,11 @@ + #define NPC_ACTION_MAX_VLAN_PARAMS 3 + #define NPC_ACTION_MAX_VLANS_STRIPPED 2 + ++#define NPC_LTYPE_OFFSET_START 7 ++/* LB OFFSET : START + LA (2b flags + 1b ltype) + LB (2b flags) */ ++#define NPC_LTYPE_LB_OFFSET (NPC_LTYPE_OFFSET_START + 5) ++#define NPC_LFLAG_LB_OFFSET (NPC_LTYPE_OFFSET_START + 3) ++ + struct npc_action_vtag_info { + uint16_t vlan_id; + uint16_t vlan_ethtype; +@@ -363,7 +368,7 @@ struct npc { uint32_t rss_grps; /* rss groups supported */ uint16_t flow_prealloc_size; /* Pre allocated mcam size */ uint16_t flow_max_priority; /* Max priority for flow */ @@ -12295,10 +25378,27 @@ index 712302bc5c..74e0fb2ece 100644 uint32_t mark_actions; /* Number of mark actions */ uint32_t vtag_strip_actions; /* vtag insert/strip actions */ uint16_t pf_func; /* pf_func of device */ +@@ -402,6 +407,7 @@ int npc_update_parse_state(struct npc_parse_state *pst, + uint8_t flags); + void npc_get_hw_supp_mask(struct npc_parse_state *pst, + struct npc_parse_item_info *info, int lid, int 
lt); ++int npc_mask_is_supported(const char *mask, const char *hw_mask, int len); + int npc_parse_item_basic(const struct roc_npc_item_info *item, + struct npc_parse_item_info *info); + int npc_parse_meta_items(struct npc_parse_state *pst); diff --git a/dpdk/drivers/common/cnxk/roc_npc_utils.c b/dpdk/drivers/common/cnxk/roc_npc_utils.c -index ed0ef5c462..e36a312576 100644 +index ed0ef5c462..2f5d003b94 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_utils.c +++ b/dpdk/drivers/common/cnxk/roc_npc_utils.c +@@ -88,7 +88,7 @@ npc_get_hw_supp_mask(struct npc_parse_state *pst, + } + } + +-static inline int ++inline int + npc_mask_is_supported(const char *mask, const char *hw_mask, int len) + { + /* @@ -145,6 +145,9 @@ npc_parse_item_basic(const struct roc_npc_item_info *item, info->mask = item->mask; } @@ -12328,6 +25428,15 @@ index ed0ef5c462..e36a312576 100644 return 0; } +@@ -632,7 +638,7 @@ npc_alloc_mcam_by_ref_entry(struct mbox *mbox, struct roc_npc_flow *flow, + npc_find_mcam_ref_entry(flow, npc, &prio, &ref_entry, dir); + rc = npc_allocate_mcam_entry(mbox, prio, rsp_local, ref_entry); + if (rc && !retry_done) { +- plt_info( ++ plt_npc_dbg( + "npc: Failed to allocate lower priority entry. Retrying for higher priority"); + + dir = NPC_MCAM_HIGHER_PRIO; @@ -664,14 +670,14 @@ npc_get_free_mcam_entry(struct mbox *mbox, struct roc_npc_flow *flow, new_entry->flow = flow; @@ -12389,9 +25498,18 @@ index ffe537af30..3f0821e400 100644 se_ctx->zsk_flags = 0; diff --git a/dpdk/drivers/common/cnxk/roc_se.h b/dpdk/drivers/common/cnxk/roc_se.h -index 5be832fa75..500f94ac11 100644 +index 5be832fa75..fbe4e3e856 100644 --- a/dpdk/drivers/common/cnxk/roc_se.h +++ b/dpdk/drivers/common/cnxk/roc_se.h +@@ -280,7 +280,7 @@ struct roc_se_ctx { + uint64_t enc_cipher : 8; + uint64_t hash_type : 8; + uint64_t mac_len : 8; +- uint64_t auth_key_len : 8; ++ uint64_t auth_key_len : 16; + uint64_t fc_type : 4; + uint64_t hmac : 1; + uint64_t zsk_flags : 3; @@ -297,6 +297,27 @@ struct roc_se_ctx { uint8_t *auth_key; }; @@ -12606,6 +25724,33 @@ index 3d661102cc..9daac4bc03 100644 } int +diff --git a/dpdk/drivers/common/iavf/iavf_adminq.c b/dpdk/drivers/common/iavf/iavf_adminq.c +index 9c36e8908e..06644b02a1 100644 +--- a/dpdk/drivers/common/iavf/iavf_adminq.c ++++ b/dpdk/drivers/common/iavf/iavf_adminq.c +@@ -788,7 +788,8 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw, + } + + /* if ready, copy the desc back to temp */ +- if (iavf_asq_done(hw)) { ++ if (iavf_asq_done(hw) && ++ !details->async && !details->postpone) { + iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc), + IAVF_DMA_TO_NONDMA); + if (buff != NULL) +diff --git a/dpdk/drivers/common/iavf/iavf_common.c b/dpdk/drivers/common/iavf/iavf_common.c +index 855a0ab2f5..dc7662bc1b 100644 +--- a/dpdk/drivers/common/iavf/iavf_common.c ++++ b/dpdk/drivers/common/iavf/iavf_common.c +@@ -27,6 +27,8 @@ enum iavf_status iavf_set_mac_type(struct iavf_hw *hw) + break; + case IAVF_DEV_ID_VF: + case IAVF_DEV_ID_VF_HV: ++ hw->mac.type = IAVF_MAC_XL710; ++ break; + case IAVF_DEV_ID_ADAPTIVE_VF: + hw->mac.type = IAVF_MAC_VF; + break; diff --git a/dpdk/drivers/common/iavf/iavf_type.h b/dpdk/drivers/common/iavf/iavf_type.h index 51267ca3b3..1cd87587d6 100644 --- a/dpdk/drivers/common/iavf/iavf_type.h @@ -12653,7 +25798,7 @@ index 1e9134501e..2f4bf15725 100644 diff --git a/dpdk/drivers/common/mlx5/linux/meson.build b/dpdk/drivers/common/mlx5/linux/meson.build -index 7909f23e21..4c7b53b9bd 100644 +index 7909f23e21..8be82850ba 100644 --- 
a/dpdk/drivers/common/mlx5/linux/meson.build +++ b/dpdk/drivers/common/mlx5/linux/meson.build @@ -36,7 +36,7 @@ foreach libname:libnames @@ -12665,11 +25810,34 @@ index 7909f23e21..4c7b53b9bd 100644 ext_deps += declare_dependency(compile_args: ibv_cflags.split()) endif if static_ibverbs +@@ -202,6 +202,8 @@ has_sym_args = [ + 'mlx5dv_dr_domain_allow_duplicate_rules' ], + [ 'HAVE_MLX5_IBV_REG_MR_IOVA', 'infiniband/verbs.h', + 'ibv_reg_mr_iova' ], ++ [ 'HAVE_IBV_FORK_UNNEEDED', 'infiniband/verbs.h', ++ 'ibv_is_fork_initialized'], + ] + config = configuration_data() + foreach arg:has_sym_args diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c -index 0d3e24e04e..eeb583a553 100644 +index 0d3e24e04e..9fd6c1b5f0 100644 --- a/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c +++ b/dpdk/drivers/common/mlx5/linux/mlx5_common_os.c -@@ -456,21 +456,33 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) +@@ -95,10 +95,11 @@ mlx5_translate_port_name(const char *port_name_in, + char ctrl = 0, pf_c1, pf_c2, vf_c1, vf_c2, eol; + char *end; + int sc_items; ++ int32_t ctrl_num = -1; + +- sc_items = sscanf(port_name_in, "%c%d", +- &ctrl, &port_info_out->ctrl_num); ++ sc_items = sscanf(port_name_in, "%c%d", &ctrl, &ctrl_num); + if (sc_items == 2 && ctrl == 'c') { ++ port_info_out->ctrl_num = ctrl_num; + port_name_in++; /* 'c' */ + port_name_in += snprintf(NULL, 0, "%d", + port_info_out->ctrl_num); +@@ -456,21 +457,33 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) int n; struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n); struct ibv_device *ibv_match = NULL; @@ -12710,7 +25878,7 @@ index 0d3e24e04e..eeb583a553 100644 } if (ibv_match == NULL) { DRV_LOG(WARNING, -@@ -487,7 +499,7 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) +@@ -487,7 +500,7 @@ mlx5_os_get_ibv_device(const struct rte_pci_addr *addr) static int mlx5_nl_roce_disable(const char *addr) { @@ -12739,6 +25907,21 @@ index 83066e752d..a6190a34e6 100644 int mlx5_get_device_guid(const struct rte_pci_addr *dev, uint8_t *guid, size_t len); +diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_glue.c b/dpdk/drivers/common/mlx5/linux/mlx5_glue.c +index bc6622053f..e29b3b8eae 100644 +--- a/dpdk/drivers/common/mlx5/linux/mlx5_glue.c ++++ b/dpdk/drivers/common/mlx5/linux/mlx5_glue.c +@@ -19,6 +19,10 @@ + static int + mlx5_glue_fork_init(void) + { ++#ifdef HAVE_IBV_FORK_UNNEEDED ++ if (ibv_is_fork_initialized() == IBV_FORK_UNNEEDED) ++ return 0; /* ibv_fork_init() not needed */ ++#endif + return ibv_fork_init(); + } + diff --git a/dpdk/drivers/common/mlx5/linux/mlx5_nl.c b/dpdk/drivers/common/mlx5/linux/mlx5_nl.c index fd4c2d2625..5d04857b38 100644 --- a/dpdk/drivers/common/mlx5/linux/mlx5_nl.c @@ -12899,8 +26082,29 @@ index 2063c0deeb..0b7552338a 100644 +int mlx5_nl_parse_link_status_update(struct nlmsghdr *hdr, uint32_t *ifindex); + #endif /* RTE_PMD_MLX5_NL_H_ */ +diff --git a/dpdk/drivers/common/mlx5/meson.build b/dpdk/drivers/common/mlx5/meson.build +index 6ddbde7e8f..d969b2a31a 100644 +--- a/dpdk/drivers/common/mlx5/meson.build ++++ b/dpdk/drivers/common/mlx5/meson.build +@@ -1,9 +1,14 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright 2019 Mellanox Technologies, Ltd + +-if not (is_linux or (is_windows and is_ms_linker)) ++if not (is_linux or is_windows) + build = false +- reason = 'only supported on Linux and Windows build with clang' ++ reason = 'only supported on Linux and Windows' ++ subdir_done() ++endif ++if is_windows and not is_ms_linker and not 
meson.is_cross_build() ++ build = false ++ reason = 'MinGW is supported only for cross-compilation test' + subdir_done() + endif + diff --git a/dpdk/drivers/common/mlx5/mlx5_common.c b/dpdk/drivers/common/mlx5/mlx5_common.c -index f1650f94c6..4faae6c86d 100644 +index f1650f94c6..f355b3d741 100644 --- a/dpdk/drivers/common/mlx5/mlx5_common.c +++ b/dpdk/drivers/common/mlx5/mlx5_common.c @@ -111,6 +111,11 @@ mlx5_common_args_check_handler(const char *key, const char *val, void *opaque) @@ -12915,7 +26119,54 @@ index f1650f94c6..4faae6c86d 100644 errno = 0; tmp = strtol(val, NULL, 0); if (errno) { -@@ -616,7 +621,6 @@ drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes) +@@ -404,6 +409,11 @@ mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp, + } + } + ++/** ++ * Primary and secondary processes share the `cdev` pointer. ++ * Callbacks addresses are local in each process. ++ * Therefore, each process can register private callbacks. ++ */ + int + mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev) + { +@@ -412,18 +422,16 @@ mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev) + if (!cdev->config.mr_mempool_reg_en) + return 0; + rte_rwlock_write_lock(&cdev->mr_scache.mprwlock); +- if (cdev->mr_scache.mp_cb_registered) +- goto exit; + /* Callback for this device may be already registered. */ + ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb, + cdev); +- if (ret != 0 && rte_errno != EEXIST) +- goto exit; + /* Register mempools only once for this device. */ +- if (ret == 0) ++ if (ret == 0 && rte_eal_process_type() == RTE_PROC_PRIMARY) { + rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev); +- ret = 0; +- cdev->mr_scache.mp_cb_registered = 1; ++ goto exit; ++ } ++ if (ret != 0 && rte_errno == EEXIST) ++ ret = 0; + exit: + rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock); + return ret; +@@ -434,8 +442,8 @@ mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev) + { + int ret; + +- if (!cdev->mr_scache.mp_cb_registered || +- !cdev->config.mr_mempool_reg_en) ++ MLX5_ASSERT(cdev->dev != NULL); ++ if (!cdev->config.mr_mempool_reg_en) + return; + /* Stop watching for mempool events and unregister all mempools. 
*/ + ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb, +@@ -616,7 +624,6 @@ drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes) unsigned int i = 0; int ret = 0; @@ -12923,7 +26174,7 @@ index f1650f94c6..4faae6c86d 100644 while (enabled_classes) { driver = driver_get(RTE_BIT64(i)); if (driver != NULL) { -@@ -640,7 +644,7 @@ drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes) +@@ -640,7 +647,7 @@ drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes) struct mlx5_class_driver *driver; uint32_t enabled_classes = 0; bool already_loaded; @@ -12932,7 +26183,7 @@ index f1650f94c6..4faae6c86d 100644 TAILQ_FOREACH(driver, &drivers_list, next) { if ((driver->drv_class & user_classes) == 0) -@@ -662,12 +666,16 @@ drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes) +@@ -662,12 +669,16 @@ drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes) } enabled_classes |= driver->drv_class; } @@ -12953,7 +26204,7 @@ index f1650f94c6..4faae6c86d 100644 drivers_remove(cdev, enabled_classes); return ret; } -@@ -754,6 +762,7 @@ mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr, +@@ -754,6 +765,7 @@ mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr, uint64_t iova __rte_unused, size_t len) { struct mlx5_common_device *dev; @@ -12961,7 +26212,7 @@ index f1650f94c6..4faae6c86d 100644 struct mlx5_mr *mr; dev = to_mlx5_device(rte_dev); -@@ -771,7 +780,36 @@ mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr, +@@ -771,7 +783,36 @@ mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr, rte_errno = EINVAL; return -1; } @@ -12998,7 +26249,7 @@ index f1650f94c6..4faae6c86d 100644 LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr); /* Insert to the global cache table. */ mlx5_mr_insert_cache(&dev->mr_scache, mr); -@@ -854,7 +892,7 @@ static void mlx5_common_driver_init(void) +@@ -854,7 +895,7 @@ static void mlx5_common_driver_init(void) static bool mlx5_common_initialized; /** @@ -13008,7 +26259,7 @@ index f1650f94c6..4faae6c86d 100644 * must invoke in its constructor. */ diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/dpdk/drivers/common/mlx5/mlx5_common_mr.c -index c694aaf28c..26fdf22386 100644 +index c694aaf28c..7f56e1f973 100644 --- a/dpdk/drivers/common/mlx5/mlx5_common_mr.c +++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.c @@ -78,7 +78,7 @@ mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque) @@ -13281,7 +26532,15 @@ index c694aaf28c..26fdf22386 100644 /* Victim in top-half cache to replace with new entry. */ struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head]; -@@ -1086,7 +1161,6 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl) +@@ -1064,7 +1139,6 @@ mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket) + &share_cache->dereg_mr_cb); + rte_rwlock_init(&share_cache->rwlock); + rte_rwlock_init(&share_cache->mprwlock); +- share_cache->mp_cb_registered = 0; + /* Initialize B-tree and allocate memory for global MR cache table. */ + return mlx5_mr_btree_init(&share_cache->cache, + MLX5_MR_BTREE_CACHE_N * 2, socket); +@@ -1086,7 +1160,6 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl) memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache)); /* Reset the B-tree table. */ mr_ctrl->cache_bh.len = 1; @@ -13289,7 +26548,7 @@ index c694aaf28c..26fdf22386 100644 /* Update the generation number. 
*/ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr; DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d", -@@ -1290,11 +1364,12 @@ mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque, +@@ -1290,11 +1363,12 @@ mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque, unsigned int idx) { struct mlx5_range *ranges = opaque, *range = &ranges[idx]; @@ -13304,7 +26563,7 @@ index c694aaf28c..26fdf22386 100644 } /** -@@ -1541,7 +1616,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n, +@@ -1541,7 +1615,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n, * Destroy a mempool registration object. * * @param standalone @@ -13313,7 +26572,7 @@ index c694aaf28c..26fdf22386 100644 */ static void mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache, -@@ -1834,12 +1909,13 @@ mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr, +@@ -1834,12 +1908,13 @@ mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr, for (i = 0; i < mpr->mrs_n; i++) { const struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr; @@ -13331,7 +26590,7 @@ index c694aaf28c..26fdf22386 100644 entry->lkey = lkey; break; } -@@ -1932,7 +2008,7 @@ mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl, +@@ -1932,7 +2007,7 @@ mlx5_mr_mempool_populate_cache(struct mlx5_mr_ctrl *mr_ctrl, struct mlx5_mempool_mr *mr = &mpr->mrs[i]; struct mr_cache_entry entry; uint32_t lkey; @@ -13340,7 +26599,7 @@ index c694aaf28c..26fdf22386 100644 lkey = mr_btree_lookup(bt, &idx, (uintptr_t)mr->pmd_mr.addr); if (lkey != UINT32_MAX) -@@ -1970,7 +2046,7 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, +@@ -1970,7 +2045,7 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, { struct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head]; uint32_t lkey; @@ -13350,7 +26609,7 @@ index c694aaf28c..26fdf22386 100644 /* Binary-search MR translation table. */ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr); diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.h b/dpdk/drivers/common/mlx5/mlx5_common_mr.h -index cf384b6748..213f5427cb 100644 +index cf384b6748..13eb350980 100644 --- a/dpdk/drivers/common/mlx5/mlx5_common_mr.h +++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.h @@ -56,9 +56,8 @@ struct mr_cache_entry { @@ -13365,7 +26624,15 @@ index cf384b6748..213f5427cb 100644 struct mr_cache_entry (*table)[]; } __rte_packed; -@@ -218,6 +217,8 @@ void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused); +@@ -82,7 +81,6 @@ struct mlx5_mr_share_cache { + uint32_t dev_gen; /* Generation number to flush local caches. */ + rte_rwlock_t rwlock; /* MR cache Lock. */ + rte_rwlock_t mprwlock; /* Mempool Registration Lock. */ +- uint8_t mp_cb_registered; /* Mempool are Registered. */ + struct mlx5_mr_btree cache; /* Global MR cache table. */ + struct mlx5_mr_list mr_list; /* Registered MR list. */ + struct mlx5_mr_list mr_free_list; /* Freed MR list. */ +@@ -218,6 +216,8 @@ void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused); __rte_internal uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mempool *mp, uintptr_t addr); @@ -13696,6 +26963,197 @@ index 3afce56cd9..61fc8dd761 100644 * with at least a minimal alignment size. 
* * @param[in] align +diff --git a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h +index 9f709ff30d..0c09325444 100644 +--- a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h ++++ b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h +@@ -2,8 +2,10 @@ + * Copyright (C) Mellanox Technologies, Ltd. 2001-2020. + */ + +-#ifndef __MLX5_WIN_DEFS_H__ +-#define __MLX5_WIN_DEFS_H__ ++#ifndef MLX5_WIN_DEFS_H ++#define MLX5_WIN_DEFS_H ++ ++#include <rte_bitops.h> + + #ifdef __cplusplus + extern "C" { +@@ -44,29 +46,29 @@ enum { + }; + + enum mlx5dv_cq_init_attr_mask { +- MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE = 1 << 0, +- MLX5DV_CQ_INIT_ATTR_MASK_FLAGS = 1 << 1, +- MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE = 1 << 2, ++ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE = RTE_BIT32(0), ++ MLX5DV_CQ_INIT_ATTR_MASK_FLAG = RTE_BIT32(1), ++ MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE = RTE_BIT32(2), + }; + + enum mlx5dv_cqe_comp_res_format { +- MLX5DV_CQE_RES_FORMAT_HASH = 1 << 0, +- MLX5DV_CQE_RES_FORMAT_CSUM = 1 << 1, +- MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2, ++ MLX5DV_CQE_RES_FORMAT_HASH = RTE_BIT32(0), ++ MLX5DV_CQE_RES_FORMAT_CSUM = RTE_BIT32(1), ++ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX = RTE_BIT32(2), + }; + + enum ibv_access_flags { +- IBV_ACCESS_LOCAL_WRITE = 1, +- IBV_ACCESS_REMOTE_WRITE = 1 << 1, +- IBV_ACCESS_REMOTE_READ = 1 << 2, +- IBV_ACCESS_REMOTE_ATOMIC = 1 << 3, +- IBV_ACCESS_MW_BIND = 1 << 4, +- IBV_ACCESS_ZERO_BASED = 1 << 5, +- IBV_ACCESS_ON_DEMAND = 1 << 6, ++ IBV_ACCESS_LOCAL_WRITE = RTE_BIT32(0), ++ IBV_ACCESS_REMOTE_WRITE = RTE_BIT32(1), ++ IBV_ACCESS_REMOTE_READ = RTE_BIT32(2), ++ IBV_ACCESS_REMOTE_ATOMIC = RTE_BIT32(3), ++ IBV_ACCESS_MW_BIND = RTE_BIT32(4), ++ IBV_ACCESS_ZERO_BASED = RTE_BIT32(5), ++ IBV_ACCESS_ON_DEMAND = RTE_BIT32(6), + }; + + enum mlx5_ib_uapi_devx_create_event_channel_flags { +- MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = 1 << 0, ++ MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA = RTE_BIT32(0), + }; + + #define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA \ +@@ -89,15 +91,15 @@ enum { + }; + + enum { +- MLX5_ETH_WQE_L3_CSUM = (1 << 6), +- MLX5_ETH_WQE_L4_CSUM = (1 << 7), ++ MLX5_ETH_WQE_L3_CSUM = RTE_BIT32(6), ++ MLX5_ETH_WQE_L4_CSUM = RTE_BIT32(7), + }; + + enum { +- MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, +- MLX5_WQE_CTRL_SOLICITED = 1 << 1, +- MLX5_WQE_CTRL_FENCE = 4 << 5, +- MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5, ++ MLX5_WQE_CTRL_SOLICITED = RTE_BIT32(1), ++ MLX5_WQE_CTRL_CQ_UPDATE = RTE_BIT32(3), ++ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = RTE_BIT32(5), ++ MLX5_WQE_CTRL_FENCE = RTE_BIT32(7), + }; + + enum { +@@ -105,6 +107,11 @@ enum { + MLX5_SEND_WQE_SHIFT = 6, + }; + ++/* Verbs headers do not support -pedantic. */ ++#ifdef PEDANTIC ++#pragma GCC diagnostic ignored "-Wpedantic" ++#endif ++ + /* + * RX Hash fields enable to set which incoming packet's field should + * participates in RX Hash. Each flag represent certain packet's field, +@@ -114,18 +121,22 @@ enum { + * TCP and UDP flags can't be enabled together on the same QP. 
+ */ + enum ibv_rx_hash_fields { +- IBV_RX_HASH_SRC_IPV4 = 1 << 0, +- IBV_RX_HASH_DST_IPV4 = 1 << 1, +- IBV_RX_HASH_SRC_IPV6 = 1 << 2, +- IBV_RX_HASH_DST_IPV6 = 1 << 3, +- IBV_RX_HASH_SRC_PORT_TCP = 1 << 4, +- IBV_RX_HASH_DST_PORT_TCP = 1 << 5, +- IBV_RX_HASH_SRC_PORT_UDP = 1 << 6, +- IBV_RX_HASH_DST_PORT_UDP = 1 << 7, +- IBV_RX_HASH_IPSEC_SPI = 1 << 8, +- IBV_RX_HASH_INNER = (1 << 31), ++ IBV_RX_HASH_SRC_IPV4 = RTE_BIT32(0), ++ IBV_RX_HASH_DST_IPV4 = RTE_BIT32(1), ++ IBV_RX_HASH_SRC_IPV6 = RTE_BIT32(2), ++ IBV_RX_HASH_DST_IPV6 = RTE_BIT32(3), ++ IBV_RX_HASH_SRC_PORT_TCP = RTE_BIT32(4), ++ IBV_RX_HASH_DST_PORT_TCP = RTE_BIT32(5), ++ IBV_RX_HASH_SRC_PORT_UDP = RTE_BIT32(6), ++ IBV_RX_HASH_DST_PORT_UDP = RTE_BIT32(7), ++ IBV_RX_HASH_IPSEC_SPI = RTE_BIT32(8), ++ IBV_RX_HASH_INNER = RTE_BIT32(31), + }; + ++#ifdef PEDANTIC ++#pragma GCC diagnostic error "-Wpedantic" ++#endif ++ + enum { + MLX5_RCV_DBR = 0, + MLX5_SND_DBR = 1, +@@ -145,9 +156,9 @@ enum { + #endif + + enum ibv_flow_flags { +- IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0, +- IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1, +- IBV_FLOW_ATTR_FLAGS_EGRESS = 1 << 2, ++ IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = RTE_BIT32(0), ++ IBV_FLOW_ATTR_FLAGS_DONT_TRAP = RTE_BIT32(1), ++ IBV_FLOW_ATTR_FLAGS_EGRESS = RTE_BIT32(2), + }; + + enum ibv_flow_attr_type { +@@ -244,11 +255,11 @@ struct mlx5_wqe_data_seg { + rte_be64_t addr; + }; + +-#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) +-#define IBV_DEVICE_RAW_IP_CSUM (1 << 26) +-#define IBV_RAW_PACKET_CAP_CVLAN_STRIPPING (1 << 0) +-#define IBV_RAW_PACKET_CAP_SCATTER_FCS (1 << 1) +-#define IBV_QPT_RAW_PACKET 8 ++#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP RTE_BIT32(4) ++#define IBV_DEVICE_RAW_IP_CSUM RTE_BIT32(26) ++#define IBV_RAW_PACKET_CAP_CVLAN_STRIPPING RTE_BIT32(0) ++#define IBV_RAW_PACKET_CAP_SCATTER_FCS RTE_BIT32(1) ++#define IBV_QPT_RAW_PACKET 8 + + enum { + MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0, +@@ -258,8 +269,9 @@ enum { + }; + + enum { +- MLX5_MATCH_OUTER_HEADERS = 1 << 0, +- MLX5_MATCH_MISC_PARAMETERS = 1 << 1, +- MLX5_MATCH_INNER_HEADERS = 1 << 2, ++ MLX5_MATCH_OUTER_HEADERS = RTE_BIT32(0), ++ MLX5_MATCH_MISC_PARAMETERS = RTE_BIT32(1), ++ MLX5_MATCH_INNER_HEADERS = RTE_BIT32(2), + }; +-#endif /* __MLX5_WIN_DEFS_H__ */ ++ ++#endif /* MLX5_WIN_DEFS_H */ +diff --git a/dpdk/drivers/common/qat/meson.build b/dpdk/drivers/common/qat/meson.build +index af92271a75..1606fadef0 100644 +--- a/dpdk/drivers/common/qat/meson.build ++++ b/dpdk/drivers/common/qat/meson.build +@@ -35,14 +35,6 @@ if qat_crypto and not libcrypto.found() + 'missing dependency, libcrypto') + endif + +-# The driver should not build if both compression and crypto are disabled +-#FIXME common code depends on compression files so check only compress! +-if not qat_compress # and not qat_crypto +- build = false +- reason = '' # rely on reason for compress/crypto above +- subdir_done() +-endif +- + deps += ['bus_pci', 'cryptodev', 'net', 'compressdev'] + sources += files( + 'qat_common.c', diff --git a/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h index a6d403fac3..12a7258c60 100644 --- a/dpdk/drivers/common/qat/qat_adf/adf_transport_access_macros.h @@ -13709,8 +27167,335 @@ index a6d403fac3..12a7258c60 100644 #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? 
\ ADF_RING_SIZE_4K : SIZE) #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) +diff --git a/dpdk/drivers/common/qat/qat_pf2vf.c b/dpdk/drivers/common/qat/qat_pf2vf.c +index 4e9ffc72fc..621f12fce2 100644 +--- a/dpdk/drivers/common/qat/qat_pf2vf.c ++++ b/dpdk/drivers/common/qat/qat_pf2vf.c +@@ -59,7 +59,7 @@ int qat_pf2vf_exch_msg(struct qat_pci_device *qat_dev, + * the message + */ + do { +- rte_delay_us_sleep(5); ++ rte_delay_us_sleep(ADF_IOV_MSG_ACK_DELAY_US * 2); + val = ADF_CSR_RD(pmisc_bar_addr, vf_csr_off); + } while ((val & ADF_PFVF_INT) && + (++count < ADF_IOV_MSG_ACK_MAX_RETRY)); +@@ -70,6 +70,8 @@ int qat_pf2vf_exch_msg(struct qat_pci_device *qat_dev, + } + + uint32_t pf_val = ADF_CSR_RD(pmisc_bar_addr, pf_csr_off); ++ msg &= ~ADF_PFVF_INT; ++ ADF_CSR_WR(pmisc_bar_addr, pf_csr_off, msg); + + *(ret + i) = (uint8_t)(pf_val >> (pf2vf_msg.block_hdr > 0 ? + 10 : 8) & 0xff); +diff --git a/dpdk/drivers/common/qat/qat_qp.c b/dpdk/drivers/common/qat/qat_qp.c +index cde421eb77..5a39a90a0b 100644 +--- a/dpdk/drivers/common/qat/qat_qp.c ++++ b/dpdk/drivers/common/qat/qat_qp.c +@@ -451,20 +451,6 @@ adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen) + return 0; + } + +-static inline void +-txq_write_tail(enum qat_device_gen qat_dev_gen, +- struct qat_qp *qp, struct qat_queue *q) +-{ +- struct qat_qp_hw_spec_funcs *ops = +- qat_qp_hw_spec[qat_dev_gen]; +- +- /* +- * Pointer check should be done during +- * initialization +- */ +- ops->qat_qp_csr_write_tail(qp, q); +-} +- + static inline void + qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp, + struct qat_queue *q, uint32_t new_head) +@@ -643,179 +629,6 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops) + return nb_ops_sent; + } + +-/* Use this for compression only - but keep consistent with above common +- * function as much as possible. +- */ +-uint16_t +-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops) +-{ +- register struct qat_queue *queue; +- struct qat_qp *tmp_qp = (struct qat_qp *)qp; +- register uint32_t nb_ops_sent = 0; +- register int nb_desc_to_build; +- uint16_t nb_ops_possible = nb_ops; +- register uint8_t *base_addr; +- register uint32_t tail; +- +- int descriptors_built, total_descriptors_built = 0; +- int nb_remaining_descriptors; +- int overflow = 0; +- +- if (unlikely(nb_ops == 0)) +- return 0; +- +- /* read params used a lot in main loop into registers */ +- queue = &(tmp_qp->tx_q); +- base_addr = (uint8_t *)queue->base_addr; +- tail = queue->tail; +- +- /* Find how many can actually fit on the ring */ +- { +- /* dequeued can only be written by one thread, but it may not +- * be this thread. As it's 4-byte aligned it will be read +- * atomically here by any Intel CPU. +- * enqueued can wrap before dequeued, but cannot +- * lap it as var size of enq/deq (uint32_t) > var size of +- * max_inflights (uint16_t). In reality inflights is never +- * even as big as max uint16_t, as it's <= ADF_MAX_DESC. +- * On wrapping, the calculation still returns the correct +- * positive value as all three vars are unsigned. 
+- */ +- uint32_t inflights = +- tmp_qp->enqueued - tmp_qp->dequeued; +- +- /* Find how many can actually fit on the ring */ +- overflow = (inflights + nb_ops) - tmp_qp->max_inflights; +- if (overflow > 0) { +- nb_ops_possible = nb_ops - overflow; +- if (nb_ops_possible == 0) +- return 0; +- } +- +- /* QAT has plenty of work queued already, so don't waste cycles +- * enqueueing, wait til the application has gathered a bigger +- * burst or some completed ops have been dequeued +- */ +- if (tmp_qp->min_enq_burst_threshold && inflights > +- QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible < +- tmp_qp->min_enq_burst_threshold) { +- tmp_qp->stats.threshold_hit_count++; +- return 0; +- } +- } +- +- /* At this point nb_ops_possible is assuming a 1:1 mapping +- * between ops and descriptors. +- * Fewer may be sent if some ops have to be split. +- * nb_ops_possible is <= burst size. +- * Find out how many spaces are actually available on the qp in case +- * more are needed. +- */ +- nb_remaining_descriptors = nb_ops_possible +- + ((overflow >= 0) ? 0 : overflow * (-1)); +- QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d", +- nb_ops, nb_remaining_descriptors); +- +- while (nb_ops_sent != nb_ops_possible && +- nb_remaining_descriptors > 0) { +- struct qat_comp_op_cookie *cookie = +- tmp_qp->op_cookies[tail >> queue->trailz]; +- +- descriptors_built = 0; +- +- QAT_DP_LOG(DEBUG, "--- data length: %u", +- ((struct rte_comp_op *)*ops)->src.length); +- +- nb_desc_to_build = qat_comp_build_request(*ops, +- base_addr + tail, cookie, tmp_qp->qat_dev_gen); +- QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, " +- "%d ops sent, %d descriptors needed", +- total_descriptors_built, nb_remaining_descriptors, +- nb_ops_sent, nb_desc_to_build); +- +- if (unlikely(nb_desc_to_build < 0)) { +- /* this message cannot be enqueued */ +- tmp_qp->stats.enqueue_err_count++; +- if (nb_ops_sent == 0) +- return 0; +- goto kick_tail; +- } else if (unlikely(nb_desc_to_build > 1)) { +- /* this op is too big and must be split - get more +- * descriptors and retry +- */ +- +- QAT_DP_LOG(DEBUG, "Build %d descriptors for this op", +- nb_desc_to_build); +- +- nb_remaining_descriptors -= nb_desc_to_build; +- if (nb_remaining_descriptors >= 0) { +- /* There are enough remaining descriptors +- * so retry +- */ +- int ret2 = qat_comp_build_multiple_requests( +- *ops, tmp_qp, tail, +- nb_desc_to_build); +- +- if (unlikely(ret2 < 1)) { +- QAT_DP_LOG(DEBUG, +- "Failed to build (%d) descriptors, status %d", +- nb_desc_to_build, ret2); +- +- qat_comp_free_split_op_memzones(cookie, +- nb_desc_to_build - 1); +- +- tmp_qp->stats.enqueue_err_count++; +- +- /* This message cannot be enqueued */ +- if (nb_ops_sent == 0) +- return 0; +- goto kick_tail; +- } else { +- descriptors_built = ret2; +- total_descriptors_built += +- descriptors_built; +- nb_remaining_descriptors -= +- descriptors_built; +- QAT_DP_LOG(DEBUG, +- "Multiple descriptors (%d) built ok", +- descriptors_built); +- } +- } else { +- QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) " +- "exceeds number of available descriptors (%d)", +- nb_desc_to_build, +- nb_remaining_descriptors + +- nb_desc_to_build); +- +- qat_comp_free_split_op_memzones(cookie, +- nb_desc_to_build - 1); +- +- /* Not enough extra descriptors */ +- if (nb_ops_sent == 0) +- return 0; +- goto kick_tail; +- } +- } else { +- descriptors_built = 1; +- total_descriptors_built++; +- nb_remaining_descriptors--; +- QAT_DP_LOG(DEBUG, "Single descriptor built ok"); +- } +- +- tail 
= adf_modulo(tail + (queue->msg_size * descriptors_built), +- queue->modulo_mask); +- ops++; +- nb_ops_sent++; +- } +- +-kick_tail: +- queue->tail = tail; +- tmp_qp->enqueued += total_descriptors_built; +- tmp_qp->stats.enqueued_count += nb_ops_sent; +- txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue); +- return nb_ops_sent; +-} +- + uint16_t + qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops) + { +diff --git a/dpdk/drivers/common/qat/qat_qp.h b/dpdk/drivers/common/qat/qat_qp.h +index deafb407b3..272934bc30 100644 +--- a/dpdk/drivers/common/qat/qat_qp.h ++++ b/dpdk/drivers/common/qat/qat_qp.h +@@ -80,9 +80,6 @@ struct qat_qp_config { + uint16_t + qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops); + +-uint16_t +-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops); +- + uint16_t + qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops); + +@@ -153,6 +150,21 @@ struct qat_qp_hw_spec_funcs { + qat_qp_get_hw_data_t qat_qp_get_hw_data; + }; + +-extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[]; ++extern struct qat_qp_hw_spec_funcs* ++ qat_qp_hw_spec[]; ++ ++static inline void ++txq_write_tail(enum qat_device_gen qat_dev_gen, ++ struct qat_qp *qp, struct qat_queue *q) ++{ ++ struct qat_qp_hw_spec_funcs *ops = ++ qat_qp_hw_spec[qat_dev_gen]; ++ ++ /* ++ * Pointer check should be done during ++ * initialization ++ */ ++ ops->qat_qp_csr_write_tail(qp, q); ++} + + #endif /* _QAT_QP_H_ */ +diff --git a/dpdk/drivers/common/sfc_efx/base/ef10_nic.c b/dpdk/drivers/common/sfc_efx/base/ef10_nic.c +index 355d274470..4b0e4c10b4 100644 +--- a/dpdk/drivers/common/sfc_efx/base/ef10_nic.c ++++ b/dpdk/drivers/common/sfc_efx/base/ef10_nic.c +@@ -2185,7 +2185,7 @@ ef10_nic_board_cfg( + /* Alignment for WPTR updates */ + encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; + +- encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); ++ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_TX_KER_BYTE_CNT); + /* No boundary crossing limits */ + encp->enc_tx_dma_desc_boundary = 0; + +diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h +index 96769935c0..398eb8dbd2 100644 +--- a/dpdk/drivers/common/sfc_efx/base/efx.h ++++ b/dpdk/drivers/common/sfc_efx/base/efx.h +@@ -4535,6 +4535,24 @@ efx_mae_action_set_populate_mark( + __in efx_mae_actions_t *spec, + __in uint32_t mark_value); + ++/* ++ * Whilst efx_mae_action_set_populate_mark() can be used to request setting ++ * a user mark in matching packets and demands that the request come before ++ * setting the final destination (deliver action), this API can be invoked ++ * after deliver action has been added in order to request mark reset if ++ * the user's own mark request has not been added as a result of parsing. ++ * ++ * It is useful when the driver chains an outer rule (OR) with an action ++ * rule (AR) by virtue of a recirculation ID. The OR may set mark from ++ * this ID to help the driver identify packets that hit the OR and do ++ * not hit the AR. But, for packets that do hit the AR, the driver ++ * wants to reset the mark value to avoid confusing recipients. 
++ */ ++LIBEFX_API ++extern void ++efx_mae_action_set_populate_mark_reset( ++ __in efx_mae_actions_t *spec); ++ + LIBEFX_API + extern __checkReturn efx_rc_t + efx_mae_action_set_populate_deliver( +@@ -4683,6 +4701,20 @@ efx_mae_action_set_fill_in_counter_id( + __in efx_mae_actions_t *spec, + __in const efx_counter_t *counter_idp); + ++/* ++ * Clears dangling FW object IDs (counter ID, for instance) in ++ * the action set specification. Useful for adapter restarts, ++ * when all MAE objects need to be reallocated by the driver. ++ * ++ * This method only clears the IDs in the specification. ++ * The driver is still responsible for keeping the IDs ++ * separately and freeing them when stopping the port. ++ */ ++LIBEFX_API ++extern void ++efx_mae_action_set_clear_fw_rsrc_ids( ++ __in efx_mae_actions_t *spec); ++ + /* Action set ID */ + typedef struct efx_mae_aset_id_s { + uint32_t id; diff --git a/dpdk/drivers/common/sfc_efx/base/efx_impl.h b/dpdk/drivers/common/sfc_efx/base/efx_impl.h -index e2802e6672..ba00eeeb47 100644 +index e2802e6672..99a6a732f4 100644 --- a/dpdk/drivers/common/sfc_efx/base/efx_impl.h +++ b/dpdk/drivers/common/sfc_efx/base/efx_impl.h @@ -1555,6 +1555,12 @@ efx_mcdi_intf_from_pcie( @@ -13726,8 +27511,19 @@ index e2802e6672..ba00eeeb47 100644 LIBEFX_INTERNAL extern __checkReturn efx_rc_t efx_mcdi_init_evq( +@@ -1793,6 +1799,10 @@ typedef struct efx_mae_action_vlan_push_s { + uint16_t emavp_tci_be; + } efx_mae_action_vlan_push_t; + ++/* ++ * Helper efx_mae_action_set_clear_fw_rsrc_ids() is responsible ++ * to initialise every field in this structure to INVALID value. ++ */ + typedef struct efx_mae_actions_rsrc_s { + efx_mae_mac_id_t emar_dst_mac_id; + efx_mae_mac_id_t emar_src_mac_id; diff --git a/dpdk/drivers/common/sfc_efx/base/efx_mae.c b/dpdk/drivers/common/sfc_efx/base/efx_mae.c -index 7b24e3fee4..31f51b5548 100644 +index 7b24e3fee4..4c33471f28 100644 --- a/dpdk/drivers/common/sfc_efx/base/efx_mae.c +++ b/dpdk/drivers/common/sfc_efx/base/efx_mae.c @@ -1027,6 +1027,10 @@ efx_mae_match_spec_field_set( @@ -13752,7 +27548,38 @@ index 7b24e3fee4..31f51b5548 100644 default: EFSYS_ASSERT(B_FALSE); } -@@ -2242,7 +2250,8 @@ efx_mae_outer_rule_insert( +@@ -1386,10 +1394,7 @@ efx_mae_action_set_spec_init( + goto fail1; + } + +- spec->ema_rsrc.emar_dst_mac_id.id = EFX_MAE_RSRC_ID_INVALID; +- spec->ema_rsrc.emar_src_mac_id.id = EFX_MAE_RSRC_ID_INVALID; +- spec->ema_rsrc.emar_eh_id.id = EFX_MAE_RSRC_ID_INVALID; +- spec->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID; ++ efx_mae_action_set_clear_fw_rsrc_ids(spec); + + /* + * Helpers which populate v2 actions must reject them when v2 is not +@@ -1908,6 +1913,18 @@ efx_mae_action_set_populate_mark( + EFX_MAE_ACTION_MARK, sizeof (mark_value), arg)); + } + ++ void ++efx_mae_action_set_populate_mark_reset( ++ __in efx_mae_actions_t *spec) ++{ ++ uint32_t action_mask = (1U << EFX_MAE_ACTION_MARK); ++ ++ if ((spec->ema_actions & action_mask) == 0) { ++ spec->ema_actions |= action_mask; ++ spec->ema_mark_value = 0; ++ } ++} ++ + __checkReturn efx_rc_t + efx_mae_action_set_populate_deliver( + __in efx_mae_actions_t *spec, +@@ -2242,7 +2259,8 @@ efx_mae_outer_rule_insert( memcpy(payload + offset, spec->emms_mask_value_pairs.outer, MAE_ENC_FIELD_PAIRS_LEN); @@ -13762,6 +27589,23 @@ index 7b24e3fee4..31f51b5548 100644 spec->emms_outer_rule_recirc_id); efx_mcdi_execute(enp, &req); +@@ -3006,6 +3024,16 @@ efx_mae_action_set_fill_in_counter_id( + return (rc); + } + ++ void ++efx_mae_action_set_clear_fw_rsrc_ids( ++ __in efx_mae_actions_t 
*spec) ++{ ++ spec->ema_rsrc.emar_dst_mac_id.id = EFX_MAE_RSRC_ID_INVALID; ++ spec->ema_rsrc.emar_src_mac_id.id = EFX_MAE_RSRC_ID_INVALID; ++ spec->ema_rsrc.emar_eh_id.id = EFX_MAE_RSRC_ID_INVALID; ++ spec->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID; ++} ++ + __checkReturn efx_rc_t + efx_mae_counters_alloc( + __in efx_nic_t *enp, diff --git a/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c b/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c index 9189a7a8b3..404ca23d58 100644 --- a/dpdk/drivers/common/sfc_efx/base/efx_mcdi.c @@ -13859,6 +27703,58 @@ index 9189a7a8b3..404ca23d58 100644 /* * This function returns the pf and vf number of a function. If it is a pf the * vf number is 0xffff. The vf number is the index of the vf on that +diff --git a/dpdk/drivers/common/sfc_efx/base/efx_rx.c b/dpdk/drivers/common/sfc_efx/base/efx_rx.c +index 7e63363be7..631c2fd063 100644 +--- a/dpdk/drivers/common/sfc_efx/base/efx_rx.c ++++ b/dpdk/drivers/common/sfc_efx/base/efx_rx.c +@@ -896,8 +896,10 @@ efx_rx_qcreate_internal( + + rss_hash_field = + &erplp->erpl_fields[EFX_RX_PREFIX_FIELD_RSS_HASH]; +- if (rss_hash_field->erpfi_width_bits == 0) ++ if (rss_hash_field->erpfi_width_bits == 0) { ++ rc = ENOTSUP; + goto fail5; ++ } + } + + enp->en_rx_qcount++; +diff --git a/dpdk/drivers/common/sfc_efx/base/rhead_virtio.c b/dpdk/drivers/common/sfc_efx/base/rhead_virtio.c +index 335cb747d1..7f087170fe 100644 +--- a/dpdk/drivers/common/sfc_efx/base/rhead_virtio.c ++++ b/dpdk/drivers/common/sfc_efx/base/rhead_virtio.c +@@ -47,14 +47,6 @@ rhead_virtio_qstart( + goto fail2; + } + +- if (evvdp != NULL) { +- if ((evvdp->evvd_vq_cidx > evvcp->evvc_vq_size) || +- (evvdp->evvd_vq_pidx > evvcp->evvc_vq_size)) { +- rc = EINVAL; +- goto fail3; +- } +- } +- + req.emr_cmd = MC_CMD_VIRTIO_INIT_QUEUE; + req.emr_in_buf = payload; + req.emr_in_length = MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN; +@@ -116,15 +108,13 @@ rhead_virtio_qstart( + + if (req.emr_rc != 0) { + rc = req.emr_rc; +- goto fail4; ++ goto fail3; + } + + evvp->evv_vi_index = vi_index; + + return (0); + +-fail4: +- EFSYS_PROBE(fail4); + fail3: + EFSYS_PROBE(fail3); + fail2: diff --git a/dpdk/drivers/common/sfc_efx/efsys.h b/dpdk/drivers/common/sfc_efx/efsys.h index 3860c2835a..224254bee7 100644 --- a/dpdk/drivers/common/sfc_efx/efsys.h @@ -13872,11 +27768,74 @@ index 3860c2835a..224254bee7 100644 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb() /* TIMESTAMP */ +diff --git a/dpdk/drivers/common/sfc_efx/version.map b/dpdk/drivers/common/sfc_efx/version.map +index 97dd943ec4..9387bc6ce8 100644 +--- a/dpdk/drivers/common/sfc_efx/version.map ++++ b/dpdk/drivers/common/sfc_efx/version.map +@@ -89,6 +89,7 @@ INTERNAL { + efx_mae_action_rule_insert; + efx_mae_action_rule_remove; + efx_mae_action_set_alloc; ++ efx_mae_action_set_clear_fw_rsrc_ids; + efx_mae_action_set_fill_in_counter_id; + efx_mae_action_set_fill_in_dst_mac_id; + efx_mae_action_set_fill_in_eh_id; +@@ -103,6 +104,7 @@ INTERNAL { + efx_mae_action_set_populate_encap; + efx_mae_action_set_populate_flag; + efx_mae_action_set_populate_mark; ++ efx_mae_action_set_populate_mark_reset; + efx_mae_action_set_populate_set_dst_mac; + efx_mae_action_set_populate_set_src_mac; + efx_mae_action_set_populate_vlan_pop; +diff --git a/dpdk/drivers/compress/mlx5/meson.build b/dpdk/drivers/compress/mlx5/meson.build +index 7aac329986..fb04835f36 100644 +--- a/dpdk/drivers/compress/mlx5/meson.build ++++ b/dpdk/drivers/compress/mlx5/meson.build +@@ -7,7 +7,6 @@ if not is_linux + subdir_done() + endif + +-fmt_name = 
'mlx5_compress' + deps += ['common_mlx5', 'eal', 'compressdev'] + sources = files( + 'mlx5_compress.c', diff --git a/dpdk/drivers/compress/mlx5/mlx5_compress.c b/dpdk/drivers/compress/mlx5/mlx5_compress.c -index 82b871bd86..a18ec8a6cf 100644 +index 82b871bd86..515b5dfa67 100644 --- a/dpdk/drivers/compress/mlx5/mlx5_compress.c +++ b/dpdk/drivers/compress/mlx5/mlx5_compress.c -@@ -563,7 +563,18 @@ mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp, +@@ -96,9 +96,7 @@ static const struct rte_compressdev_capabilities mlx5_caps[] = { + RTE_COMP_FF_HUFFMAN_DYNAMIC, + .window_size = {.min = 10, .max = 15, .increment = 1}, + }, +- { +- .algo = RTE_COMP_ALGO_LIST_END, +- } ++ RTE_COMP_END_OF_CAPABILITIES_LIST() + }; + + static void +@@ -246,8 +244,8 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, + mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format); + qp_attr.num_of_receive_wqes = 0; + qp_attr.num_of_send_wqbbs = RTE_BIT32(log_ops_n); +- qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp +- && priv->mmo_dma_qp; ++ qp_attr.mmo = priv->mmo_decomp_qp || priv->mmo_comp_qp || ++ priv->mmo_dma_qp; + ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp, + qp_attr.num_of_send_wqbbs * + MLX5_WQE_SIZE, &qp_attr, socket_id); +@@ -316,7 +314,7 @@ mlx5_compress_xform_create(struct rte_compressdev *dev, + DRV_LOG(ERR, "Not enough capabilities to support decompress operation, maybe old FW/OFED version?"); + return -ENOTSUP; + } +- if (xform->compress.hash_algo != RTE_COMP_HASH_ALGO_NONE) { ++ if (xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE) { + DRV_LOG(ERR, "SHA is not supported."); + return -ENOTSUP; + } +@@ -563,7 +561,18 @@ mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp, qp->qp.wqes; volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr; @@ -13896,6 +27855,15 @@ index 82b871bd86..a18ec8a6cf 100644 op->consumed = 0; op->produced = 0; op->output_chksum = 0; +@@ -625,7 +634,7 @@ mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops, + break; + case RTE_COMP_CHECKSUM_ADLER32: + op->output_chksum = (uint64_t)rte_be_to_cpu_32 +- (opaq[idx].adler32) << 32; ++ (opaq[idx].adler32); + break; + case RTE_COMP_CHECKSUM_CRC32_ADLER32: + op->output_chksum = (uint64_t)rte_be_to_cpu_32 diff --git a/dpdk/drivers/compress/octeontx/include/zip_regs.h b/dpdk/drivers/compress/octeontx/include/zip_regs.h index 96e538bb75..94a48cde66 100644 --- a/dpdk/drivers/compress/octeontx/include/zip_regs.h @@ -13956,6 +27924,235 @@ index dd62285b86..1b6178f661 100644 qp->name = name; +diff --git a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c +index 12d9d89072..3a8484eef1 100644 +--- a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c ++++ b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen1.c +@@ -26,7 +26,7 @@ const struct rte_compressdev_capabilities qat_gen1_comp_capabilities[] = { + RTE_COMP_FF_OOP_LB_IN_SGL_OUT | + RTE_COMP_FF_STATEFUL_DECOMPRESSION, + .window_size = {.min = 15, .max = 15, .increment = 0} }, +- {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } }; ++ RTE_COMP_END_OF_CAPABILITIES_LIST() }; + + static int + qat_comp_dev_config_gen1(struct rte_compressdev *dev, +diff --git a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c +index 79b2ceb414..05906f13e0 100644 +--- a/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c ++++ b/dpdk/drivers/compress/qat/dev/qat_comp_pmd_gen4.c +@@ -25,7 +25,7 @@ qat_gen4_comp_capabilities[] = { + 
RTE_COMP_FF_OOP_SGL_IN_LB_OUT | + RTE_COMP_FF_OOP_LB_IN_SGL_OUT, + .window_size = {.min = 15, .max = 15, .increment = 0} }, +- {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } }; ++ RTE_COMP_END_OF_CAPABILITIES_LIST() }; + + static int + qat_comp_dev_config_gen4(struct rte_compressdev *dev, +diff --git a/dpdk/drivers/compress/qat/qat_comp.c b/dpdk/drivers/compress/qat/qat_comp.c +index e8f57c3cc4..1282ffc2ab 100644 +--- a/dpdk/drivers/compress/qat/qat_comp.c ++++ b/dpdk/drivers/compress/qat/qat_comp.c +@@ -1144,3 +1144,185 @@ qat_comp_stream_free(struct rte_compressdev *dev, void *stream) + } + return -EINVAL; + } ++ ++/** ++ * Enqueue packets for processing on queue pair of a device ++ * ++ * @param qp ++ * qat queue pair ++ * @param ops ++ * Compressdev operation ++ * @param nb_ops ++ * number of operations ++ * @return ++ * - nb_ops_sent if successful ++ */ ++uint16_t ++qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops) ++{ ++ register struct qat_queue *queue; ++ struct qat_qp *tmp_qp = (struct qat_qp *)qp; ++ register uint32_t nb_ops_sent = 0; ++ register int nb_desc_to_build; ++ uint16_t nb_ops_possible = nb_ops; ++ register uint8_t *base_addr; ++ register uint32_t tail; ++ ++ int descriptors_built, total_descriptors_built = 0; ++ int nb_remaining_descriptors; ++ int overflow = 0; ++ ++ if (unlikely(nb_ops == 0)) ++ return 0; ++ ++ /* read params used a lot in main loop into registers */ ++ queue = &(tmp_qp->tx_q); ++ base_addr = (uint8_t *)queue->base_addr; ++ tail = queue->tail; ++ ++ /* Find how many can actually fit on the ring */ ++ { ++ /* dequeued can only be written by one thread, but it may not ++ * be this thread. As it's 4-byte aligned it will be read ++ * atomically here by any Intel CPU. ++ * enqueued can wrap before dequeued, but cannot ++ * lap it as var size of enq/deq (uint32_t) > var size of ++ * max_inflights (uint16_t). In reality inflights is never ++ * even as big as max uint16_t, as it's <= ADF_MAX_DESC. ++ * On wrapping, the calculation still returns the correct ++ * positive value as all three vars are unsigned. ++ */ ++ uint32_t inflights = ++ tmp_qp->enqueued - tmp_qp->dequeued; ++ ++ /* Find how many can actually fit on the ring */ ++ overflow = (inflights + nb_ops) - tmp_qp->max_inflights; ++ if (overflow > 0) { ++ nb_ops_possible = nb_ops - overflow; ++ if (nb_ops_possible == 0) ++ return 0; ++ } ++ ++ /* QAT has plenty of work queued already, so don't waste cycles ++ * enqueueing, wait til the application has gathered a bigger ++ * burst or some completed ops have been dequeued ++ */ ++ if (tmp_qp->min_enq_burst_threshold && inflights > ++ QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible < ++ tmp_qp->min_enq_burst_threshold) { ++ tmp_qp->stats.threshold_hit_count++; ++ return 0; ++ } ++ } ++ ++ /* At this point nb_ops_possible is assuming a 1:1 mapping ++ * between ops and descriptors. ++ * Fewer may be sent if some ops have to be split. ++ * nb_ops_possible is <= burst size. ++ * Find out how many spaces are actually available on the qp in case ++ * more are needed. ++ */ ++ nb_remaining_descriptors = nb_ops_possible ++ + ((overflow >= 0) ? 
0 : overflow * (-1)); ++ QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d", ++ nb_ops, nb_remaining_descriptors); ++ ++ while (nb_ops_sent != nb_ops_possible && ++ nb_remaining_descriptors > 0) { ++ struct qat_comp_op_cookie *cookie = ++ tmp_qp->op_cookies[tail >> queue->trailz]; ++ ++ descriptors_built = 0; ++ ++ QAT_DP_LOG(DEBUG, "--- data length: %u", ++ ((struct rte_comp_op *)*ops)->src.length); ++ ++ nb_desc_to_build = qat_comp_build_request(*ops, ++ base_addr + tail, cookie, tmp_qp->qat_dev_gen); ++ QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, " ++ "%d ops sent, %d descriptors needed", ++ total_descriptors_built, nb_remaining_descriptors, ++ nb_ops_sent, nb_desc_to_build); ++ ++ if (unlikely(nb_desc_to_build < 0)) { ++ /* this message cannot be enqueued */ ++ tmp_qp->stats.enqueue_err_count++; ++ if (nb_ops_sent == 0) ++ return 0; ++ goto kick_tail; ++ } else if (unlikely(nb_desc_to_build > 1)) { ++ /* this op is too big and must be split - get more ++ * descriptors and retry ++ */ ++ ++ QAT_DP_LOG(DEBUG, "Build %d descriptors for this op", ++ nb_desc_to_build); ++ ++ nb_remaining_descriptors -= nb_desc_to_build; ++ if (nb_remaining_descriptors >= 0) { ++ /* There are enough remaining descriptors ++ * so retry ++ */ ++ int ret2 = qat_comp_build_multiple_requests( ++ *ops, tmp_qp, tail, ++ nb_desc_to_build); ++ ++ if (unlikely(ret2 < 1)) { ++ QAT_DP_LOG(DEBUG, ++ "Failed to build (%d) descriptors, status %d", ++ nb_desc_to_build, ret2); ++ ++ qat_comp_free_split_op_memzones(cookie, ++ nb_desc_to_build - 1); ++ ++ tmp_qp->stats.enqueue_err_count++; ++ ++ /* This message cannot be enqueued */ ++ if (nb_ops_sent == 0) ++ return 0; ++ goto kick_tail; ++ } else { ++ descriptors_built = ret2; ++ total_descriptors_built += ++ descriptors_built; ++ nb_remaining_descriptors -= ++ descriptors_built; ++ QAT_DP_LOG(DEBUG, ++ "Multiple descriptors (%d) built ok", ++ descriptors_built); ++ } ++ } else { ++ QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) " ++ "exceeds number of available descriptors (%d)", ++ nb_desc_to_build, ++ nb_remaining_descriptors + ++ nb_desc_to_build); ++ ++ qat_comp_free_split_op_memzones(cookie, ++ nb_desc_to_build - 1); ++ ++ /* Not enough extra descriptors */ ++ if (nb_ops_sent == 0) ++ return 0; ++ goto kick_tail; ++ } ++ } else { ++ descriptors_built = 1; ++ total_descriptors_built++; ++ nb_remaining_descriptors--; ++ QAT_DP_LOG(DEBUG, "Single descriptor built ok"); ++ } ++ ++ tail = adf_modulo(tail + (queue->msg_size * descriptors_built), ++ queue->modulo_mask); ++ ops++; ++ nb_ops_sent++; ++ } ++ ++kick_tail: ++ queue->tail = tail; ++ tmp_qp->enqueued += total_descriptors_built; ++ tmp_qp->stats.enqueued_count += nb_ops_sent; ++ txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue); ++ return nb_ops_sent; ++} +diff --git a/dpdk/drivers/compress/qat/qat_comp.h b/dpdk/drivers/compress/qat/qat_comp.h +index da7b9a6eec..dc220cd6e3 100644 +--- a/dpdk/drivers/compress/qat/qat_comp.h ++++ b/dpdk/drivers/compress/qat/qat_comp.h +@@ -141,5 +141,8 @@ qat_comp_stream_create(struct rte_compressdev *dev, + int + qat_comp_stream_free(struct rte_compressdev *dev, void *stream); + ++uint16_t ++qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops); ++ + #endif + #endif diff --git a/dpdk/drivers/compress/qat/qat_comp_pmd.c b/dpdk/drivers/compress/qat/qat_comp_pmd.c index 9b24d46e97..da6404c017 100644 --- a/dpdk/drivers/compress/qat/qat_comp_pmd.c @@ -14098,7 +28295,7 @@ index e4ee102344..583ba3b523 100644 * @retval -1 
value for error * diff --git a/dpdk/drivers/crypto/ccp/ccp_crypto.c b/dpdk/drivers/crypto/ccp/ccp_crypto.c -index 70daed791e..4bab18323b 100644 +index 70daed791e..351d8ac63e 100644 --- a/dpdk/drivers/crypto/ccp/ccp_crypto.c +++ b/dpdk/drivers/crypto/ccp/ccp_crypto.c @@ -2,6 +2,8 @@ @@ -14110,7 +28307,88 @@ index 70daed791e..4bab18323b 100644 #include <dirent.h> #include <fcntl.h> #include <stdio.h> -@@ -1299,7 +1301,7 @@ ccp_auth_slot(struct ccp_session *session) +@@ -31,8 +33,6 @@ + #include <openssl/err.h> + #include <openssl/hmac.h> + +-extern int iommu_mode; +-void *sha_ctx; + /* SHA initial context values */ + uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA1_H4, SHA1_H3, +@@ -746,13 +746,8 @@ ccp_configure_session_cipher(struct ccp_session *sess, + CCP_LOG_ERR("Invalid CCP Engine"); + return -ENOTSUP; + } +- if (iommu_mode == 2) { +- sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); +- } else { +- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); +- } ++ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); ++ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); + return 0; + } + +@@ -791,7 +786,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha1_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA1_HMAC: + if (sess->auth_opt) { +@@ -830,7 +824,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha224_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + if (sess->auth_opt) { +@@ -893,7 +886,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha256_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + if (sess->auth_opt) { +@@ -956,7 +948,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha384_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + if (sess->auth_opt) { +@@ -1021,7 +1012,6 @@ ccp_configure_session_auth(struct ccp_session *sess, + sess->auth.ctx = (void *)ccp_sha512_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE; +- rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE); + break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + if (sess->auth_opt) { +@@ -1171,13 +1161,8 @@ ccp_configure_session_aead(struct ccp_session *sess, + CCP_LOG_ERR("Unsupported aead algo"); + return -ENOTSUP; + } +- if (iommu_mode == 2) { +- sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); +- } else { +- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); +- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); +- } ++ sess->cipher.nonce_phys = 
rte_mem_virt2iova(sess->cipher.nonce); ++ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp); + return 0; + } + +@@ -1299,7 +1284,7 @@ ccp_auth_slot(struct ccp_session *session) case CCP_AUTH_ALGO_SHA512_HMAC: /** * 1. Load PHash1 = H(k ^ ipad); to LSB @@ -14119,6 +28397,156 @@ index 70daed791e..4bab18323b 100644 * as init values); * 3. Retrieve IHash 2 slots for 384/512 * 4. Load Phash2 = H(k ^ opad); to LSB +@@ -1592,14 +1577,8 @@ ccp_perform_hmac(struct rte_crypto_op *op, + op->sym->auth.data.offset); + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); +- if (iommu_mode == 2) { +- dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr); +- pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr); +- } else { +- dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr); +- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr); +- } +- dest_addr_t = dest_addr; ++ dest_addr_t = dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr); ++ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr); + + /** Load PHash1 to LSB*/ + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); +@@ -1681,10 +1660,7 @@ ccp_perform_hmac(struct rte_crypto_op *op, + + /** Load PHash2 to LSB*/ + addr += session->auth.ctx_len; +- if (iommu_mode == 2) +- pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr); +- else +- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr); ++ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; +@@ -1772,14 +1748,8 @@ ccp_perform_sha(struct rte_crypto_op *op, + op->sym->auth.data.offset); + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); +- if (iommu_mode == 2) { +- dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr); +- pst.src_addr = (phys_addr_t)sha_ctx; +- } else { +- dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr); +- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) +- session->auth.ctx); +- } ++ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)session->auth.ctx); ++ dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr); + + /** Passthru sha context*/ + +@@ -1869,15 +1839,8 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op, + CCP_LOG_ERR("CCP MBUF append failed\n"); + return -1; + } +- if (iommu_mode == 2) { +- dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr); +- ctx_paddr = (phys_addr_t)rte_mem_virt2iova( +- session->auth.pre_compute); +- } else { +- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); +- ctx_paddr = (phys_addr_t)rte_mem_virt2phy( +- session->auth.pre_compute); +- } ++ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr); ++ ctx_paddr = (phys_addr_t)rte_mem_virt2iova(session->auth.pre_compute); + dest_addr_t = dest_addr + (session->auth.ctx_len / 2); + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); +@@ -2015,13 +1978,8 @@ ccp_perform_sha3(struct rte_crypto_op *op, + CCP_LOG_ERR("CCP MBUF append failed\n"); + return -1; + } +- if (iommu_mode == 2) { +- dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr); +- ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr); +- } else { +- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); +- ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); +- } ++ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr); ++ ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr); + + 
ctx_addr = session->auth.sha3_ctx; + +@@ -2097,13 +2055,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op, + + ctx_addr = session->auth.pre_compute; + memset(ctx_addr, 0, AES_BLOCK_SIZE); +- if (iommu_mode == 2) +- pst.src_addr = (phys_addr_t)rte_mem_virt2iova( +- (void *)ctx_addr); +- else +- pst.src_addr = (phys_addr_t)rte_mem_virt2phy( +- (void *)ctx_addr); +- ++ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; +@@ -2141,12 +2093,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op, + } else { + ctx_addr = session->auth.pre_compute + CCP_SB_BYTES; + memset(ctx_addr, 0, AES_BLOCK_SIZE); +- if (iommu_mode == 2) +- pst.src_addr = (phys_addr_t)rte_mem_virt2iova( +- (void *)ctx_addr); +- else +- pst.src_addr = (phys_addr_t)rte_mem_virt2phy( +- (void *)ctx_addr); ++ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; +@@ -2340,12 +2287,7 @@ ccp_perform_3des(struct rte_crypto_op *op, + + rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length), + iv, session->iv.length); +- if (iommu_mode == 2) +- pst.src_addr = (phys_addr_t)rte_mem_virt2iova( +- (void *) lsb_buf); +- else +- pst.src_addr = (phys_addr_t)rte_mem_virt2phy( +- (void *) lsb_buf); ++ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *) lsb_buf); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; +@@ -2368,11 +2310,7 @@ ccp_perform_3des(struct rte_crypto_op *op, + else + dest_addr = src_addr; + +- if (iommu_mode == 2) +- key_addr = rte_mem_virt2iova(session->cipher.key_ccp); +- else +- key_addr = rte_mem_virt2phy(session->cipher.key_ccp); +- ++ key_addr = rte_mem_virt2iova(session->cipher.key_ccp); + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + memset(desc, 0, Q_DESC_SIZE); +@@ -2766,12 +2704,7 @@ process_ops_to_enqueue(struct ccp_qp *qp, + b_info->lsb_buf_idx = 0; + b_info->desccnt = 0; + b_info->cmd_q = cmd_q; +- if (iommu_mode == 2) +- b_info->lsb_buf_phys = +- (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf); +- else +- b_info->lsb_buf_phys = +- (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf); ++ b_info->lsb_buf_phys = (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf); + + rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req); + diff --git a/dpdk/drivers/crypto/ccp/ccp_crypto.h b/dpdk/drivers/crypto/ccp/ccp_crypto.h index 8e6d03efc8..d307f73ee4 100644 --- a/dpdk/drivers/crypto/ccp/ccp_crypto.h @@ -14132,6 +28560,72 @@ index 8e6d03efc8..d307f73ee4 100644 #define SHA1_H0 0x67452301UL #define SHA1_H1 0xefcdab89UL +diff --git a/dpdk/drivers/crypto/ccp/ccp_dev.c b/dpdk/drivers/crypto/ccp/ccp_dev.c +index 0eb1b0328e..54c568afd1 100644 +--- a/dpdk/drivers/crypto/ccp/ccp_dev.c ++++ b/dpdk/drivers/crypto/ccp/ccp_dev.c +@@ -23,7 +23,6 @@ + #include "ccp_pci.h" + #include "ccp_pmd_private.h" + +-int iommu_mode; + struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list); + static int ccp_dev_id; + +@@ -362,7 +361,7 @@ ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status) + if (ccp_get_bit(&cmd_q->lsbmask, j)) + weight++; + +- printf("Queue %d can access %d LSB regions of mask %lu\n", ++ CCP_LOG_DBG("Queue %d can access %d LSB regions of mask %lu\n", + (int)cmd_q->id, weight, cmd_q->lsbmask); + + return weight ? 
0 : -EINVAL; +@@ -652,8 +651,7 @@ is_ccp_device(const char *dirname, + static int + ccp_probe_device(int ccp_type, struct rte_pci_device *pci_dev) + { +- struct ccp_device *ccp_dev = NULL; +- int uio_fd = -1; ++ struct ccp_device *ccp_dev; + + ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev), + RTE_CACHE_LINE_SIZE); +@@ -671,8 +669,6 @@ ccp_probe_device(int ccp_type, struct rte_pci_device *pci_dev) + return 0; + fail: + CCP_LOG_ERR("CCP Device probe failed"); +- if (uio_fd >= 0) +- close(uio_fd); + if (ccp_dev) + rte_free(ccp_dev); + return -1; +@@ -687,16 +683,10 @@ ccp_probe_devices(struct rte_pci_device *pci_dev, + struct dirent *d; + DIR *dir; + int ret = 0; +- int module_idx = 0; + uint16_t domain; + uint8_t bus, devid, function; + char dirname[PATH_MAX]; + +- module_idx = ccp_check_pci_uio_module(); +- if (module_idx < 0) +- return -1; +- +- iommu_mode = module_idx; + TAILQ_INIT(&ccp_list); + dir = opendir(SYSFS_PCI_DEVICES); + if (dir == NULL) +@@ -710,7 +700,7 @@ ccp_probe_devices(struct rte_pci_device *pci_dev, + snprintf(dirname, sizeof(dirname), "%s/%s", + SYSFS_PCI_DEVICES, d->d_name); + if (is_ccp_device(dirname, ccp_id, &ccp_type)) { +- printf("CCP : Detected CCP device with ID = 0x%x\n", ++ CCP_LOG_DBG("CCP : Detected CCP device with ID = 0x%x\n", + ccp_id[ccp_type].device_id); + ret = ccp_probe_device(ccp_type, pci_dev); + if (ret == 0) diff --git a/dpdk/drivers/crypto/ccp/ccp_dev.h b/dpdk/drivers/crypto/ccp/ccp_dev.h index 85c8fc47a2..2a205cd446 100644 --- a/dpdk/drivers/crypto/ccp/ccp_dev.h @@ -14145,6 +28639,101 @@ index 85c8fc47a2..2a205cd446 100644 #define MAX_HW_QUEUES 5 #define CCP_MAX_TRNG_RETRIES 10 #define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y) +diff --git a/dpdk/drivers/crypto/ccp/ccp_pci.c b/dpdk/drivers/crypto/ccp/ccp_pci.c +index 38029a9081..bd1a037f76 100644 +--- a/dpdk/drivers/crypto/ccp/ccp_pci.c ++++ b/dpdk/drivers/crypto/ccp/ccp_pci.c +@@ -12,39 +12,6 @@ + + #include "ccp_pci.h" + +-static const char * const uio_module_names[] = { +- "igb_uio", +- "uio_pci_generic", +- "vfio_pci" +-}; +- +-int +-ccp_check_pci_uio_module(void) +-{ +- FILE *fp; +- int i; +- char buf[BUFSIZ]; +- +- fp = fopen(PROC_MODULES, "r"); +- if (fp == NULL) +- return -1; +- i = 0; +- while (uio_module_names[i] != NULL) { +- while (fgets(buf, sizeof(buf), fp) != NULL) { +- if (!strncmp(buf, uio_module_names[i], +- strlen(uio_module_names[i]))) { +- fclose(fp); +- return i; +- } +- } +- i++; +- rewind(fp); +- } +- fclose(fp); +- printf("Insert igb_uio or uio_pci_generic kernel module(s)"); +- return -1;/* uio not inserted */ +-} +- + /* + * split up a pci address into its constituent parts. 
+ */ +diff --git a/dpdk/drivers/crypto/ccp/ccp_pci.h b/dpdk/drivers/crypto/ccp/ccp_pci.h +index 7ed3bac406..f393a04d6f 100644 +--- a/dpdk/drivers/crypto/ccp/ccp_pci.h ++++ b/dpdk/drivers/crypto/ccp/ccp_pci.h +@@ -10,9 +10,6 @@ + #include <rte_bus_pci.h> + + #define SYSFS_PCI_DEVICES "/sys/bus/pci/devices" +-#define PROC_MODULES "/proc/modules" +- +-int ccp_check_pci_uio_module(void); + + int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain, + uint8_t *bus, uint8_t *devid, uint8_t *function); +diff --git a/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c b/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c +index a35a8cd775..0d84c8cd0e 100644 +--- a/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c ++++ b/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c +@@ -22,7 +22,6 @@ + static unsigned int ccp_pmd_init_done; + uint8_t ccp_cryptodev_driver_id; + uint8_t cryptodev_cnt; +-extern void *sha_ctx; + + struct ccp_pmd_init_params { + struct rte_cryptodev_pmd_init_params def_p; +@@ -213,7 +212,6 @@ cryptodev_ccp_remove(struct rte_pci_device *pci_dev) + return -ENODEV; + + ccp_pmd_init_done = 0; +- rte_free(sha_ctx); + + RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n", + name, rte_socket_id()); +@@ -250,7 +248,7 @@ cryptodev_ccp_create(const char *name, + goto init_error; + } + +- printf("CCP : Crypto device count = %d\n", cryptodev_cnt); ++ CCP_LOG_DBG("CCP : Crypto device count = %d\n", cryptodev_cnt); + dev->device = &pci_dev->device; + dev->device->driver = &pci_drv->driver; + dev->driver_id = ccp_cryptodev_driver_id; +@@ -300,7 +298,6 @@ cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused, + .auth_opt = CCP_PMD_AUTH_OPT_CCP, + }; + +- sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64); + if (ccp_pmd_init_done) { + RTE_LOG(INFO, PMD, "CCP PMD already initialized\n"); + return -EFAULT; diff --git a/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h index 2dc8913feb..2b0261e057 100644 --- a/dpdk/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h @@ -14671,7 +29260,7 @@ index 522685f8cf..29f4e6d40b 100644 if (is_encode(ses)) { diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c -index 189262c4ad..58ea4ee476 100644 +index 189262c4ad..32d09cd5e3 100644 --- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c @@ -221,8 +221,11 @@ ipsec_mb_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, @@ -14698,6 +29287,49 @@ index 189262c4ad..58ea4ee476 100644 return ret; } +@@ -317,15 +319,22 @@ ipsec_mb_sym_session_configure( + struct ipsec_mb_dev_private *internals = dev->data->dev_private; + struct ipsec_mb_internals *pmd_data = + &ipsec_mb_pmds[internals->pmd_type]; +- IMB_MGR *mb_mgr = alloc_init_mb_mgr(); ++ struct ipsec_mb_qp *qp = dev->data->queue_pairs[0]; ++ IMB_MGR *mb_mgr; + int ret = 0; + ++ if (qp != NULL) ++ mb_mgr = qp->mb_mgr; ++ else ++ mb_mgr = alloc_init_mb_mgr(); ++ + if (!mb_mgr) + return -ENOMEM; + + if (unlikely(sess == NULL)) { + IPSEC_MB_LOG(ERR, "invalid session struct"); +- free_mb_mgr(mb_mgr); ++ if (qp == NULL) ++ free_mb_mgr(mb_mgr); + return -EINVAL; + } + +@@ -341,13 +350,15 @@ ipsec_mb_sym_session_configure( + + /* Return session to mempool */ + rte_mempool_put(mempool, sess_private_data); +- free_mb_mgr(mb_mgr); ++ if (qp == NULL) ++ free_mb_mgr(mb_mgr); + return ret; + } + + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); + +- free_mb_mgr(mb_mgr); ++ if (qp == NULL) ++ free_mb_mgr(mb_mgr); + return 0; + } + diff --git 
a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h index 866722d6f4..e53101acf1 100644 --- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h @@ -14718,6 +29350,22 @@ index 866722d6f4..e53101acf1 100644 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { if (next == NULL) { if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { +diff --git a/dpdk/drivers/crypto/ipsec_mb/meson.build b/dpdk/drivers/crypto/ipsec_mb/meson.build +index a89b29d6c3..e6448532bd 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/meson.build ++++ b/dpdk/drivers/crypto/ipsec_mb/meson.build +@@ -12,6 +12,11 @@ lib = cc.find_library('IPSec_MB', required: false) + if not lib.found() + build = false + reason = 'missing dependency, "libIPSec_MB"' ++# if the lib is found, check it's the right format ++elif meson.version().version_compare('>=0.60') and not cc.links( ++ 'int main(void) {return 0;}', dependencies: lib) ++ build = false ++ reason = 'incompatible dependency, "libIPSec_MB"' + else + ext_deps += lib + diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c index 2c203795ab..2c033c6f28 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_gcm.c @@ -15015,26 +29663,60 @@ index d37cc787a0..d177961ea5 100644 snow3g_key_schedule_t pKeySched_snow3g_auth; /* *< SNOW3G scheduled authentication key */ diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c -index ebc9a0b562..9a85f46721 100644 +index ebc9a0b562..f365f7bc38 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c +++ b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c -@@ -422,12 +422,13 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session, +@@ -376,9 +376,10 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, + /** Process a crypto op with length/offset in bits. 
*/ + static int + process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session, +- struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops) ++ struct ipsec_mb_qp *qp) + { +- uint32_t enqueued_op, processed_op; ++ unsigned int processed_op; ++ int ret; + + switch (session->op) { + case IPSEC_MB_OP_ENCRYPT_ONLY: +@@ -422,12 +423,14 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session, op->sym->session = NULL; } - enqueued_op = rte_ring_enqueue_burst(qp->ingress_queue, - (void **)&op, processed_op, NULL); +- qp->stats.enqueued_count += enqueued_op; +- *accumulated_enqueued_ops += enqueued_op; + if (unlikely(processed_op != 1)) + return 0; -+ enqueued_op = rte_ring_enqueue(qp->ingress_queue, op); - qp->stats.enqueued_count += enqueued_op; - *accumulated_enqueued_ops += enqueued_op; ++ ++ ret = rte_ring_enqueue(qp->ingress_queue, op); ++ if (ret != 0) ++ return ret; - return enqueued_op; + return 1; } static uint16_t +@@ -441,7 +444,6 @@ snow3g_pmd_dequeue_burst(void *queue_pair, + struct snow3g_session *prev_sess = NULL, *curr_sess = NULL; + uint32_t i; + uint8_t burst_size = 0; +- uint16_t enqueued_ops = 0; + uint8_t processed_ops; + uint32_t nb_dequeued; + +@@ -481,8 +483,7 @@ snow3g_pmd_dequeue_burst(void *queue_pair, + prev_sess = NULL; + } + +- processed_ops = process_op_bit(curr_c_op, curr_sess, +- qp, &enqueued_ops); ++ processed_ops = process_op_bit(curr_c_op, curr_sess, qp); + if (processed_ops != 1) + break; + diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c b/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c index 2eae1d1ec7..e36c7092d6 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_zuc.c @@ -15151,6 +29833,18 @@ index 46d5bfae37..76fd6758c2 100644 /* *< Buffers used to store the digest generated * by the driver when verifying a digest provided * by the user (using authentication verify operation) +diff --git a/dpdk/drivers/crypto/mlx5/meson.build b/dpdk/drivers/crypto/mlx5/meson.build +index 9d9c9c00bc..b09fc1a9d5 100644 +--- a/dpdk/drivers/crypto/mlx5/meson.build ++++ b/dpdk/drivers/crypto/mlx5/meson.build +@@ -7,7 +7,6 @@ if not (is_linux or is_windows) + subdir_done() + endif + +-fmt_name = 'mlx5_crypto' + deps += ['common_mlx5', 'eal', 'cryptodev'] + sources = files( + 'mlx5_crypto.c', diff --git a/dpdk/drivers/crypto/mlx5/mlx5_crypto.c b/dpdk/drivers/crypto/mlx5/mlx5_crypto.c index 421c23748a..36db31aae5 100644 --- a/dpdk/drivers/crypto/mlx5/mlx5_crypto.c @@ -15163,6 +29857,66 @@ index 421c23748a..36db31aae5 100644 mlx5_devx_uar_release(&priv->uar); rte_cryptodev_pmd_destroy(priv->crypto_dev); return -1; +diff --git a/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c +index 9edb0cc00f..d7e8ff7db4 100644 +--- a/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c ++++ b/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c +@@ -10,8 +10,11 @@ + #include "nitrox_sym_reqmgr.h" + #include "nitrox_logs.h" + +-#define MAX_SGBUF_CNT 16 +-#define MAX_SGCOMP_CNT 5 ++#define MAX_SUPPORTED_MBUF_SEGS 16 ++/* IV + AAD + ORH + CC + DIGEST */ ++#define ADDITIONAL_SGBUF_CNT 5 ++#define MAX_SGBUF_CNT (MAX_SUPPORTED_MBUF_SEGS + ADDITIONAL_SGBUF_CNT) ++#define MAX_SGCOMP_CNT (RTE_ALIGN_MUL_CEIL(MAX_SGBUF_CNT, 4) / 4) + /* SLC_STORE_INFO */ + #define MIN_UDD_LEN 16 + /* PKT_IN_HDR + SLC_STORE_INFO */ +@@ -303,7 +306,7 @@ create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf, + datalen -= mlen; + } + +- RTE_VERIFY(cnt <= MAX_SGBUF_CNT); ++ RTE_ASSERT(cnt <= MAX_SGBUF_CNT); + sgtbl->map_bufs_cnt = cnt; + return 0; + } 
+@@ -375,7 +378,7 @@ create_cipher_outbuf(struct nitrox_softreq *sr) + sr->out.sglist[cnt].virt = &sr->resp.completion; + cnt++; + +- RTE_VERIFY(cnt <= MAX_SGBUF_CNT); ++ RTE_ASSERT(cnt <= MAX_SGBUF_CNT); + sr->out.map_bufs_cnt = cnt; + + create_sgcomp(&sr->out); +@@ -600,7 +603,7 @@ create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest) + resp.completion); + sr->out.sglist[cnt].virt = &sr->resp.completion; + cnt++; +- RTE_VERIFY(cnt <= MAX_SGBUF_CNT); ++ RTE_ASSERT(cnt <= MAX_SGBUF_CNT); + sr->out.map_bufs_cnt = cnt; + + create_sgcomp(&sr->out); +@@ -774,6 +777,14 @@ nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op, + { + int err; + ++ if (unlikely(op->sym->m_src->nb_segs > MAX_SUPPORTED_MBUF_SEGS || ++ (op->sym->m_dst && ++ op->sym->m_dst->nb_segs > MAX_SUPPORTED_MBUF_SEGS))) { ++ NITROX_LOG(ERR, "Mbuf segments not supported. " ++ "Max supported %d\n", MAX_SUPPORTED_MBUF_SEGS); ++ return -ENOTSUP; ++ } ++ + softreq_init(sr, sr->iova); + sr->ctx = ctx; + sr->op = op; diff --git a/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c b/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c index 20b288334a..27604459e4 100644 --- a/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.c @@ -15203,7 +29957,7 @@ index 9e8fd495cf..f7ca8a8a8e 100644 return NULL; } diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -index 5794ed8159..5977bc746c 100644 +index 5794ed8159..514e93229f 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c @@ -2,6 +2,8 @@ @@ -15215,6 +29969,56 @@ index 5794ed8159..5977bc746c 100644 #include <rte_common.h> #include <rte_hexdump.h> #include <rte_cryptodev.h> +@@ -1059,8 +1061,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, + int srclen, uint8_t *aad, int aadlen, uint8_t *iv, + uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) + { +- int len = 0, unused = 0; ++ int len = 0; ++#if OPENSSL_VERSION_NUMBER < 0x10100000L ++ int unused = 0; + uint8_t empty[] = {}; ++#endif + + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) + goto process_auth_encryption_gcm_err; +@@ -1074,9 +1079,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, + srclen, ctx, 0)) + goto process_auth_encryption_gcm_err; + ++#if OPENSSL_VERSION_NUMBER < 0x10100000L + /* Workaround open ssl bug in version less then 1.0.1f */ + if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0) + goto process_auth_encryption_gcm_err; ++#endif + + if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0) + goto process_auth_encryption_gcm_err; +@@ -1138,8 +1145,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, + int srclen, uint8_t *aad, int aadlen, uint8_t *iv, + uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) + { +- int len = 0, unused = 0; ++ int len = 0; ++#if OPENSSL_VERSION_NUMBER < 0x10100000L ++ int unused = 0; + uint8_t empty[] = {}; ++#endif + + if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0) + goto process_auth_decryption_gcm_err; +@@ -1156,9 +1166,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, + srclen, ctx, 0)) + goto process_auth_decryption_gcm_err; + ++#if OPENSSL_VERSION_NUMBER < 0x10100000L + /* Workaround open ssl bug in version less then 1.0.1f */ + if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0) + goto process_auth_decryption_gcm_err; ++#endif + + if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0) + return 
-EFAULT; diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c index 52715f86f8..35c4ad13ba 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c @@ -15260,7 +30064,7 @@ index f893508030..bd0bf5f0cb 100644 qat_req->input_param_count = QAT_ASYM_RSA_QT_NUM_IN_PARAMS; diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c -index 93b257522b..0dd83ee2ee 100644 +index 93b257522b..ff3d1ec484 100644 --- a/dpdk/drivers/crypto/qat/qat_sym.c +++ b/dpdk/drivers/crypto/qat/qat_sym.c @@ -2,6 +2,8 @@ @@ -15272,6 +30076,19 @@ index 93b257522b..0dd83ee2ee 100644 #include <openssl/evp.h> #include <rte_mempool.h> +@@ -392,9 +394,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, + + } + min_ofs = auth_ofs; +- +- if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL || +- ctx->auth_op == ICP_QAT_HW_AUTH_VERIFY) ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) ++ auth_param->auth_res_addr = cookie->digest_null_phys_addr; ++ else + auth_param->auth_res_addr = + op->sym->auth.digest.phys_addr; + @@ -419,7 +421,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) { @@ -15281,11 +30098,165 @@ index 93b257522b..0dd83ee2ee 100644 */ uint8_t *aad_data = op->sym->aead.aad.data; /* This is true AAD length, it not includes 18 bytes of +diff --git a/dpdk/drivers/crypto/qat/qat_sym.h b/dpdk/drivers/crypto/qat/qat_sym.h +index e3ec7f0de4..91a4c38c37 100644 +--- a/dpdk/drivers/crypto/qat/qat_sym.h ++++ b/dpdk/drivers/crypto/qat/qat_sym.h +@@ -52,6 +52,8 @@ struct qat_sym_op_cookie { + phys_addr_t cd_phys_addr; + } spc_gmac; + } opt; ++ uint8_t digest_null[4]; ++ phys_addr_t digest_null_phys_addr; + }; + + int diff --git a/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c b/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c -index 12825e448b..792ad2b213 100644 +index 12825e448b..8b505a87e0 100644 --- a/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c +++ b/dpdk/drivers/crypto/qat/qat_sym_hw_dp.c -@@ -533,8 +533,20 @@ enqueue_one_aead_job(struct qat_sym_session *ctx, +@@ -251,13 +251,17 @@ qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx, + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; ++ struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = digest; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); ++ cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); +@@ -266,7 +270,11 @@ qat_sym_dp_enqueue_single_auth(void *qp_data, uint8_t *drv_ctx, + return -1; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; + +- enqueue_one_auth_job(ctx, req, digest, auth_iv, ofs, ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } ++ enqueue_one_auth_job(ctx, req, job_digest, auth_iv, ofs, + (uint32_t)data_len); + + dp_ctx->tail = tail; +@@ -283,11 +291,14 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx, + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue 
*tx_queue = &qp->tx_q; ++ struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = NULL; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { +@@ -301,6 +312,7 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx, + for (i = 0; i < n; i++) { + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); ++ cookie = qp->op_cookies[tail >> tx_queue->trailz]; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_dp_parse_data_vec(qp, req, +@@ -309,7 +321,12 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx, + if (unlikely(data_len < 0)) + break; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; +- enqueue_one_auth_job(ctx, req, &vec->digest[i], ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } else ++ job_digest = &vec->digest[i]; ++ enqueue_one_auth_job(ctx, req, job_digest, + &vec->auth_iv[i], ofs, (uint32_t)data_len); + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + } +@@ -433,23 +450,31 @@ qat_sym_dp_enqueue_single_chain(void *qp_data, uint8_t *drv_ctx, + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; ++ struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; + uint32_t tail = dp_ctx->tail; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest = digest; + + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); ++ cookie = qp->op_cookies[tail >> tx_queue->trailz]; + tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + rte_prefetch0((uint8_t *)tx_queue->base_addr + tail); + data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs); + if (unlikely(data_len < 0)) + return -1; ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data; + + if (unlikely(enqueue_one_chain_job(ctx, req, data, n_data_vecs, +- cipher_iv, digest, auth_iv, ofs, (uint32_t)data_len))) ++ cipher_iv, job_digest, auth_iv, ofs, (uint32_t)data_len))) + return -1; + + dp_ctx->tail = tail; +@@ -466,11 +491,14 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx, + struct qat_qp *qp = qp_data; + struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; + struct qat_queue *tx_queue = &qp->tx_q; ++ struct qat_sym_op_cookie *cookie; + struct qat_sym_session *ctx = dp_ctx->session; + uint32_t i, n; + uint32_t tail; + struct icp_qat_fw_la_bulk_req *req; + int32_t data_len; ++ struct rte_crypto_va_iova_ptr null_digest; ++ struct rte_crypto_va_iova_ptr *job_digest; + + n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num); + if (unlikely(n == 0)) { +@@ -484,6 +512,7 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx, + for (i = 0; i < n; i++) { + req = (struct icp_qat_fw_la_bulk_req *)( + (uint8_t *)tx_queue->base_addr + tail); ++ cookie = qp->op_cookies[tail >> tx_queue->trailz]; + rte_mov128((uint8_t *)req, 
(const uint8_t *)&(ctx->fw_req)); + + data_len = qat_sym_dp_parse_data_vec(qp, req, +@@ -491,10 +520,15 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx, + vec->src_sgl[i].num); + if (unlikely(data_len < 0)) + break; ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ null_digest.iova = cookie->digest_null_phys_addr; ++ job_digest = &null_digest; ++ } else ++ job_digest = &vec->digest[i]; + req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i]; + if (unlikely(enqueue_one_chain_job(ctx, req, + vec->src_sgl[i].vec, vec->src_sgl[i].num, +- &vec->iv[i], &vec->digest[i], ++ &vec->iv[i], job_digest, + &vec->auth_iv[i], ofs, (uint32_t)data_len))) + break; + +@@ -533,8 +567,20 @@ enqueue_one_aead_job(struct qat_sym_session *ctx, /* CPM 1.7 uses single pass to treat AEAD as cipher operation */ if (ctx->is_single_pass) { enqueue_one_cipher_job(ctx, req, iv, ofs, data_len); @@ -15308,8 +30279,24 @@ index 12825e448b..792ad2b213 100644 return; } +diff --git a/dpdk/drivers/crypto/qat/qat_sym_pmd.c b/dpdk/drivers/crypto/qat/qat_sym_pmd.c +index b835245f17..8069989ce8 100644 +--- a/dpdk/drivers/crypto/qat/qat_sym_pmd.c ++++ b/dpdk/drivers/crypto/qat/qat_sym_pmd.c +@@ -43,6 +43,11 @@ qat_sym_init_op_cookie(void *op_cookie) + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + opt.spc_gmac.cd_cipher); ++ ++ cookie->digest_null_phys_addr = ++ rte_mempool_virt2iova(cookie) + ++ offsetof(struct qat_sym_op_cookie, ++ digest_null); + } + + static uint16_t diff --git a/dpdk/drivers/crypto/qat/qat_sym_session.c b/dpdk/drivers/crypto/qat/qat_sym_session.c -index 8ca475ca8b..80d6fbfa46 100644 +index 8ca475ca8b..eae08f65d1 100644 --- a/dpdk/drivers/crypto/qat/qat_sym_session.c +++ b/dpdk/drivers/crypto/qat/qat_sym_session.c @@ -2,6 +2,8 @@ @@ -15333,6 +30320,64 @@ index 8ca475ca8b..80d6fbfa46 100644 return ret; } +@@ -1431,6 +1435,10 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg, + QAT_LOG(ERR, "invalid keylen %u", auth_keylen); + return -EFAULT; + } ++ ++ RTE_VERIFY(auth_keylen <= sizeof(ipad)); ++ RTE_VERIFY(auth_keylen <= sizeof(opad)); ++ + rte_memcpy(ipad, auth_key, auth_keylen); + rte_memcpy(opad, auth_key, auth_keylen); + +@@ -1620,9 +1628,10 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc, + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 + || cdesc->qat_cipher_alg == +- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) ++ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; +- else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) ++ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; ++ } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; + else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE) + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; +@@ -1810,7 +1819,12 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc, + hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); + hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr; + hash->auth_config.reserved = 0; +- hash->auth_config.config = ++ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) ++ hash->auth_config.config = ++ ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode, ++ cdesc->qat_hash_alg, 4); ++ else ++ hash->auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode, + cdesc->qat_hash_alg, digestsize); + +@@ -2071,10 +2085,16 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc, + /* Auth CD config setup 
*/ + hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; +- hash_cd_ctrl->inner_res_sz = digestsize; +- hash_cd_ctrl->final_sz = digestsize; + hash_cd_ctrl->inner_state1_sz = state1_size; +- auth_param->auth_res_sz = digestsize; ++ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { ++ hash_cd_ctrl->inner_res_sz = 4; ++ hash_cd_ctrl->final_sz = 4; ++ auth_param->auth_res_sz = 4; ++ } else { ++ hash_cd_ctrl->inner_res_sz = digestsize; ++ hash_cd_ctrl->final_sz = digestsize; ++ auth_param->auth_res_sz = digestsize; ++ } + + hash_cd_ctrl->inner_state2_sz = state2_size; + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + diff --git a/dpdk/drivers/crypto/scheduler/scheduler_failover.c b/dpdk/drivers/crypto/scheduler/scheduler_failover.c index 5023577ef8..2a0e29fa72 100644 --- a/dpdk/drivers/crypto/scheduler/scheduler_failover.c @@ -15347,6 +30392,20 @@ index 5023577ef8..2a0e29fa72 100644 rte_memcpy(&qp_ctx->primary_worker, &sched_ctx->workers[PRIMARY_WORKER_IDX], sizeof(struct scheduler_worker)); +diff --git a/dpdk/drivers/crypto/scheduler/scheduler_pmd.c b/dpdk/drivers/crypto/scheduler/scheduler_pmd.c +index dd198080bf..f1cc2b8344 100644 +--- a/dpdk/drivers/crypto/scheduler/scheduler_pmd.c ++++ b/dpdk/drivers/crypto/scheduler/scheduler_pmd.c +@@ -47,7 +47,8 @@ static const char * const scheduler_valid_params[] = { + RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG, + RTE_CRYPTODEV_VDEV_SOCKET_ID, + RTE_CRYPTODEV_VDEV_COREMASK, +- RTE_CRYPTODEV_VDEV_CORELIST ++ RTE_CRYPTODEV_VDEV_CORELIST, ++ NULL + }; + + struct scheduler_parse_map { diff --git a/dpdk/drivers/crypto/virtio/virtio_rxtx.c b/dpdk/drivers/crypto/virtio/virtio_rxtx.c index a65524a306..08359b3a39 100644 --- a/dpdk/drivers/crypto/virtio/virtio_rxtx.c @@ -15374,6 +30433,39 @@ index bf10c6579b..c96ca62992 100644 * which is a serialization instruction itself. 
*/ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); +diff --git a/dpdk/drivers/dma/cnxk/cnxk_dmadev.c b/dpdk/drivers/dma/cnxk/cnxk_dmadev.c +index 2824c1b44f..e923b2cea3 100644 +--- a/dpdk/drivers/dma/cnxk/cnxk_dmadev.c ++++ b/dpdk/drivers/dma/cnxk/cnxk_dmadev.c +@@ -447,8 +447,7 @@ static const struct rte_dma_dev_ops cnxk_dmadev_ops = { + }; + + static int +-cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, +- struct rte_pci_device *pci_dev) ++cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) + { + struct cnxk_dpi_vf_s *dpivf = NULL; + char name[RTE_DEV_NAME_MAX_LEN]; +@@ -467,8 +466,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, + memset(name, 0, sizeof(name)); + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + +- dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, +- sizeof(*dpivf)); ++ dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, sizeof(*dpivf)); + if (dmadev == NULL) { + plt_err("dma device allocation failed for %s", name); + return -ENOMEM; +@@ -493,6 +491,8 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, + if (rc < 0) + goto err_out_free; + ++ dmadev->state = RTE_DMA_DEV_READY; ++ + return 0; + + err_out_free: diff --git a/dpdk/drivers/dma/cnxk/meson.build b/dpdk/drivers/dma/cnxk/meson.build index 633e92a20d..d4be4ee860 100644 --- a/dpdk/drivers/dma/cnxk/meson.build @@ -15546,6 +30638,21 @@ index fcc27822ef..3f5d5ee752 100755 def main(args): +diff --git a/dpdk/drivers/dma/idxd/idxd_bus.c b/dpdk/drivers/dma/idxd/idxd_bus.c +index 08639e9dce..594b3e1d5a 100644 +--- a/dpdk/drivers/dma/idxd/idxd_bus.c ++++ b/dpdk/drivers/dma/idxd/idxd_bus.c +@@ -314,6 +314,10 @@ dsa_scan(void) + IDXD_PMD_DEBUG("%s(): found %s/%s", __func__, path, wq->d_name); + + dev = malloc(sizeof(*dev)); ++ if (dev == NULL) { ++ closedir(dev_dir); ++ return -ENOMEM; ++ } + if (dsa_addr_parse(wq->d_name, &dev->addr) < 0) { + IDXD_PMD_ERR("Error parsing WQ name: %s", wq->d_name); + free(dev); diff --git a/dpdk/drivers/dma/idxd/idxd_common.c b/dpdk/drivers/dma/idxd/idxd_common.c index fc11b11337..c77200a457 100644 --- a/dpdk/drivers/dma/idxd/idxd_common.c @@ -15844,8 +30951,75 @@ index f1396be945..c5403b431c 100644 sources = files( 'idxd_common.c', 'idxd_pci.c', +diff --git a/dpdk/drivers/dma/ioat/ioat_dmadev.c b/dpdk/drivers/dma/ioat/ioat_dmadev.c +index a230496b11..bfc61b3bf3 100644 +--- a/dpdk/drivers/dma/ioat/ioat_dmadev.c ++++ b/dpdk/drivers/dma/ioat/ioat_dmadev.c +@@ -142,10 +142,20 @@ ioat_dev_start(struct rte_dma_dev *dev) + ioat->regs->chainaddr = ioat->ring_addr; + /* Inform hardware of where to write the status/completions. */ + ioat->regs->chancmp = ioat->status_addr; ++ /* Ensure channel control is set to abort on error, so we get status writeback. */ ++ ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN | ++ IOAT_CHANCTRL_ERR_COMPLETION_EN; + + /* Prime the status register to be set to the last element. 
*/ + ioat->status = ioat->ring_addr + ((ioat->qcfg.nb_desc - 1) * DESC_SZ); + ++ /* reset all counters */ ++ ioat->next_read = 0; ++ ioat->next_write = 0; ++ ioat->last_write = 0; ++ ioat->offset = 0; ++ ioat->failure = 0; ++ + printf("IOAT.status: %s [0x%"PRIx64"]\n", + chansts_readable[ioat->status & IOAT_CHANSTS_STATUS], + ioat->status); +@@ -166,17 +176,28 @@ static int + ioat_dev_stop(struct rte_dma_dev *dev) + { + struct ioat_dmadev *ioat = dev->fp_obj->dev_private; ++ unsigned int chansts; + uint32_t retry = 0; + +- ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND; ++ chansts = (unsigned int)(ioat->regs->chansts & IOAT_CHANSTS_STATUS); ++ if (chansts == IOAT_CHANSTS_ACTIVE || chansts == IOAT_CHANSTS_IDLE) ++ ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND; ++ else ++ ioat->regs->chancmd = IOAT_CHANCMD_RESET; + + do { + rte_pause(); + retry++; +- } while ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) != IOAT_CHANSTS_SUSPENDED +- && retry < 200); ++ chansts = (unsigned int)(ioat->regs->chansts & IOAT_CHANSTS_STATUS); ++ } while (chansts != IOAT_CHANSTS_SUSPENDED && ++ chansts != IOAT_CHANSTS_HALTED && retry < 200); + +- return ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED) ? 0 : -1; ++ if (chansts == IOAT_CHANSTS_SUSPENDED || chansts == IOAT_CHANSTS_HALTED) ++ return 0; ++ ++ IOAT_PMD_WARN("Channel could not be suspended on stop. (chansts = %u [%s])", ++ chansts, chansts_readable[chansts]); ++ return -1; + } + + /* Get device information of a device. */ +@@ -664,8 +685,6 @@ ioat_dmadev_create(const char *name, struct rte_pci_device *dev) + return -EIO; + } + } +- ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN | +- IOAT_CHANCTRL_ERR_COMPLETION_EN; + + dmadev->fp_obj->dev_private = ioat; + diff --git a/dpdk/drivers/dma/skeleton/skeleton_dmadev.c b/dpdk/drivers/dma/skeleton/skeleton_dmadev.c -index d9e4f731d7..6b0bb14e2c 100644 +index d9e4f731d7..08bf20e899 100644 --- a/dpdk/drivers/dma/skeleton/skeleton_dmadev.c +++ b/dpdk/drivers/dma/skeleton/skeleton_dmadev.c @@ -118,6 +118,7 @@ skeldma_start(struct rte_dma_dev *dev) @@ -15912,6 +31086,23 @@ index d9e4f731d7..6b0bb14e2c 100644 return count; } +@@ -500,9 +511,15 @@ skeldma_parse_lcore(const char *key __rte_unused, + const char *value, + void *opaque) + { +- int lcore_id = atoi(value); ++ int lcore_id; ++ ++ if (value == NULL || opaque == NULL) ++ return -EINVAL; ++ ++ lcore_id = atoi(value); + if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE) + *(int *)opaque = lcore_id; ++ + return 0; + } + diff --git a/dpdk/drivers/dma/skeleton/skeleton_dmadev.h b/dpdk/drivers/dma/skeleton/skeleton_dmadev.h index 91eb5460fc..6f89400480 100644 --- a/dpdk/drivers/dma/skeleton/skeleton_dmadev.h @@ -15925,7 +31116,7 @@ index 91eb5460fc..6f89400480 100644 /* Cache delimiter for cpucopy thread's operation data */ diff --git a/dpdk/drivers/event/cnxk/cn10k_eventdev.c b/dpdk/drivers/event/cnxk/cn10k_eventdev.c -index c5a8c1ae8f..4d878fc2b7 100644 +index c5a8c1ae8f..5fce6a22ac 100644 --- a/dpdk/drivers/event/cnxk/cn10k_eventdev.c +++ b/dpdk/drivers/event/cnxk/cn10k_eventdev.c @@ -111,10 +111,10 @@ cn10k_sso_hws_release(void *arg, void *hws) @@ -15941,6 +31132,50 @@ index c5a8c1ae8f..4d878fc2b7 100644 memset(ws, 0, sizeof(*ws)); } +@@ -748,8 +748,8 @@ static int + cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, + const struct rte_cryptodev *cdev, uint32_t *caps) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ 
CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", ENOTSUP); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", ENOTSUP); + + *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | + RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; +@@ -767,8 +767,8 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, + + RTE_SET_USED(event); + +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); + + dev->is_ca_internal_port = 1; + cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); +@@ -781,8 +781,8 @@ cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, + const struct rte_cryptodev *cdev, + int32_t queue_pair_id) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k", EINVAL); + + return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); + } +@@ -818,6 +818,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = { + .crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add, + .crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del, + ++ .xstats_get = cnxk_sso_xstats_get, ++ .xstats_reset = cnxk_sso_xstats_reset, ++ .xstats_get_names = cnxk_sso_xstats_get_names, ++ + .dump = cnxk_sso_dump, + .dev_start = cn10k_sso_start, + .dev_stop = cn10k_sso_stop, diff --git a/dpdk/drivers/event/cnxk/cn10k_worker.h b/dpdk/drivers/event/cnxk/cn10k_worker.h index f8331e88d7..f67c36f888 100644 --- a/dpdk/drivers/event/cnxk/cn10k_worker.h @@ -15971,7 +31206,7 @@ index f8331e88d7..f67c36f888 100644 m = ev->mbuf; diff --git a/dpdk/drivers/event/cnxk/cn9k_eventdev.c b/dpdk/drivers/event/cnxk/cn9k_eventdev.c -index b68ce6c0a4..d2ec4aedd7 100644 +index b68ce6c0a4..16075aab86 100644 --- a/dpdk/drivers/event/cnxk/cn9k_eventdev.c +++ b/dpdk/drivers/event/cnxk/cn9k_eventdev.c @@ -109,24 +109,21 @@ cn9k_sso_hws_release(void *arg, void *hws) @@ -16003,8 +31238,57 @@ index b68ce6c0a4..d2ec4aedd7 100644 memset(ws, 0, sizeof(*ws)); } } +@@ -999,11 +996,11 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev, + } + + static int +-cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, +- const struct rte_cryptodev *cdev, uint32_t *caps) ++cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, ++ uint32_t *caps) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", ENOTSUP); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", ENOTSUP); + + *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | + RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; +@@ -1020,8 +1017,8 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, + + RTE_SET_USED(event); + +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", EINVAL); + + dev->is_ca_internal_port = 1; + cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); +@@ -1034,8 +1031,8 @@ 
cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, + const struct rte_cryptodev *cdev, + int32_t queue_pair_id) + { +- CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k"); +- CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k"); ++ CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL); ++ CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k", EINVAL); + + return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); + } +@@ -1069,6 +1066,10 @@ static struct eventdev_ops cn9k_sso_dev_ops = { + .crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add, + .crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del, + ++ .xstats_get = cnxk_sso_xstats_get, ++ .xstats_reset = cnxk_sso_xstats_reset, ++ .xstats_get_names = cnxk_sso_xstats_get_names, ++ + .dump = cnxk_sso_dump, + .dev_start = cn9k_sso_start, + .dev_stop = cn9k_sso_stop, diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h -index 9377fa50e7..8abdd13b66 100644 +index 9377fa50e7..695e3ae429 100644 --- a/dpdk/drivers/event/cnxk/cn9k_worker.h +++ b/dpdk/drivers/event/cnxk/cn9k_worker.h @@ -209,7 +209,6 @@ cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base, @@ -16023,10 +31307,30 @@ index 9377fa50e7..8abdd13b66 100644 (uint64_t *)tstamp_ptr); gw.u64[1] = mbuf; } +@@ -587,6 +585,7 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base, + + rte_io_wmb(); + cn9k_sso_txq_fc_wait(txq); ++ cn9k_nix_sec_fc_wait_one(txq); + + /* Write CPT instruction to lmt line */ + vst1q_u64(lmt_addr, cmd01); diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -index f7a5026250..46a788ef4e 100644 +index f7a5026250..27c1840f71 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c +@@ -356,9 +356,9 @@ int + cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns, + uint64_t *tmo_ticks) + { +- RTE_SET_USED(event_dev); +- *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz()); ++ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + ++ *tmo_ticks = dev->deq_tmo_ns ? 
ns / dev->deq_tmo_ns : 0; + return 0; + } + @@ -417,10 +417,10 @@ cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn, plt_sso_dbg(); @@ -16051,8 +31355,28 @@ index f7a5026250..46a788ef4e 100644 struct cnxk_sso_evdev *dev = opaque; char *tok = strtok(value, "-"); struct cnxk_sso_qos *old_ptr; +@@ -574,7 +574,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) + &dev->force_ena_bp); + rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag, + &single_ws); +- rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag, ++ rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value, + &dev->gw_mode); + dev->dual_ws = !single_ws; + rte_kvargs_free(kvlist); +@@ -636,9 +636,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) + + cnxk_tim_fini(); + roc_sso_rsrc_fini(&dev->sso); +- roc_sso_dev_fini(&dev->sso); + +- return 0; ++ return roc_sso_dev_fini(&dev->sso); + } + + int diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.h b/dpdk/drivers/event/cnxk/cnxk_eventdev.h -index 305c6a3b9e..39c13b02fc 100644 +index 305c6a3b9e..9cd1d37010 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev.h +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.h @@ -44,7 +44,7 @@ @@ -16064,6 +31388,19 @@ index 305c6a3b9e..39c13b02fc 100644 #define CNXK_GRP_FROM_TAG(x) (((x) >> 36) & 0x3ff) #define CNXK_SWTAG_PEND(x) (BIT_ULL(62) & x) +@@ -54,10 +54,10 @@ + #define CN10K_GW_MODE_PREF 1 + #define CN10K_GW_MODE_PREF_WFE 2 + +-#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \ ++#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name, err_val) \ + do { \ + if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \ +- return -EINVAL; \ ++ return -err_val; \ + } while (0) + + typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id); @@ -74,9 +74,9 @@ typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base, struct cnxk_sso_qos { @@ -16077,6 +31414,15 @@ index 305c6a3b9e..39c13b02fc 100644 }; struct cnxk_sso_evdev { +@@ -113,7 +113,7 @@ struct cnxk_sso_evdev { + /* CN9K */ + uint8_t dual_ws; + /* CN10K */ +- uint8_t gw_mode; ++ uint32_t gw_mode; + /* Crypto adapter */ + uint8_t is_ca_internal_port; + } __rte_cache_aligned; diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c index fdcd68ca63..54c3d6a3cb 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c @@ -16138,6 +31484,31 @@ index 69c15b1d0a..3aa6f081a7 100644 printf("Verifying CN9K Dual workslot mode\n"); dev->dual_ws = 1; cn9k_sso_set_rsrc(dev); +diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c b/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c +index 99b3acee7c..ffecdb7448 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c ++++ b/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c +@@ -356,6 +356,7 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, + uint32_t *caps, const struct event_timer_adapter_ops **ops) + { + struct cnxk_tim_evdev *dev = cnxk_tim_priv_get(); ++ struct cnxk_tim_ring *tim_ring; + + RTE_SET_USED(flags); + +@@ -376,6 +377,12 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, + /* Store evdev pointer for later use. 
*/ + dev->event_dev = (struct rte_eventdev *)(uintptr_t)evdev; + *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT; ++ ++ tim_ring = ((struct rte_event_timer_adapter_data ++ *)((char *)caps - offsetof(struct rte_event_timer_adapter_data, caps))) ++ ->adapter_priv; ++ if (tim_ring != NULL && rte_eal_process_type() == RTE_PROC_SECONDARY) ++ cnxk_tim_set_fp_ops(tim_ring); + *ops = &cnxk_tim_ops; + + return 0; diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_worker.c b/dpdk/drivers/event/cnxk/cnxk_tim_worker.c index 3ce99864a6..dfcfbdc797 100644 --- a/dpdk/drivers/event/cnxk/cnxk_tim_worker.c @@ -16152,10 +31523,18 @@ index 3ce99864a6..dfcfbdc797 100644 cnxk_tim_sync_start_cyc(tim_ring); for (index = 0; index < nb_timers; index++) { diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_worker.h b/dpdk/drivers/event/cnxk/cnxk_tim_worker.h -index 78e36ffafe..0c9f29cfbe 100644 +index 78e36ffafe..24088ca05b 100644 --- a/dpdk/drivers/event/cnxk/cnxk_tim_worker.h +++ b/dpdk/drivers/event/cnxk/cnxk_tim_worker.h -@@ -233,8 +233,8 @@ cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring, +@@ -216,6 +216,7 @@ cnxk_tim_insert_chunk(struct cnxk_tim_bkt *const bkt, + if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk))) + return NULL; + ++ RTE_MEMPOOL_CHECK_COOKIES(tim_ring->chunk_pool, (void **)&chunk, 1, 0); + *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0; + if (bkt->nb_entry) { + *(uint64_t *)(((struct cnxk_tim_ent *)(uintptr_t) +@@ -233,8 +234,8 @@ cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring, const struct cnxk_tim_ent *const pent, const uint8_t flags) { @@ -16165,7 +31544,17 @@ index 78e36ffafe..0c9f29cfbe 100644 struct cnxk_tim_bkt *bkt; uint64_t lock_sema; int16_t rem; -@@ -316,8 +316,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring, +@@ -268,7 +269,8 @@ cnxk_tim_add_entry_sp(struct cnxk_tim_ring *const tim_ring, + } while (hbt_state & BIT_ULL(33)); + #endif + +- if (!(hbt_state & BIT_ULL(34))) { ++ if (!(hbt_state & BIT_ULL(34)) || ++ !(hbt_state & GENMASK(31, 0))) { + cnxk_tim_bkt_dec_lock(bkt); + goto __retry; + } +@@ -316,8 +318,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring, const struct cnxk_tim_ent *const pent, const uint8_t flags) { @@ -16175,11 +31564,143 @@ index 78e36ffafe..0c9f29cfbe 100644 struct cnxk_tim_bkt *bkt; uint64_t lock_sema; int64_t rem; +@@ -350,7 +352,8 @@ cnxk_tim_add_entry_mp(struct cnxk_tim_ring *const tim_ring, + } while (hbt_state & BIT_ULL(33)); + #endif + +- if (!(hbt_state & BIT_ULL(34))) { ++ if (!(hbt_state & BIT_ULL(34)) || ++ !(hbt_state & GENMASK(31, 0))) { + cnxk_tim_bkt_dec_lock(bkt); + goto __retry; + } +@@ -447,10 +450,10 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, + struct cnxk_tim_ent *chunk = NULL; + struct cnxk_tim_bkt *mirr_bkt; + struct cnxk_tim_bkt *bkt; +- uint16_t chunk_remainder; ++ int16_t chunk_remainder; + uint16_t index = 0; + uint64_t lock_sema; +- int16_t rem, crem; ++ int16_t rem; + uint8_t lock_cnt; + + __retry: +@@ -458,31 +461,6 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, + + /* Only one thread beyond this. 
*/ + lock_sema = cnxk_tim_bkt_inc_lock(bkt); +- lock_cnt = (uint8_t)((lock_sema >> TIM_BUCKET_W1_S_LOCK) & +- TIM_BUCKET_W1_M_LOCK); +- +- if (lock_cnt) { +- cnxk_tim_bkt_dec_lock(bkt); +-#ifdef RTE_ARCH_ARM64 +- asm volatile(PLT_CPU_FEATURE_PREAMBLE +- " ldxrb %w[lock_cnt], [%[lock]] \n" +- " tst %w[lock_cnt], 255 \n" +- " beq dne%= \n" +- " sevl \n" +- "rty%=: wfe \n" +- " ldxrb %w[lock_cnt], [%[lock]] \n" +- " tst %w[lock_cnt], 255 \n" +- " bne rty%= \n" +- "dne%=: \n" +- : [lock_cnt] "=&r"(lock_cnt) +- : [lock] "r"(&bkt->lock) +- : "memory"); +-#else +- while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED)) +- ; +-#endif +- goto __retry; +- } + + /* Bucket related checks. */ + if (unlikely(cnxk_tim_bkt_get_hbt(lock_sema))) { +@@ -507,21 +485,46 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, + } while (hbt_state & BIT_ULL(33)); + #endif + +- if (!(hbt_state & BIT_ULL(34))) { ++ if (!(hbt_state & BIT_ULL(34)) || ++ !(hbt_state & GENMASK(31, 0))) { + cnxk_tim_bkt_dec_lock(bkt); + goto __retry; + } + } + } + ++ lock_cnt = (uint8_t)((lock_sema >> TIM_BUCKET_W1_S_LOCK) & ++ TIM_BUCKET_W1_M_LOCK); ++ if (lock_cnt) { ++ cnxk_tim_bkt_dec_lock(bkt); ++#ifdef RTE_ARCH_ARM64 ++ asm volatile(PLT_CPU_FEATURE_PREAMBLE ++ " ldxrb %w[lock_cnt], [%[lock]] \n" ++ " tst %w[lock_cnt], 255 \n" ++ " beq dne%= \n" ++ " sevl \n" ++ "rty%=: wfe \n" ++ " ldxrb %w[lock_cnt], [%[lock]] \n" ++ " tst %w[lock_cnt], 255 \n" ++ " bne rty%= \n" ++ "dne%=: \n" ++ : [lock_cnt] "=&r"(lock_cnt) ++ : [lock] "r"(&bkt->lock) ++ : "memory"); ++#else ++ while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED)) ++ ; ++#endif ++ goto __retry; ++ } ++ + chunk_remainder = cnxk_tim_bkt_fetch_rem(lock_sema); + rem = chunk_remainder - nb_timers; + if (rem < 0) { +- crem = tim_ring->nb_chunk_slots - chunk_remainder; +- if (chunk_remainder && crem) { ++ if (chunk_remainder > 0) { + chunk = ((struct cnxk_tim_ent *) + mirr_bkt->current_chunk) + +- crem; ++ tim_ring->nb_chunk_slots - chunk_remainder; + + index = cnxk_tim_cpy_wrk(index, chunk_remainder, chunk, + tim, ents, bkt); +@@ -535,18 +538,19 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring, + chunk = cnxk_tim_insert_chunk(bkt, mirr_bkt, tim_ring); + + if (unlikely(chunk == NULL)) { +- cnxk_tim_bkt_dec_lock(bkt); ++ cnxk_tim_bkt_dec_lock_relaxed(bkt); + rte_errno = ENOMEM; + tim[index]->state = RTE_EVENT_TIMER_ERROR; +- return crem; ++ return index; + } + *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0; + mirr_bkt->current_chunk = (uintptr_t)chunk; +- cnxk_tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt); ++ index = cnxk_tim_cpy_wrk(index, nb_timers, chunk, tim, ents, ++ bkt) - ++ index; + +- rem = nb_timers - chunk_remainder; +- cnxk_tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem); +- cnxk_tim_bkt_add_nent(bkt, rem); ++ cnxk_tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - index); ++ cnxk_tim_bkt_add_nent(bkt, index); + } else { + chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk; + chunk += (tim_ring->nb_chunk_slots - chunk_remainder); diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c -index 16e9764dbf..543f793ed1 100644 +index 16e9764dbf..f76f1c26b0 100644 --- a/dpdk/drivers/event/dlb2/dlb2.c +++ b/dpdk/drivers/event/dlb2/dlb2.c -@@ -61,12 +61,13 @@ static struct rte_event_dev_info evdev_dlb2_default_info = { +@@ -61,12 +61,14 @@ static struct rte_event_dev_info evdev_dlb2_default_info = { .max_num_events = DLB2_MAX_NUM_LDB_CREDITS, .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2), @@ 
-16187,6 +31708,7 @@ index 16e9764dbf..543f793ed1 100644 - RTE_EVENT_DEV_CAP_EVENT_QOS | - RTE_EVENT_DEV_CAP_BURST_MODE | + .event_dev_cap = (RTE_EVENT_DEV_CAP_EVENT_QOS | ++ RTE_EVENT_DEV_CAP_NONSEQ_MODE | RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | - RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE | RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES | @@ -16197,7 +31719,25 @@ index 16e9764dbf..543f793ed1 100644 RTE_EVENT_DEV_CAP_MAINTENANCE_FREE), }; -@@ -2145,7 +2146,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2, +@@ -626,7 +628,7 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle, + cfg->num_ldb_queues; + + cfg->num_hist_list_entries = resources_asked->num_ldb_ports * +- DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; ++ evdev_dlb2_default_info.max_event_port_dequeue_depth; + + if (device_version == DLB2_HW_V2_5) { + DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n", +@@ -1349,7 +1351,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, + cfg.cq_depth = rte_align32pow2(dequeue_depth); + cfg.cq_depth_threshold = 1; + +- cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; ++ cfg.cq_history_list_size = cfg.cq_depth; + + if (handle->cos_id == DLB2_COS_DEFAULT) + cfg.cos_id = 0; +@@ -2145,7 +2147,7 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2, } /* This is expected with eventdev API! @@ -16206,7 +31746,25 @@ index 16e9764dbf..543f793ed1 100644 */ if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) { DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n", -@@ -3897,31 +3898,47 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2, +@@ -2936,6 +2938,7 @@ __dlb2_event_enqueue_burst(void *event_port, + struct dlb2_eventdev_port *ev_port = event_port; + struct dlb2_port *qm_port = &ev_port->qm_port; + struct process_local_port_data *port_data; ++ int num_tx; + int i; + + RTE_ASSERT(ev_port->enq_configured); +@@ -2945,7 +2948,8 @@ __dlb2_event_enqueue_burst(void *event_port, + + port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)]; + +- while (i < num) { ++ num_tx = RTE_MIN(num, ev_port->conf.enqueue_depth); ++ while (i < num_tx) { + uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE]; + uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE]; + int pop_offs = 0; +@@ -3897,31 +3901,47 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2, while (num < max_num) { struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE]; int num_avail; @@ -16275,7 +31833,7 @@ index a5e2f8e46b..7837ae8733 100644 struct dlb2_queue { diff --git a/dpdk/drivers/event/dlb2/dlb2_selftest.c b/dpdk/drivers/event/dlb2/dlb2_selftest.c -index 2113bc2c99..1863ffe049 100644 +index 2113bc2c99..62aa11d981 100644 --- a/dpdk/drivers/event/dlb2/dlb2_selftest.c +++ b/dpdk/drivers/event/dlb2/dlb2_selftest.c @@ -223,7 +223,7 @@ test_stop_flush(struct test *t) /* test to check we can properly flush events */ @@ -16287,6 +31845,24 @@ index 2113bc2c99..1863ffe049 100644 goto err; } +@@ -1475,7 +1475,7 @@ do_selftest(void) + int + test_dlb2_eventdev(void) + { +- const char *dlb2_eventdev_name = "dlb2_event"; ++ const char *dlb2_eventdev_name = "event_dlb2"; + uint8_t num_evdevs = rte_event_dev_count(); + int i, ret = 0; + int found = 0, skipped = 0, passed = 0, failed = 0; +@@ -1489,7 +1489,7 @@ test_dlb2_eventdev(void) + + /* skip non-dlb2 event devices */ + if (strncmp(info.driver_name, dlb2_eventdev_name, +- sizeof(*info.driver_name)) != 0) { ++ strlen(dlb2_eventdev_name)) != 0) { + skipped++; + continue; + } diff --git 
a/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h b/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h index 6b8fee3416..9511521e67 100644 --- a/dpdk/drivers/event/dlb2/pf/base/dlb2_hw_types.h @@ -16500,6 +32076,71 @@ index 3661b940c3..4011c24aef 100644 ret = dlb2_verify_map_qid_slot_available(port, queue, resp); if (ret) +diff --git a/dpdk/drivers/event/dlb2/pf/dlb2_main.c b/dpdk/drivers/event/dlb2/pf/dlb2_main.c +index b6ec85b479..17e5a3cec7 100644 +--- a/dpdk/drivers/event/dlb2/pf/dlb2_main.c ++++ b/dpdk/drivers/event/dlb2/pf/dlb2_main.c +@@ -46,6 +46,7 @@ + #define DLB2_PCI_CAP_ID_MSIX 0x11 + #define DLB2_PCI_EXT_CAP_ID_PRI 0x13 + #define DLB2_PCI_EXT_CAP_ID_ACS 0xD ++#define DLB2_PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */ + + #define DLB2_PCI_PRI_CTRL_ENABLE 0x1 + #define DLB2_PCI_PRI_ALLOC_REQ 0xC +@@ -64,6 +65,8 @@ + #define DLB2_PCI_ACS_CR 0x8 + #define DLB2_PCI_ACS_UF 0x10 + #define DLB2_PCI_ACS_EC 0x20 ++#define DLB2_PCI_PASID_CTRL 0x06 /* PASID control register */ ++#define DLB2_PCI_PASID_CAP_OFFSET 0x148 /* PASID capability offset */ + + static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id) + { +@@ -252,12 +255,14 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + uint16_t rt_ctl_word; + uint32_t pri_reqs_dword; + uint16_t pri_ctrl_word; ++ uint16_t pasid_ctrl; + + int pcie_cap_offset; + int pri_cap_offset; + int msix_cap_offset; + int err_cap_offset; + int acs_cap_offset; ++ int pasid_cap_offset; + int wait_count; + + uint16_t devsta_busy_word; +@@ -577,6 +582,28 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + } + } + ++ /* The current Linux kernel vfio driver does not expose PASID capability to ++ * users. It also enables PASID by default, which breaks DLB PF PMD. We have ++ * to use the hardcoded offset for now to disable PASID. ++ */ ++ pasid_cap_offset = DLB2_PCI_PASID_CAP_OFFSET; ++ ++ off = pasid_cap_offset + DLB2_PCI_PASID_CTRL; ++ if (rte_pci_read_config(pdev, &pasid_ctrl, 2, off) != 2) ++ pasid_ctrl = 0; ++ ++ if (pasid_ctrl) { ++ DLB2_INFO(dlb2_dev, "DLB2 disabling pasid...\n"); ++ ++ pasid_ctrl = 0; ++ ret = rte_pci_write_config(pdev, &pasid_ctrl, 2, off); ++ if (ret != 2) { ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ __func__, (int)off); ++ return ret; ++ } ++ } ++ + return 0; + } + diff --git a/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h b/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h index 74399db018..1dbd885a16 100644 --- a/dpdk/drivers/event/dlb2/rte_pmd_dlb2.h @@ -16513,6 +32154,67 @@ index 74399db018..1dbd885a16 100644 AUTO_POP, /* Pop CQ tokens after (dequeue_depth - 1) events are released. * Supported on load-balanced ports only. 
+diff --git a/dpdk/drivers/event/dpaa/dpaa_eventdev.c b/dpdk/drivers/event/dpaa/dpaa_eventdev.c +index ff6cc0be18..4cc89b4f82 100644 +--- a/dpdk/drivers/event/dpaa/dpaa_eventdev.c ++++ b/dpdk/drivers/event/dpaa/dpaa_eventdev.c +@@ -992,14 +992,14 @@ dpaa_event_check_flags(const char *params) + } + + static int +-dpaa_event_dev_create(const char *name, const char *params) ++dpaa_event_dev_create(const char *name, const char *params, struct rte_vdev_device *vdev) + { + struct rte_eventdev *eventdev; + struct dpaa_eventdev *priv; + + eventdev = rte_event_pmd_vdev_init(name, + sizeof(struct dpaa_eventdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (eventdev == NULL) { + DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name); + goto fail; +@@ -1049,7 +1049,7 @@ dpaa_event_dev_probe(struct rte_vdev_device *vdev) + + params = rte_vdev_device_args(vdev); + +- return dpaa_event_dev_create(name, params); ++ return dpaa_event_dev_create(name, params, vdev); + } + + static int +diff --git a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +index 4d94c315d2..0a5c1c3f95 100644 +--- a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c ++++ b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +@@ -1083,7 +1083,7 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev, + } + + static int +-dpaa2_eventdev_create(const char *name) ++dpaa2_eventdev_create(const char *name, struct rte_vdev_device *vdev) + { + struct rte_eventdev *eventdev; + struct dpaa2_eventdev *priv; +@@ -1093,7 +1093,7 @@ dpaa2_eventdev_create(const char *name) + + eventdev = rte_event_pmd_vdev_init(name, + sizeof(struct dpaa2_eventdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (eventdev == NULL) { + DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name); + goto fail; +@@ -1187,7 +1187,7 @@ dpaa2_eventdev_probe(struct rte_vdev_device *vdev) + + name = rte_vdev_device_name(vdev); + DPAA2_EVENTDEV_INFO("Initializing %s", name); +- return dpaa2_eventdev_create(name); ++ return dpaa2_eventdev_create(name, vdev); + } + + static int diff --git a/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c index bbbd20951f..b549bdfcbb 100644 --- a/dpdk/drivers/event/dpaa2/dpaa2_eventdev_selftest.c @@ -16526,8 +32228,32 @@ index bbbd20951f..b549bdfcbb 100644 eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS, 0 /*MBUF_CACHE_SIZE*/, +diff --git a/dpdk/drivers/event/dsw/dsw_evdev.c b/dpdk/drivers/event/dsw/dsw_evdev.c +index ffabf0d23d..abe8e68525 100644 +--- a/dpdk/drivers/event/dsw/dsw_evdev.c ++++ b/dpdk/drivers/event/dsw/dsw_evdev.c +@@ -363,6 +363,10 @@ static int + dsw_close(struct rte_eventdev *dev) + { + struct dsw_evdev *dsw = dsw_pmd_priv(dev); ++ uint16_t port_id; ++ ++ for (port_id = 0; port_id < dsw->num_ports; port_id++) ++ dsw_port_release(&dsw->ports[port_id]); + + dsw->num_ports = 0; + dsw->num_queues = 0; +@@ -430,7 +434,7 @@ dsw_probe(struct rte_vdev_device *vdev) + name = rte_vdev_device_name(vdev); + + dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (dev == NULL) + return -EFAULT; + diff --git a/dpdk/drivers/event/dsw/dsw_evdev.h b/dpdk/drivers/event/dsw/dsw_evdev.h -index e64ae26f6e..c907c00c78 100644 +index e64ae26f6e..df7dcc5577 100644 --- a/dpdk/drivers/event/dsw/dsw_evdev.h +++ b/dpdk/drivers/event/dsw/dsw_evdev.h @@ -24,7 +24,7 @@ @@ -16548,20 +32274,498 @@ index e64ae26f6e..c907c00c78 100644 * all possible senders. 
*/ #define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4) +@@ -128,7 +128,6 @@ struct dsw_queue_flow { + enum dsw_migration_state { + DSW_MIGRATION_STATE_IDLE, + DSW_MIGRATION_STATE_PAUSING, +- DSW_MIGRATION_STATE_FORWARDING, + DSW_MIGRATION_STATE_UNPAUSING + }; + +@@ -192,6 +191,13 @@ struct dsw_port { + uint16_t paused_events_len; + struct rte_event paused_events[DSW_MAX_EVENTS]; + ++ uint16_t emigrating_events_len; ++ /* Buffer for not-yet-processed events pertaining to a flow ++ * emigrating from this port. These events will be forwarded ++ * to the target port. ++ */ ++ struct rte_event emigrating_events[DSW_MAX_EVENTS]; ++ + uint16_t seen_events_len; + uint16_t seen_events_idx; + struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED]; diff --git a/dpdk/drivers/event/dsw/dsw_event.c b/dpdk/drivers/event/dsw/dsw_event.c -index c6ed470286..e209cd5b00 100644 +index c6ed470286..c5fb0c8882 100644 --- a/dpdk/drivers/event/dsw/dsw_event.c +++ b/dpdk/drivers/event/dsw/dsw_event.c -@@ -1096,7 +1096,7 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port) +@@ -237,6 +237,15 @@ dsw_port_is_flow_paused(struct dsw_port *port, uint8_t queue_id, + queue_id, flow_hash); + } + ++static __rte_always_inline bool ++dsw_port_is_flow_migrating(struct dsw_port *port, uint8_t queue_id, ++ uint16_t flow_hash) ++{ ++ return dsw_is_queue_flow_in_ary(port->emigration_target_qfs, ++ port->emigration_targets_len, ++ queue_id, flow_hash); ++} ++ + static void + dsw_port_add_paused_flows(struct dsw_port *port, struct dsw_queue_flow *qfs, + uint8_t qfs_len) +@@ -271,9 +280,19 @@ dsw_port_remove_paused_flow(struct dsw_port *port, + port->paused_flows[i] = + port->paused_flows[last_idx]; + port->paused_flows_len--; +- break; ++ ++ DSW_LOG_DP_PORT(DEBUG, port->id, ++ "Unpausing queue_id %d flow_hash %d.\n", ++ target_qf->queue_id, ++ target_qf->flow_hash); ++ ++ return; + } + } ++ ++ DSW_LOG_DP_PORT(ERR, port->id, ++ "Failed to unpause queue_id %d flow_hash %d.\n", ++ target_qf->queue_id, target_qf->flow_hash); + } + + static void +@@ -284,7 +303,6 @@ dsw_port_remove_paused_flows(struct dsw_port *port, + + for (i = 0; i < qfs_len; i++) + dsw_port_remove_paused_flow(port, &qfs[i]); +- + } + + static void +@@ -439,14 +457,15 @@ dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id) + + static bool + dsw_select_emigration_target(struct dsw_evdev *dsw, +- struct dsw_queue_flow_burst *bursts, +- uint16_t num_bursts, uint8_t source_port_id, +- int16_t *port_loads, uint16_t num_ports, +- uint8_t *target_port_ids, +- struct dsw_queue_flow *target_qfs, +- uint8_t *targets_len) +-{ +- int16_t source_port_load = port_loads[source_port_id]; ++ struct dsw_port *source_port, ++ struct dsw_queue_flow_burst *bursts, ++ uint16_t num_bursts, ++ int16_t *port_loads, uint16_t num_ports, ++ uint8_t *target_port_ids, ++ struct dsw_queue_flow *target_qfs, ++ uint8_t *targets_len) ++{ ++ int16_t source_port_load = port_loads[source_port->id]; + struct dsw_queue_flow *candidate_qf = NULL; + uint8_t candidate_port_id = 0; + int16_t candidate_weight = -1; +@@ -471,7 +490,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw, + for (port_id = 0; port_id < num_ports; port_id++) { + int16_t weight; + +- if (port_id == source_port_id) ++ if (port_id == source_port->id) + continue; + + if (!dsw_is_serving_port(dsw, port_id, qf->queue_id)) +@@ -493,7 +512,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw, + if (candidate_weight < 0) + return false; + +- DSW_LOG_DP_PORT(DEBUG, source_port_id, "Selected 
queue_id %d " ++ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Selected queue_id %d " + "flow_hash %d (with flow load %d) for migration " + "to port %d.\n", candidate_qf->queue_id, + candidate_qf->flow_hash, +@@ -501,7 +520,7 @@ dsw_select_emigration_target(struct dsw_evdev *dsw, + candidate_port_id); + + port_loads[candidate_port_id] += candidate_flow_load; +- port_loads[source_port_id] -= candidate_flow_load; ++ port_loads[source_port->id] -= candidate_flow_load; + + target_port_ids[*targets_len] = candidate_port_id; + target_qfs[*targets_len] = *candidate_qf; +@@ -527,8 +546,8 @@ dsw_select_emigration_targets(struct dsw_evdev *dsw, + for (i = 0; i < DSW_MAX_FLOWS_PER_MIGRATION; i++) { + bool found; + +- found = dsw_select_emigration_target(dsw, bursts, num_bursts, +- source_port->id, ++ found = dsw_select_emigration_target(dsw, source_port, ++ bursts, num_bursts, + port_loads, dsw->num_ports, + target_port_ids, + target_qfs, +@@ -608,6 +627,7 @@ dsw_port_buffer_paused(struct dsw_port *port, + port->paused_events_len++; + } + ++ + static void + dsw_port_buffer_non_paused(struct dsw_evdev *dsw, struct dsw_port *source_port, + uint8_t dest_port_id, const struct rte_event *event) +@@ -679,40 +699,39 @@ dsw_port_buffer_event(struct dsw_evdev *dsw, struct dsw_port *source_port, + } + + static void +-dsw_port_flush_paused_events(struct dsw_evdev *dsw, +- struct dsw_port *source_port, +- const struct dsw_queue_flow *qf) ++dsw_port_flush_no_longer_paused_events(struct dsw_evdev *dsw, ++ struct dsw_port *source_port) + { + uint16_t paused_events_len = source_port->paused_events_len; + struct rte_event paused_events[paused_events_len]; +- uint8_t dest_port_id; + uint16_t i; + + if (paused_events_len == 0) + return; + +- if (dsw_port_is_flow_paused(source_port, qf->queue_id, qf->flow_hash)) +- return; +- + rte_memcpy(paused_events, source_port->paused_events, + paused_events_len * sizeof(struct rte_event)); + + source_port->paused_events_len = 0; + +- dest_port_id = dsw_schedule(dsw, qf->queue_id, qf->flow_hash); +- + for (i = 0; i < paused_events_len; i++) { + struct rte_event *event = &paused_events[i]; + uint16_t flow_hash; + + flow_hash = dsw_flow_id_hash(event->flow_id); + +- if (event->queue_id == qf->queue_id && +- flow_hash == qf->flow_hash) ++ if (dsw_port_is_flow_paused(source_port, event->queue_id, ++ flow_hash)) ++ dsw_port_buffer_paused(source_port, event); ++ else { ++ uint8_t dest_port_id; ++ ++ dest_port_id = dsw_schedule(dsw, event->queue_id, ++ flow_hash); ++ + dsw_port_buffer_non_paused(dsw, source_port, + dest_port_id, event); +- else +- dsw_port_buffer_paused(source_port, event); ++ } + } + } + +@@ -755,11 +774,6 @@ dsw_port_end_emigration(struct dsw_evdev *dsw, struct dsw_port *port, + DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for " + "queue_id %d flow_hash %d.\n", queue_id, + flow_hash); +- +- if (queue_schedule_type == RTE_SCHED_TYPE_ATOMIC) { +- dsw_port_remove_paused_flow(port, qf); +- dsw_port_flush_paused_events(dsw, port, qf); +- } + } + + finished = port->emigration_targets_len - left_qfs_len; +@@ -826,10 +840,31 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw, + if (dsw->num_ports == 1) + return; + +- if (seen_events_len < DSW_MAX_EVENTS_RECORDED) ++ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n"); ++ ++ if (seen_events_len < DSW_MAX_EVENTS_RECORDED) { ++ DSW_LOG_DP_PORT(DEBUG, source_port->id, "Not enough events " ++ "are recorded to allow for a migration.\n"); + return; ++ } + +- DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering 
emigration.\n"); ++ /* A flow migration cannot be initiated if there are paused ++ * events, since some/all of those events may be have been ++ * produced as a result of processing the flow(s) selected for ++ * migration. Moving such a flow would potentially introduced ++ * reordering, since processing the migrated flow on the ++ * receiving flow may commence before the to-be-enqueued-to ++ ++ * flows are unpaused, leading to paused events on the second ++ * port as well, destined for the same paused flow(s). When ++ * those flows are unpaused, the resulting events are ++ * delivered the owning port in an undefined order. ++ */ ++ if (source_port->paused_events_len > 0) { ++ DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are " ++ "events in the paus buffer.\n"); ++ return; ++ } + + /* Randomize interval to avoid having all threads considering + * emigration at the same in point in time, which might lead +@@ -927,9 +962,8 @@ dsw_port_consider_emigration(struct dsw_evdev *dsw, + } + + static void +-dsw_port_flush_paused_events(struct dsw_evdev *dsw, +- struct dsw_port *source_port, +- const struct dsw_queue_flow *qf); ++dsw_port_flush_no_longer_paused_events(struct dsw_evdev *dsw, ++ struct dsw_port *source_port); + + static void + dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port, +@@ -954,62 +988,123 @@ dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port, + + if (dsw_schedule(dsw, qf->queue_id, qf->flow_hash) == port->id) + port->immigrations++; ++ } ++ ++ dsw_port_flush_no_longer_paused_events(dsw, port); ++} ++ ++static void ++dsw_port_buffer_in_buffer(struct dsw_port *port, ++ const struct rte_event *event) ++ ++{ ++ RTE_ASSERT(port->in_buffer_start == 0); ++ ++ port->in_buffer[port->in_buffer_len] = *event; ++ port->in_buffer_len++; ++} ++ ++static void ++dsw_port_forward_emigrated_event(struct dsw_evdev *dsw, ++ struct dsw_port *source_port, ++ struct rte_event *event) ++{ ++ uint16_t i; ++ ++ for (i = 0; i < source_port->emigration_targets_len; i++) { ++ struct dsw_queue_flow *qf = ++ &source_port->emigration_target_qfs[i]; ++ uint8_t dest_port_id = ++ source_port->emigration_target_port_ids[i]; ++ struct dsw_port *dest_port = &dsw->ports[dest_port_id]; + +- dsw_port_flush_paused_events(dsw, port, qf); ++ if (event->queue_id == qf->queue_id && ++ dsw_flow_id_hash(event->flow_id) == qf->flow_hash) { ++ /* No need to care about bursting forwarded ++ * events (to the destination port's in_ring), ++ * since migration doesn't happen very often, ++ * and also the majority of the dequeued ++ * events will likely *not* be forwarded. ++ */ ++ while (rte_event_ring_enqueue_burst(dest_port->in_ring, ++ event, 1, ++ NULL) != 1) ++ rte_pause(); ++ return; ++ } + } ++ ++ /* Event did not belong to the emigrated flows */ ++ dsw_port_buffer_in_buffer(source_port, event); + } + +-#define FORWARD_BURST_SIZE (32) ++static void ++dsw_port_stash_migrating_event(struct dsw_port *port, ++ const struct rte_event *event) ++{ ++ port->emigrating_events[port->emigrating_events_len] = *event; ++ port->emigrating_events_len++; ++} ++ ++#define DRAIN_DEQUEUE_BURST_SIZE (32) + + static void +-dsw_port_forward_emigrated_flow(struct dsw_port *source_port, +- struct rte_event_ring *dest_ring, +- uint8_t queue_id, +- uint16_t flow_hash) ++dsw_port_drain_in_ring(struct dsw_port *source_port) + { +- uint16_t events_left; ++ uint16_t num_events; ++ uint16_t dequeued; + + /* Control ring message should been seen before the ring count + * is read on the port's in_ring. 
+ */ + rte_smp_rmb(); + +- events_left = rte_event_ring_count(source_port->in_ring); ++ num_events = rte_event_ring_count(source_port->in_ring); + +- while (events_left > 0) { +- uint16_t in_burst_size = +- RTE_MIN(FORWARD_BURST_SIZE, events_left); +- struct rte_event in_burst[in_burst_size]; +- uint16_t in_len; ++ for (dequeued = 0; dequeued < num_events; ) { ++ uint16_t burst_size = RTE_MIN(DRAIN_DEQUEUE_BURST_SIZE, ++ num_events - dequeued); ++ struct rte_event events[burst_size]; ++ uint16_t len; + uint16_t i; + +- in_len = rte_event_ring_dequeue_burst(source_port->in_ring, +- in_burst, +- in_burst_size, NULL); +- /* No need to care about bursting forwarded events (to +- * the destination port's in_ring), since migration +- * doesn't happen very often, and also the majority of +- * the dequeued events will likely *not* be forwarded. +- */ +- for (i = 0; i < in_len; i++) { +- struct rte_event *e = &in_burst[i]; +- if (e->queue_id == queue_id && +- dsw_flow_id_hash(e->flow_id) == flow_hash) { +- while (rte_event_ring_enqueue_burst(dest_ring, +- e, 1, +- NULL) != 1) +- rte_pause(); +- } else { +- uint16_t last_idx = source_port->in_buffer_len; +- source_port->in_buffer[last_idx] = *e; +- source_port->in_buffer_len++; +- } ++ len = rte_event_ring_dequeue_burst(source_port->in_ring, ++ events, burst_size, ++ NULL); ++ ++ for (i = 0; i < len; i++) { ++ struct rte_event *event = &events[i]; ++ uint16_t flow_hash; ++ ++ flow_hash = dsw_flow_id_hash(event->flow_id); ++ ++ if (unlikely(dsw_port_is_flow_migrating(source_port, ++ event->queue_id, ++ flow_hash))) ++ dsw_port_stash_migrating_event(source_port, ++ event); ++ else ++ dsw_port_buffer_in_buffer(source_port, event); + } + +- events_left -= in_len; ++ dequeued += len; + } + } + ++static void ++dsw_port_forward_emigrated_flows(struct dsw_evdev *dsw, ++ struct dsw_port *source_port) ++{ ++ uint16_t i; ++ ++ for (i = 0; i < source_port->emigrating_events_len; i++) { ++ struct rte_event *event = &source_port->emigrating_events[i]; ++ ++ dsw_port_forward_emigrated_event(dsw, source_port, event); ++ } ++ source_port->emigrating_events_len = 0; ++} ++ + static void + dsw_port_move_emigrating_flows(struct dsw_evdev *dsw, + struct dsw_port *source_port) +@@ -1018,22 +1113,27 @@ dsw_port_move_emigrating_flows(struct dsw_evdev *dsw, + + dsw_port_flush_out_buffers(dsw, source_port); + +- rte_smp_wmb(); +- + for (i = 0; i < source_port->emigration_targets_len; i++) { + struct dsw_queue_flow *qf = + &source_port->emigration_target_qfs[i]; + uint8_t dest_port_id = + source_port->emigration_target_port_ids[i]; +- struct dsw_port *dest_port = &dsw->ports[dest_port_id]; + + dsw->queues[qf->queue_id].flow_to_port_map[qf->flow_hash] = +- dest_port_id; +- +- dsw_port_forward_emigrated_flow(source_port, dest_port->in_ring, +- qf->queue_id, qf->flow_hash); ++ dest_port_id; + } + ++ rte_smp_wmb(); ++ ++ dsw_port_drain_in_ring(source_port); ++ dsw_port_forward_emigrated_flows(dsw, source_port); ++ ++ dsw_port_remove_paused_flows(source_port, ++ source_port->emigration_target_qfs, ++ source_port->emigration_targets_len); ++ ++ dsw_port_flush_no_longer_paused_events(dsw, source_port); ++ + /* Flow table update and migration destination port's enqueues + * must be seen before the control message. 
+ */ +@@ -1054,9 +1154,7 @@ dsw_port_handle_confirm(struct dsw_evdev *dsw, struct dsw_port *port) + if (port->cfm_cnt == (dsw->num_ports-1)) { + switch (port->migration_state) { + case DSW_MIGRATION_STATE_PAUSING: +- DSW_LOG_DP_PORT(DEBUG, port->id, "Going into forwarding " +- "migration state.\n"); +- port->migration_state = DSW_MIGRATION_STATE_FORWARDING; ++ dsw_port_move_emigrating_flows(dsw, port); + break; + case DSW_MIGRATION_STATE_UNPAUSING: + dsw_port_end_emigration(dsw, port, +@@ -1096,18 +1194,18 @@ dsw_port_ctl_process(struct dsw_evdev *dsw, struct dsw_port *port) static void dsw_port_note_op(struct dsw_port *port, uint16_t num_events) { - /* To pull the control ring reasonbly often on busy ports, -+ /* To pull the control ring reasonably often on busy ports, - * each dequeued/enqueued event is considered an 'op' too. - */ +- * each dequeued/enqueued event is considered an 'op' too. +- */ port->ops_since_bg_task += (num_events+1); -@@ -1180,7 +1180,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port, + } + + static void + dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port) + { +- if (unlikely(port->migration_state == DSW_MIGRATION_STATE_FORWARDING && +- port->pending_releases == 0)) +- dsw_port_move_emigrating_flows(dsw, port); ++ /* For simplicity (in the migration logic), avoid all ++ * background processing in case event processing is in ++ * progress. ++ */ ++ if (port->pending_releases > 0) ++ return; + + /* Polling the control ring is relatively inexpensive, and + * polling it often helps bringing down migration latency, so +@@ -1167,7 +1265,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port, + uint16_t i; + + DSW_LOG_DP_PORT(DEBUG, source_port->id, "Attempting to enqueue %d " +- "events to port %d.\n", events_len, source_port->id); ++ "events.\n", events_len); + + dsw_port_bg_process(dsw, source_port); + +@@ -1180,7 +1278,7 @@ dsw_event_enqueue_burst_generic(struct dsw_port *source_port, * addition, a port cannot be left "unattended" (e.g. unused) * for long periods of time, since that would stall * migration. Eventdev API extensions to provide a cleaner way @@ -16570,6 +32774,70 @@ index c6ed470286..e209cd5b00 100644 * considered. */ if (unlikely(events_len == 0)) { +@@ -1351,6 +1449,38 @@ dsw_port_dequeue_burst(struct dsw_port *port, struct rte_event *events, + return rte_event_ring_dequeue_burst(port->in_ring, events, num, NULL); + } + ++static void ++dsw_port_stash_migrating_events(struct dsw_port *port, ++ struct rte_event *events, uint16_t *num) ++{ ++ uint16_t i; ++ ++ /* The assumption here - performance-wise - is that events ++ * belonging to migrating flows are relatively rare. 
++ */ ++ for (i = 0; i < (*num); ) { ++ struct rte_event *event = &events[i]; ++ uint16_t flow_hash; ++ ++ flow_hash = dsw_flow_id_hash(event->flow_id); ++ ++ if (unlikely(dsw_port_is_flow_migrating(port, event->queue_id, ++ flow_hash))) { ++ uint16_t left; ++ ++ dsw_port_stash_migrating_event(port, event); ++ ++ (*num)--; ++ left = *num - i; ++ ++ if (left > 0) ++ memmove(event, event + 1, ++ left * sizeof(struct rte_event)); ++ } else ++ i++; ++ } ++} ++ + uint16_t + dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num, + uint64_t wait __rte_unused) +@@ -1368,6 +1498,11 @@ dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num, + + dequeued = dsw_port_dequeue_burst(source_port, events, num); + ++ if (unlikely(source_port->migration_state == ++ DSW_MIGRATION_STATE_PAUSING)) ++ dsw_port_stash_migrating_events(source_port, events, ++ &dequeued); ++ + source_port->pending_releases = dequeued; + + dsw_port_load_record(source_port, dequeued); +diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev.c b/dpdk/drivers/event/octeontx/ssovf_evdev.c +index 9e14e35d10..634fa8a27f 100644 +--- a/dpdk/drivers/event/octeontx/ssovf_evdev.c ++++ b/dpdk/drivers/event/octeontx/ssovf_evdev.c +@@ -879,7 +879,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) + } + + eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev), +- rte_socket_id()); ++ rte_socket_id(), vdev); + if (eventdev == NULL) { + ssovf_log_err("Failed to create eventdev vdev %s", name); + return -ENOMEM; diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev.h b/dpdk/drivers/event/octeontx/ssovf_evdev.h index bb1056a955..e46dc055eb 100644 --- a/dpdk/drivers/event/octeontx/ssovf_evdev.h @@ -16659,9 +32927,18 @@ index 36ae4dd88f..ca06d51c8a 100644 * rte_pktmbuf_mtod_offset can be used for this purpose * but it brings down the performance as it reads diff --git a/dpdk/drivers/event/opdl/opdl_evdev.c b/dpdk/drivers/event/opdl/opdl_evdev.c -index 15c10240b0..8b6890b220 100644 +index 15c10240b0..2774a923aa 100644 --- a/dpdk/drivers/event/opdl/opdl_evdev.c +++ b/dpdk/drivers/event/opdl/opdl_evdev.c +@@ -695,7 +695,7 @@ opdl_probe(struct rte_vdev_device *vdev) + } + } + dev = rte_event_pmd_vdev_init(name, +- sizeof(struct opdl_evdev), socket_id); ++ sizeof(struct opdl_evdev), socket_id, vdev); + + if (dev == NULL) { + PMD_DRV_LOG(ERR, "eventdev vdev init() failed"); @@ -703,7 +703,7 @@ opdl_probe(struct rte_vdev_device *vdev) } @@ -16684,6 +32961,58 @@ index e4fc70a440..24b92df476 100644 if (!err) { if (rte_event_dev_start(evdev) < 0) { PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n", +diff --git a/dpdk/drivers/event/skeleton/skeleton_eventdev.c b/dpdk/drivers/event/skeleton/skeleton_eventdev.c +index bf3b01ebc8..66510cc432 100644 +--- a/dpdk/drivers/event/skeleton/skeleton_eventdev.c ++++ b/dpdk/drivers/event/skeleton/skeleton_eventdev.c +@@ -427,12 +427,12 @@ RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map); + /* VDEV based event device */ + + static int +-skeleton_eventdev_create(const char *name, int socket_id) ++skeleton_eventdev_create(const char *name, int socket_id, struct rte_vdev_device *vdev) + { + struct rte_eventdev *eventdev; + + eventdev = rte_event_pmd_vdev_init(name, +- sizeof(struct skeleton_eventdev), socket_id); ++ sizeof(struct skeleton_eventdev), socket_id, vdev); + if (eventdev == NULL) { + PMD_DRV_ERR("Failed to create eventdev vdev %s", name); + goto fail; +@@ -458,7 +458,7 @@ skeleton_eventdev_probe(struct rte_vdev_device *vdev) + name = 
rte_vdev_device_name(vdev); + RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name, + rte_socket_id()); +- return skeleton_eventdev_create(name, rte_socket_id()); ++ return skeleton_eventdev_create(name, rte_socket_id(), vdev); + } + + static int +diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c +index 6ae613e0f2..e43bf250d6 100644 +--- a/dpdk/drivers/event/sw/sw_evdev.c ++++ b/dpdk/drivers/event/sw/sw_evdev.c +@@ -625,8 +625,8 @@ sw_dump(struct rte_eventdev *dev, FILE *f) + "Ordered", "Atomic", "Parallel", "Directed" + }; + uint32_t i; +- fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name", +- sw->port_count, sw->qid_count); ++ fprintf(f, "EventDev %s: ports %d, qids %d\n", ++ dev->data->name, sw->port_count, sw->qid_count); + + fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n", + sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts); +@@ -1077,7 +1077,7 @@ sw_probe(struct rte_vdev_device *vdev) + min_burst_size, deq_burst_size, refill_once); + + dev = rte_event_pmd_vdev_init(name, +- sizeof(struct sw_evdev), socket_id); ++ sizeof(struct sw_evdev), socket_id, vdev); + if (dev == NULL) { + SW_LOG_ERR("eventdev vdev init() failed"); + return -EFAULT; diff --git a/dpdk/drivers/event/sw/sw_evdev.h b/dpdk/drivers/event/sw/sw_evdev.h index 33645bd1df..4fd1054470 100644 --- a/dpdk/drivers/event/sw/sw_evdev.h @@ -16697,8 +33026,59 @@ index 33645bd1df..4fd1054470 100644 uint16_t last_dequeue_burst_sz; /* how big the burst was */ uint64_t last_dequeue_ticks; /* used to track burst processing time */ +diff --git a/dpdk/drivers/event/sw/sw_evdev_scheduler.c b/dpdk/drivers/event/sw/sw_evdev_scheduler.c +index 809a54d731..2ee5d1b2d8 100644 +--- a/dpdk/drivers/event/sw/sw_evdev_scheduler.c ++++ b/dpdk/drivers/event/sw/sw_evdev_scheduler.c +@@ -90,8 +90,10 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, + sw->cq_ring_space[cq]--; + + int head = (p->hist_head++ & (SW_PORT_HIST_LIST-1)); +- p->hist_list[head].fid = flow_id; +- p->hist_list[head].qid = qid_id; ++ p->hist_list[head] = (struct sw_hist_list_entry) { ++ .qid = qid_id, ++ .fid = flow_id, ++ }; + + p->stats.tx_pkts++; + qid->stats.tx_pkts++; +@@ -162,8 +164,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid, + qid->stats.tx_pkts++; + + const int head = (p->hist_head & (SW_PORT_HIST_LIST-1)); +- p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id); +- p->hist_list[head].qid = qid_id; ++ p->hist_list[head] = (struct sw_hist_list_entry) { ++ .qid = qid_id, ++ .fid = SW_HASH_FLOWID(qe->flow_id), ++ }; + + if (keep_order) + rob_ring_dequeue(qid->reorder_buffer_freelist, +@@ -368,12 +372,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) + if (!allow_reorder && !eop) + flags = QE_FLAG_VALID; + +- /* +- * if we don't have space for this packet in an IQ, +- * then move on to next queue. Technically, for a +- * packet that needs reordering, we don't need to check +- * here, but it simplifies things not to special-case +- */ + uint32_t iq_num = PRIO_TO_IQ(qe->priority); + struct sw_qid *qid = &sw->qids[qe->queue_id]; + +@@ -419,7 +417,6 @@ __pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder) + struct reorder_buffer_entry *rob_entry = + hist_entry->rob_entry; + +- hist_entry->rob_entry = NULL; + /* Although fragmentation not currently + * supported by eventdev API, we support it + * here. 
Open: How do we alert the user that diff --git a/dpdk/drivers/event/sw/sw_evdev_selftest.c b/dpdk/drivers/event/sw/sw_evdev_selftest.c -index 9768d3a0c7..cb97a4d615 100644 +index 9768d3a0c7..e513601a5b 100644 --- a/dpdk/drivers/event/sw/sw_evdev_selftest.c +++ b/dpdk/drivers/event/sw/sw_evdev_selftest.c @@ -1109,7 +1109,7 @@ xstats_tests(struct test *t) @@ -16710,6 +33090,27 @@ index 9768d3a0c7..cb97a4d615 100644 static const uint64_t queue_expected_zero[] = { 0 /* rx */, 0 /* tx */, +@@ -1488,6 +1488,7 @@ xstats_id_reset_tests(struct test *t) + goto fail; + } + ev.queue_id = t->qid[i]; ++ ev.flow_id = 0; + ev.op = RTE_EVENT_OP_NEW; + ev.mbuf = arp; + *rte_event_pmd_selftest_seqn(arp) = i; +@@ -1641,9 +1642,9 @@ xstats_id_reset_tests(struct test *t) + failed = 1; + } + if (val != port_expected[i]) { +- printf("%d: %s value incorrect, expected %"PRIu64 +- " got %d\n", __LINE__, port_names[i], +- port_expected[i], id); ++ printf("%d: %s value incorrect, expected %" PRIu64 ++ " got %" PRIu64 "\n", ++ __LINE__, port_names[i], port_expected[i], val); + failed = 1; + } + /* reset to zero */ diff --git a/dpdk/drivers/gpu/cuda/cuda.c b/dpdk/drivers/gpu/cuda/cuda.c index 882df08e56..fd577f7167 100644 --- a/dpdk/drivers/gpu/cuda/cuda.c @@ -16738,10 +33139,23 @@ index 882df08e56..fd577f7167 100644 if (mem_alloc_list_cur->next != NULL) mem_alloc_list_cur->next->prev = mem_alloc_list_cur->prev; diff --git a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c -index 4c669b878f..6ebbf91de5 100644 +index 4c669b878f..55ebd6bfeb 100644 --- a/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c +++ b/dpdk/drivers/mempool/cnxk/cn10k_mempool_ops.c -@@ -202,7 +202,7 @@ cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) +@@ -144,6 +144,12 @@ cn10k_mempool_enq(struct rte_mempool *mp, void *const *obj_table, + */ + rte_io_wmb(); + ++ /* For non-EAL threads, rte_lcore_id() will not be valid. Hence ++ * fallback to bulk alloc ++ */ ++ if (unlikely(rte_lcore_id() == LCORE_ID_ANY)) ++ return cnxk_mempool_enq(mp, obj_table, n); ++ + if (n == 1) { + roc_npa_aura_op_free(mp->pool_id, 1, ptr[0]); + return 0; +@@ -202,7 +208,7 @@ cn10k_mempool_deq(struct rte_mempool *mp, void **obj_table, unsigned int n) BATCH_ALLOC_SZ, 0, 1); /* If issue fails, try falling back to default alloc */ if (unlikely(rc)) @@ -16750,6 +33164,25 @@ index 4c669b878f..6ebbf91de5 100644 mem->status = BATCH_ALLOC_OP_ISSUED; } +diff --git a/dpdk/drivers/mempool/cnxk/cnxk_mempool_ops.c b/dpdk/drivers/mempool/cnxk/cnxk_mempool_ops.c +index c7b75f026d..b3592d91dc 100644 +--- a/dpdk/drivers/mempool/cnxk/cnxk_mempool_ops.c ++++ b/dpdk/drivers/mempool/cnxk/cnxk_mempool_ops.c +@@ -123,6 +123,14 @@ cnxk_mempool_free(struct rte_mempool *mp) + int rc = 0; + + plt_npa_dbg("aura_handle=0x%" PRIx64, mp->pool_id); ++ ++ /* It can happen that rte_mempool_free() is called immediately after ++ * rte_mempool_create_empty(). In such cases the NPA pool will not be ++ * allocated. 
++ */ ++ if (roc_npa_aura_handle_to_base(mp->pool_id) == 0) ++ return; ++ + rc = roc_npa_pool_destroy(mp->pool_id); + if (rc) + plt_err("Failed to free pool or aura rc=%d", rc); diff --git a/dpdk/drivers/mempool/dpaa/dpaa_mempool.c b/dpdk/drivers/mempool/dpaa/dpaa_mempool.c index f17aff9655..32639a3bfd 100644 --- a/dpdk/drivers/mempool/dpaa/dpaa_mempool.c @@ -16785,6 +33218,34 @@ index 94dc5cd815..8fd9edced2 100644 gpool, ret); } +diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +index 1396f32c3d..88cdc7ee2e 100644 +--- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c ++++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +@@ -312,7 +312,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + static int + eth_dev_start(struct rte_eth_dev *dev) + { ++ struct pmd_internals *internals = dev->data->dev_private; ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; ++ for (i = 0; i < internals->nb_queues; i++) { ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ } + return 0; + } + +@@ -340,6 +347,8 @@ eth_dev_stop(struct rte_eth_dev *dev) + + internals->rx_queue[i].sockfd = -1; + internals->tx_queue[i].sockfd = -1; ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; diff --git a/dpdk/drivers/net/af_xdp/compat.h b/dpdk/drivers/net/af_xdp/compat.h index 3880dc7dd7..28ea64aeaa 100644 --- a/dpdk/drivers/net/af_xdp/compat.h @@ -16851,10 +33312,10 @@ index 3880dc7dd7..28ea64aeaa 100644 +} +#endif diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build -index 3ed2b29784..1e0de23705 100644 +index 3ed2b29784..2605086d0c 100644 --- a/dpdk/drivers/net/af_xdp/meson.build +++ b/dpdk/drivers/net/af_xdp/meson.build -@@ -9,19 +9,49 @@ endif +@@ -9,19 +9,57 @@ endif sources = files('rte_eth_af_xdp.c') @@ -16910,9 +33371,17 @@ index 3ed2b29784..1e0de23705 100644 build = false - reason = 'missing dependency, "libbpf"' + reason = 'missing header, "linux/if_xdp.h"' ++endif ++ ++if build ++ if cc.has_function('bpf_xdp_attach', ++ prefix : '#include <bpf/libbpf.h>', ++ dependencies : bpf_dep) ++ cflags += ['-DRTE_NET_AF_XDP_LIBBPF_XDP_ATTACH'] ++ endif endif diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -index 96c2c9d939..9db76d4562 100644 +index 96c2c9d939..6bc7178fc5 100644 --- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c @@ -15,8 +15,6 @@ @@ -16924,7 +33393,36 @@ index 96c2c9d939..9db76d4562 100644 #include <rte_ethdev.h> #include <ethdev_driver.h> -@@ -697,67 +695,6 @@ find_internal_resource(struct pmd_internals *port_int) +@@ -655,7 +653,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + static int + eth_dev_start(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) { ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ } + + return 0; + } +@@ -664,7 +668,14 @@ eth_dev_start(struct rte_eth_dev *dev) + static int + eth_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) { ++ 
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ } ++ + return 0; + } + +@@ -697,67 +708,6 @@ find_internal_resource(struct pmd_internals *port_int) return list; } @@ -16992,7 +33490,62 @@ index 96c2c9d939..9db76d4562 100644 static int eth_dev_configure(struct rte_eth_dev *dev) { -@@ -1013,6 +950,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align) +@@ -908,6 +858,43 @@ eth_stats_reset(struct rte_eth_dev *dev) + return 0; + } + ++#ifdef RTE_NET_AF_XDP_LIBBPF_XDP_ATTACH ++ ++static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags) ++{ ++ return bpf_xdp_attach(ifindex, fd, flags, NULL); ++} ++ ++static int ++remove_xdp_program(struct pmd_internals *internals) ++{ ++ uint32_t curr_prog_id = 0; ++ int ret; ++ ++ ret = bpf_xdp_query_id(internals->if_index, XDP_FLAGS_UPDATE_IF_NOEXIST, ++ &curr_prog_id); ++ if (ret != 0) { ++ AF_XDP_LOG(ERR, "bpf_xdp_query_id failed\n"); ++ return ret; ++ } ++ ++ ret = bpf_xdp_detach(internals->if_index, XDP_FLAGS_UPDATE_IF_NOEXIST, ++ NULL); ++ if (ret != 0) ++ AF_XDP_LOG(ERR, "bpf_xdp_detach failed\n"); ++ return ret; ++} ++ ++#else ++ ++#pragma GCC diagnostic push ++#pragma GCC diagnostic ignored "-Wdeprecated-declarations" ++ ++static int link_xdp_prog_with_dev(int ifindex, int fd, __u32 flags) ++{ ++ return bpf_set_link_xdp_fd(ifindex, fd, flags); ++} ++ + static void + remove_xdp_program(struct pmd_internals *internals) + { +@@ -922,6 +909,10 @@ remove_xdp_program(struct pmd_internals *internals) + XDP_FLAGS_UPDATE_IF_NOEXIST); + } + ++#pragma GCC diagnostic pop ++ ++#endif ++ + static void + xdp_umem_destroy(struct xsk_umem_info *umem) + { +@@ -1013,6 +1004,66 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align) return aligned_addr; } @@ -17059,7 +33612,7 @@ index 96c2c9d939..9db76d4562 100644 static struct xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq) -@@ -1052,7 +1049,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1052,7 +1103,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id()); if (umem == NULL) { @@ -17068,7 +33621,7 @@ index 96c2c9d939..9db76d4562 100644 return NULL; } -@@ -1065,7 +1062,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1065,7 +1116,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, ret = xsk_umem__create(&umem->umem, base_addr, umem_size, &rxq->fq, &rxq->cq, &usr_config); if (ret) { @@ -17077,7 +33630,7 @@ index 96c2c9d939..9db76d4562 100644 goto err; } umem->buffer = base_addr; -@@ -1099,7 +1096,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1099,7 +1150,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id()); if (umem == NULL) { @@ -17086,7 +33639,7 @@ index 96c2c9d939..9db76d4562 100644 return NULL; } -@@ -1135,7 +1132,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1135,7 +1186,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, &usr_config); if (ret) { @@ -17095,7 +33648,13 @@ index 96c2c9d939..9db76d4562 100644 goto err; } umem->mz = mz; -@@ -1151,13 +1148,13 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, +@@ -1148,16 +1199,19 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals 
*internals, + return NULL; + } + ++#pragma GCC diagnostic push ++#pragma GCC diagnostic ignored "-Wdeprecated-declarations" ++ static int load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) { @@ -17113,7 +33672,25 @@ index 96c2c9d939..9db76d4562 100644 } /* -@@ -1269,18 +1266,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1171,7 +1225,7 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) + } + + /* Link the program with the given network device */ +- ret = bpf_set_link_xdp_fd(if_index, prog_fd, ++ ret = link_xdp_prog_with_dev(if_index, prog_fd, + XDP_FLAGS_UPDATE_IF_NOEXIST); + if (ret) { + AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n", +@@ -1185,6 +1239,8 @@ load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map) + return 0; + } + ++#pragma GCC diagnostic pop ++ + /* Detect support for busy polling through setsockopt(). */ + static int + configure_preferred_busy_poll(struct pkt_rx_queue *rxq) +@@ -1269,18 +1325,19 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, cfg.bind_flags |= XDP_USE_NEED_WAKEUP; #endif @@ -17144,7 +33721,7 @@ index 96c2c9d939..9db76d4562 100644 } if (internals->shared_umem) -@@ -1294,7 +1292,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1294,7 +1351,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, if (ret) { AF_XDP_LOG(ERR, "Failed to create xsk socket.\n"); @@ -17153,7 +33730,7 @@ index 96c2c9d939..9db76d4562 100644 } /* insert the xsk into the xsks_map */ -@@ -1306,7 +1304,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1306,7 +1363,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, &rxq->xsk_queue_idx, &fd, 0); if (err) { AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n"); @@ -17162,7 +33739,7 @@ index 96c2c9d939..9db76d4562 100644 } } -@@ -1314,7 +1312,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1314,7 +1371,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size); if (ret) { AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n"); @@ -17171,7 +33748,7 @@ index 96c2c9d939..9db76d4562 100644 } #endif -@@ -1322,20 +1320,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, +@@ -1322,20 +1379,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, ret = configure_preferred_busy_poll(rxq); if (ret) { AF_XDP_LOG(ERR, "Failed configure busy polling.\n"); @@ -17224,7 +33801,7 @@ index 1c03e8bfa1..3a028f4290 100644 * @return * void diff --git a/dpdk/drivers/net/atlantic/atl_rxtx.c b/dpdk/drivers/net/atlantic/atl_rxtx.c -index e3f57ded73..aeb79bf5a2 100644 +index e3f57ded73..cb6f8141a8 100644 --- a/dpdk/drivers/net/atlantic/atl_rxtx.c +++ b/dpdk/drivers/net/atlantic/atl_rxtx.c @@ -1094,7 +1094,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -17236,6 +33813,19 @@ index e3f57ded73..aeb79bf5a2 100644 * hardware point of view... 
*/ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); +@@ -1127,10 +1127,9 @@ atl_xmit_cleanup(struct atl_tx_queue *txq) + if (txq != NULL) { + sw_ring = txq->sw_ring; + int head = txq->tx_head; +- int cnt; +- int i; ++ int cnt = head; + +- for (i = 0, cnt = head; ; i++) { ++ while (true) { + txd = &txq->hw_ring[cnt]; + + if (txd->dd) diff --git a/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c b/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c index 7d0e724019..d0eb4af928 100644 --- a/dpdk/drivers/net/atlantic/hw_atl/hw_atl_b0.c @@ -17249,6 +33839,50 @@ index 7d0e724019..d0eb4af928 100644 hw_atl_rpf_vlan_prom_mode_en_set(self, 1); /* Rx Interrupts */ +diff --git a/dpdk/drivers/net/avp/avp_ethdev.c b/dpdk/drivers/net/avp/avp_ethdev.c +index 7ac55584ff..4676c19a78 100644 +--- a/dpdk/drivers/net/avp/avp_ethdev.c ++++ b/dpdk/drivers/net/avp/avp_ethdev.c +@@ -2036,6 +2036,7 @@ static int + avp_dev_start(struct rte_eth_dev *eth_dev) + { + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); ++ uint16_t i; + int ret; + + rte_spinlock_lock(&avp->lock); +@@ -2056,6 +2057,11 @@ avp_dev_start(struct rte_eth_dev *eth_dev) + /* remember current link state */ + avp->flags |= AVP_F_LINKUP; + ++ for (i = 0; i < avp->num_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < avp->num_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + ret = 0; + + unlock: +@@ -2067,6 +2073,7 @@ static int + avp_dev_stop(struct rte_eth_dev *eth_dev) + { + struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); ++ uint16_t i; + int ret; + + rte_spinlock_lock(&avp->lock); +@@ -2086,6 +2093,11 @@ avp_dev_stop(struct rte_eth_dev *eth_dev) + ret); + } + ++ for (i = 0; i < avp->num_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < avp->num_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + unlock: + rte_spinlock_unlock(&avp->lock); + return ret; diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c index daeb3308f4..6a7fddffca 100644 --- a/dpdk/drivers/net/axgbe/axgbe_dev.c @@ -17397,6 +34031,241 @@ index 02236ec192..72104f8a3f 100644 */ } +diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx.c b/dpdk/drivers/net/axgbe/axgbe_rxtx.c +index 6bd41d3002..1de5b29f06 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_rxtx.c ++++ b/dpdk/drivers/net/axgbe/axgbe_rxtx.c +@@ -341,20 +341,19 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, + struct axgbe_rx_queue *rxq = rx_queue; + volatile union axgbe_rx_desc *desc; + +- uint64_t old_dirty = rxq->dirty; + struct rte_mbuf *first_seg = NULL; + struct rte_mbuf *mbuf, *tmbuf; +- unsigned int err, etlt; +- uint32_t error_status; ++ unsigned int err = 0, etlt; ++ uint32_t error_status = 0; + uint16_t idx, pidx, data_len = 0, pkt_len = 0; + uint64_t offloads; ++ bool eop = 0; + + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); ++ + while (nb_rx < nb_pkts) { +- bool eop = 0; + next_desc: +- if (unlikely(idx == rxq->nb_desc)) +- idx = 0; ++ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); + + desc = &rxq->desc[idx]; + +@@ -382,19 +381,6 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, + } + + mbuf = rxq->sw_ring[idx]; +- /* Check for any errors and free mbuf*/ +- err = AXGMAC_GET_BITS_LE(desc->write.desc3, +- RX_NORMAL_DESC3, ES); +- error_status = 0; +- if (unlikely(err)) { +- error_status = desc->write.desc3 & AXGBE_ERR_STATUS; +- if ((error_status != AXGBE_L3_CSUM_ERR) +- && 
(error_status != AXGBE_L4_CSUM_ERR)) { +- rxq->errors++; +- rte_pktmbuf_free(mbuf); +- goto err_set; +- } +- } + rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *)); + + if (!AXGMAC_GET_BITS_LE(desc->write.desc3, +@@ -405,76 +391,112 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, + } else { + eop = 1; + pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, +- RX_NORMAL_DESC3, PL); +- data_len = pkt_len - rxq->crc_len; ++ RX_NORMAL_DESC3, PL) - rxq->crc_len; ++ data_len = pkt_len % rxq->buf_size; ++ /* Check for any errors and free mbuf*/ ++ err = AXGMAC_GET_BITS_LE(desc->write.desc3, ++ RX_NORMAL_DESC3, ES); ++ error_status = 0; ++ if (unlikely(err)) { ++ error_status = desc->write.desc3 & ++ AXGBE_ERR_STATUS; ++ if (error_status != AXGBE_L3_CSUM_ERR && ++ error_status != AXGBE_L4_CSUM_ERR) { ++ rxq->errors++; ++ rte_pktmbuf_free(mbuf); ++ rte_pktmbuf_free(first_seg); ++ first_seg = NULL; ++ eop = 0; ++ goto err_set; ++ } ++ } ++ ++ } ++ /* Mbuf populate */ ++ mbuf->data_off = RTE_PKTMBUF_HEADROOM; ++ mbuf->data_len = data_len; ++ mbuf->pkt_len = data_len; ++ ++ if (rxq->saved_mbuf) { ++ first_seg = rxq->saved_mbuf; ++ rxq->saved_mbuf = NULL; + } + + if (first_seg != NULL) { +- if (rte_pktmbuf_chain(first_seg, mbuf) != 0) +- rte_mempool_put(rxq->mb_pool, +- first_seg); ++ if (rte_pktmbuf_chain(first_seg, mbuf) != 0) { ++ rte_pktmbuf_free(first_seg); ++ first_seg = NULL; ++ rte_pktmbuf_free(mbuf); ++ rxq->saved_mbuf = NULL; ++ rxq->errors++; ++ eop = 0; ++ break; ++ } + } else { + first_seg = mbuf; + } + + /* Get the RSS hash */ + if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV)) +- mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1); ++ first_seg->hash.rss = ++ rte_le_to_cpu_32(desc->write.desc1); + etlt = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, ETLT); + offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads; + if (!err || !etlt) { + if (etlt == RX_CVLAN_TAG_PRESENT) { +- mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN; +- mbuf->vlan_tci = ++ first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN; ++ first_seg->vlan_tci = + AXGMAC_GET_BITS_LE(desc->write.desc0, + RX_NORMAL_DESC0, OVT); + if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) +- mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED; ++ first_seg->ol_flags |= ++ RTE_MBUF_F_RX_VLAN_STRIPPED; + else +- mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED; ++ first_seg->ol_flags &= ++ ~RTE_MBUF_F_RX_VLAN_STRIPPED; + } else { +- mbuf->ol_flags &= +- ~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED); +- mbuf->vlan_tci = 0; ++ first_seg->ol_flags &= ++ ~(RTE_MBUF_F_RX_VLAN | ++ RTE_MBUF_F_RX_VLAN_STRIPPED); ++ first_seg->vlan_tci = 0; + } + } +- /* Mbuf populate */ +- mbuf->data_off = RTE_PKTMBUF_HEADROOM; +- mbuf->data_len = data_len; + + err_set: + rxq->cur++; +- rxq->sw_ring[idx++] = tmbuf; ++ rxq->sw_ring[idx] = tmbuf; + desc->read.baddr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf)); + memset((void *)(&desc->read.desc2), 0, 8); + AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1); +- rxq->dirty++; + +- if (!eop) { +- rte_pktmbuf_free(mbuf); ++ if (!eop) + goto next_desc; +- } ++ eop = 0; + +- first_seg->pkt_len = pkt_len; + rxq->bytes += pkt_len; +- mbuf->next = NULL; + + first_seg->port = rxq->port_id; + if (rxq->pdata->rx_csum_enable) { +- mbuf->ol_flags = 0; +- mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; +- mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; ++ first_seg->ol_flags = 0; ++ first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; ++ first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + if 
(unlikely(error_status == AXGBE_L3_CSUM_ERR)) { +- mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD; +- mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +- mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD; +- mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; ++ first_seg->ol_flags &= ++ ~RTE_MBUF_F_RX_IP_CKSUM_GOOD; ++ first_seg->ol_flags |= ++ RTE_MBUF_F_RX_IP_CKSUM_BAD; ++ first_seg->ol_flags &= ++ ~RTE_MBUF_F_RX_L4_CKSUM_GOOD; ++ first_seg->ol_flags |= ++ RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; + } else if (unlikely(error_status + == AXGBE_L4_CSUM_ERR)) { +- mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD; +- mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; ++ first_seg->ol_flags &= ++ ~RTE_MBUF_F_RX_L4_CKSUM_GOOD; ++ first_seg->ol_flags |= ++ RTE_MBUF_F_RX_L4_CKSUM_BAD; + } + } + +@@ -484,15 +506,20 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue, + first_seg = NULL; + } + ++ /* Check if we need to save state before leaving */ ++ if (first_seg != NULL && eop == 0) ++ rxq->saved_mbuf = first_seg; ++ + /* Save receive context.*/ + rxq->pkts += nb_rx; + +- if (rxq->dirty != old_dirty) { ++ if (rxq->dirty != rxq->cur) { + rte_wmb(); +- idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1); ++ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur - 1); + AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, + low32_value(rxq->ring_phys_addr + + (idx * sizeof(union axgbe_rx_desc)))); ++ rxq->dirty = rxq->cur; + } + return nb_rx; + } +diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx.h b/dpdk/drivers/net/axgbe/axgbe_rxtx.h +index 2a330339cd..2da3095547 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_rxtx.h ++++ b/dpdk/drivers/net/axgbe/axgbe_rxtx.h +@@ -65,6 +65,12 @@ struct axgbe_rx_queue { + uint16_t crc_len; + /* address of s/w rx buffers */ + struct rte_mbuf **sw_ring; ++ ++ /* For segemented packets - save the current state ++ * of packet, if next descriptor is not ready yet ++ */ ++ struct rte_mbuf *saved_mbuf; ++ + /* Port private data */ + struct axgbe_port *pdata; + /* Number of Rx descriptors in queue */ diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c index 816371cd79..d95a446bef 100644 --- a/dpdk/drivers/net/axgbe/axgbe_rxtx_vec_sse.c @@ -17618,6 +34487,50 @@ index 80d19cbfd6..d7e1729e68 100644 * in ports on which storage services where never requested. 
*/ #define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc)) +diff --git a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c +index f36ad30e17..e0be3c137c 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c +@@ -211,6 +211,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev) + { + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(sc); + +@@ -244,6 +245,11 @@ bnx2x_dev_start(struct rte_eth_dev *dev) + + bnx2x_print_device_info(sc); + ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return ret; + } + +@@ -252,6 +258,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) + { + struct bnx2x_softc *sc = dev->data->dev_private; + int ret = 0; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(sc); + +@@ -277,6 +284,11 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) + return ret; + } + ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c index 1cd972591a..c07b01510a 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c @@ -18268,7 +35181,7 @@ index a43b22a8f8..e1dcf3ac2f 100644 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: PMD_DRV_LOG(INFO, "Port conn async event\n"); diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -index f79f33ab4e..517e4b3898 100644 +index f79f33ab4e..44fd45a4e9 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c +++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -177,6 +177,7 @@ static int bnxt_restore_vlan_filters(struct bnxt *bp); @@ -18418,7 +35331,15 @@ index f79f33ab4e..517e4b3898 100644 dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT; dev_info->speed_capa = bnxt_get_speed_capabilities(bp); -@@ -1067,6 +1071,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) +@@ -1013,7 +1017,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, + .tx_free_thresh = 32, + .tx_rs_thresh = 32, + }; +- eth_dev->data->dev_conf.intr_conf.lsc = 1; + + dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; +@@ -1067,6 +1070,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; @@ -18426,7 +35347,7 @@ index f79f33ab4e..517e4b3898 100644 int rc; bp->rx_queues = (void *)eth_dev->data->rx_queues; -@@ -1141,6 +1146,17 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) +@@ -1141,6 +1145,17 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; eth_dev->data->dev_conf.rxmode.offloads = rx_offloads; @@ -18444,7 +35365,7 @@ index f79f33ab4e..517e4b3898 100644 bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu); return 0; -@@ -1336,6 +1352,11 @@ static int bnxt_handle_if_change_status(struct bnxt *bp) +@@ -1336,6 +1351,11 @@ static int bnxt_handle_if_change_status(struct bnxt *bp) /* clear fatal flag so that re-init happens */ bp->flags &= ~BNXT_FLAG_FATAL_ERROR; @@ -18456,7 +35377,24 @@ index f79f33ab4e..517e4b3898 100644 rc = 
bnxt_init_resources(bp, true); bp->flags &= ~BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; -@@ -1525,7 +1546,7 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) +@@ -1460,6 +1480,7 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; + struct rte_eth_link link; ++ uint16_t i; + int ret; + + eth_dev->data->dev_started = 0; +@@ -1521,11 +1542,16 @@ static int bnxt_dev_stop(struct rte_eth_dev *eth_dev) + + eth_dev->data->scattered_rx = 0; + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; } /* Unload the driver, release resources */ @@ -18465,7 +35403,7 @@ index f79f33ab4e..517e4b3898 100644 { struct bnxt *bp = eth_dev->data->dev_private; -@@ -1541,18 +1562,13 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) +@@ -1541,18 +1567,13 @@ static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) return bnxt_dev_stop(eth_dev); } @@ -18485,7 +35423,15 @@ index f79f33ab4e..517e4b3898 100644 if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) PMD_DRV_LOG(ERR, "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", -@@ -1673,6 +1689,7 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) +@@ -1639,6 +1660,7 @@ static void bnxt_drv_uninit(struct bnxt *bp) + bnxt_free_link_info(bp); + bnxt_free_parent_info(bp); + bnxt_uninit_locks(bp); ++ bnxt_free_rep_info(bp); + + rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); + bp->tx_mem_zone = NULL; +@@ -1673,6 +1695,7 @@ static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp); rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp); bnxt_cancel_fc_thread(bp); @@ -18493,7 +35439,7 @@ index f79f33ab4e..517e4b3898 100644 if (eth_dev->data->dev_started) ret = bnxt_dev_stop(eth_dev); -@@ -1812,6 +1829,14 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) +@@ -1812,6 +1835,14 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) if (bp->link_info == NULL) goto out; @@ -18508,7 +35454,7 @@ index f79f33ab4e..517e4b3898 100644 do { /* Retrieve link info from hardware */ rc = bnxt_get_hwrm_link_config(bp, &new); -@@ -1829,12 +1854,6 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) +@@ -1829,12 +1860,6 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); } while (cnt--); @@ -18521,7 +35467,7 @@ index f79f33ab4e..517e4b3898 100644 out: /* Timed out or success */ if (new.link_status != eth_dev->data->dev_link.link_status || -@@ -2125,11 +2144,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, +@@ -2125,11 +2150,6 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, return -EINVAL; } @@ -18533,7 +35479,7 @@ index f79f33ab4e..517e4b3898 100644 /* Update the default RSS VNIC(s) */ vnic = BNXT_GET_DEFAULT_VNIC(bp); vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf); -@@ -2137,6 +2151,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, +@@ -2137,6 +2157,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf, RTE_ETH_RSS_LEVEL(rss_conf->rss_hf)); @@ -18543,7 +35489,7 @@ index f79f33ab4e..517e4b3898 100644 /* 
* If hashkey is not specified, use the previously configured * hashkey -@@ -2152,6 +2169,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, +@@ -2152,6 +2175,9 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, } memcpy(vnic->rss_hash_key, rss_conf->rss_key, rss_conf->rss_key_len); @@ -18553,7 +35499,7 @@ index f79f33ab4e..517e4b3898 100644 rss_config: rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); return rc; -@@ -2831,9 +2851,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, +@@ -2831,9 +2857,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, uint32_t nb_mc_addr) { struct bnxt *bp = eth_dev->data->dev_private; @@ -18564,7 +35510,7 @@ index f79f33ab4e..517e4b3898 100644 int rc; rc = is_bnxt_in_error(bp); -@@ -2842,6 +2861,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, +@@ -2842,6 +2867,8 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, vnic = BNXT_GET_DEFAULT_VNIC(bp); @@ -18573,7 +35519,7 @@ index f79f33ab4e..517e4b3898 100644 if (nb_mc_addr > BNXT_MAX_MC_ADDRS) { vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; goto allmulti; -@@ -2849,14 +2870,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, +@@ -2849,14 +2876,10 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, /* TODO Check for Duplicate mcast addresses */ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; @@ -18591,7 +35537,7 @@ index f79f33ab4e..517e4b3898 100644 vnic->flags |= BNXT_VNIC_INFO_MCAST; else vnic->flags &= ~BNXT_VNIC_INFO_MCAST; -@@ -3003,9 +3020,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, +@@ -3003,9 +3026,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) { @@ -18601,14 +35547,14 @@ index f79f33ab4e..517e4b3898 100644 uint32_t rc; uint32_t i; -@@ -3013,35 +3028,25 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) +@@ -3013,35 +3034,25 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) if (rc) return rc; + /* Return if port is active */ + if (eth_dev->data->dev_started) { + PMD_DRV_LOG(ERR, "Stop port before changing MTU\n"); -+ return -EPERM; ++ return -EBUSY; + } + /* Exit if receive queues are not configured yet */ @@ -18647,7 +35593,7 @@ index f79f33ab4e..517e4b3898 100644 for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; uint16_t size = 0; -@@ -4264,6 +4269,18 @@ static int bnxt_restore_mac_filters(struct bnxt *bp) +@@ -4264,6 +4275,18 @@ static int bnxt_restore_mac_filters(struct bnxt *bp) return 0; } @@ -18666,7 +35612,7 @@ index f79f33ab4e..517e4b3898 100644 static int bnxt_restore_filters(struct bnxt *bp) { struct rte_eth_dev *dev = bp->eth_dev; -@@ -4284,14 +4301,21 @@ static int bnxt_restore_filters(struct bnxt *bp) +@@ -4284,14 +4307,21 @@ static int bnxt_restore_filters(struct bnxt *bp) if (ret) return ret; @@ -18690,7 +35636,7 @@ index f79f33ab4e..517e4b3898 100644 int rc = 0; do { -@@ -4345,16 +4369,16 @@ static void bnxt_dev_recover(void *arg) +@@ -4345,16 +4375,16 @@ static void bnxt_dev_recover(void *arg) goto err_start; } @@ -18711,7 +35657,7 @@ index f79f33ab4e..517e4b3898 100644 PMD_DRV_LOG(INFO, "Port: %u Recovered from FW reset\n", bp->eth_dev->data->port_id); pthread_mutex_unlock(&bp->err_recovery_lock); -@@ -4985,11 +5009,15 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp) +@@ -4985,11 +5015,15 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp) static int bnxt_setup_mac_addr(struct rte_eth_dev 
*eth_dev) { struct bnxt *bp = eth_dev->data->dev_private; @@ -18729,7 +35675,7 @@ index f79f33ab4e..517e4b3898 100644 0); if (eth_dev->data->mac_addrs == NULL) { PMD_DRV_LOG(ERR, "Failed to alloc MAC addr tbl\n"); -@@ -5016,6 +5044,23 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) +@@ -5016,6 +5050,23 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev) /* Copy the permanent MAC from the FUNC_QCAPS response */ memcpy(ð_dev->data->mac_addrs[0], bp->mac_addr, RTE_ETHER_ADDR_LEN); @@ -18753,7 +35699,7 @@ index f79f33ab4e..517e4b3898 100644 return rc; } -@@ -5178,10 +5223,6 @@ static int bnxt_get_config(struct bnxt *bp) +@@ -5178,10 +5229,6 @@ static int bnxt_get_config(struct bnxt *bp) if (rc) return rc; @@ -18764,7 +35710,7 @@ index f79f33ab4e..517e4b3898 100644 bnxt_hwrm_port_mac_qcfg(bp); bnxt_hwrm_parent_pf_qcfg(bp); -@@ -5229,6 +5270,25 @@ bnxt_init_locks(struct bnxt *bp) +@@ -5229,6 +5276,25 @@ bnxt_init_locks(struct bnxt *bp) return err; } @@ -18790,7 +35736,7 @@ index f79f33ab4e..517e4b3898 100644 static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) { int rc = 0; -@@ -5237,6 +5297,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) +@@ -5237,6 +5303,10 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) if (rc) return rc; @@ -18801,7 +35747,7 @@ index f79f33ab4e..517e4b3898 100644 if (!reconfig_dev) { rc = bnxt_setup_mac_addr(bp->eth_dev); if (rc) -@@ -5272,6 +5336,16 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) +@@ -5272,6 +5342,16 @@ static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev) } } @@ -18818,7 +35764,7 @@ index f79f33ab4e..517e4b3898 100644 rc = bnxt_alloc_mem(bp, reconfig_dev); if (rc) return rc; -@@ -5666,24 +5740,6 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) +@@ -5666,24 +5746,6 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs) return ret; } @@ -18843,7 +35789,7 @@ index f79f33ab4e..517e4b3898 100644 /* Allocate and initialize various fields in bnxt struct that * need to be allocated/destroyed only once in the lifetime of the driver */ -@@ -5760,10 +5816,6 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev) +@@ -5760,10 +5822,6 @@ static int bnxt_drv_init(struct rte_eth_dev *eth_dev) if (rc) return rc; @@ -18854,7 +35800,15 @@ index f79f33ab4e..517e4b3898 100644 return rc; } -@@ -5916,6 +5968,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) +@@ -5794,6 +5852,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) + + rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; ++ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + + bp = eth_dev->data->dev_private; + +@@ -5916,14 +5975,16 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev) if (!reconfig_dev) { bnxt_free_hwrm_resources(bp); bnxt_free_error_recovery_info(bp); @@ -18865,7 +35819,16 @@ index f79f33ab4e..517e4b3898 100644 } bnxt_uninit_ctx_mem(bp); -@@ -6302,4 +6358,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev) + + bnxt_free_flow_stats_info(bp); +- if (bp->rep_info != NULL) +- bnxt_free_switch_domain(bp); +- bnxt_free_rep_info(bp); ++ bnxt_free_switch_domain(bp); + rte_free(bp->ptp_cfg); + bp->ptp_cfg = NULL; + return rc; +@@ -6302,4 +6363,4 @@ bool is_bnxt_supported(struct rte_eth_dev *dev) RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE); RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map); 
@@ -19030,7 +35993,7 @@ index d062be5525..8bdf2405f0 100644 if (!ret || update_flow) { flow->filter = filter; diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c -index f53f8632fe..9c5257309a 100644 +index f53f8632fe..51e1e2d6b3 100644 --- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c +++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c @@ -506,8 +506,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, @@ -19354,6 +36317,15 @@ index f53f8632fe..9c5257309a 100644 } static int32_t +@@ -4514,7 +4535,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) + uint16_t duration = 0; + int rc, i; + +- if (!bp->leds->num_leds || BNXT_VF(bp)) ++ if (BNXT_VF(bp) || !bp->leds || !bp->leds->num_leds) + return -EOPNOTSUPP; + + HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB); @@ -6106,38 +6127,6 @@ int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp) return rc; } @@ -19623,7 +36595,7 @@ index dc437f314e..4cdbb177d9 100644 bp->async_cp_ring = cpr; cpr->cp_ring_struct = ring; diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.c b/dpdk/drivers/net/bnxt/bnxt_rxq.c -index 1456f8b54f..4f2e0e7376 100644 +index 1456f8b54f..c38be2f348 100644 --- a/dpdk/drivers/net/bnxt/bnxt_rxq.c +++ b/dpdk/drivers/net/bnxt/bnxt_rxq.c @@ -20,6 +20,32 @@ @@ -19735,7 +36707,20 @@ index 1456f8b54f..4f2e0e7376 100644 return 0; err: bnxt_rx_queue_release_op(eth_dev, queue_idx); -@@ -472,10 +484,11 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -460,6 +472,12 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -EINVAL; + } + ++ /* reset the previous stats for the rx_queue since the counters ++ * will be cleared when the queue is started. ++ */ ++ memset(&bp->prev_rx_ring_stats[rx_queue_id], 0, ++ sizeof(struct bnxt_ring_stats)); ++ + /* Set the queue state to started here. + * We check the status of the queue while posting buffer. + * If queue is it started, we do not post buffers for Rx. 
+@@ -472,10 +490,11 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (rc) return rc; @@ -19751,7 +36736,7 @@ index 1456f8b54f..4f2e0e7376 100644 PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id); if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { -@@ -574,6 +587,9 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -574,6 +593,9 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (active_queue_cnt == 0) { uint16_t saved_mru = vnic->mru; @@ -19772,10 +36757,18 @@ index 0331c23810..287df8dff3 100644 +uint64_t bnxt_get_rx_port_offloads(struct bnxt *bp); #endif diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.c b/dpdk/drivers/net/bnxt/bnxt_rxr.c -index 44247d7200..b60c2470f3 100644 +index 44247d7200..4e56c7c99f 100644 --- a/dpdk/drivers/net/bnxt/bnxt_rxr.c +++ b/dpdk/drivers/net/bnxt/bnxt_rxr.c -@@ -824,6 +824,9 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp, +@@ -813,7 +813,6 @@ bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1, + + skip_mark: + mbuf->hash.fdir.hi = 0; +- mbuf->hash.fdir.id = 0; + + return 0; + } +@@ -824,6 +823,9 @@ void bnxt_set_mark_in_mbuf(struct bnxt *bp, { uint32_t cfa_code = 0; @@ -19785,7 +36778,7 @@ index 44247d7200..b60c2470f3 100644 cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code); if (!cfa_code) return; -@@ -1408,6 +1411,9 @@ int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr) +@@ -1408,6 +1410,9 @@ int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr) cons = RING_CMP(cpr->cp_ring_struct, raw_cons); rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; @@ -20022,10 +37015,23 @@ index 67fd4cbebb..f3a03812ad 100644 +uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp); #endif diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c -index e2b7e40571..3b8f2382f9 100644 +index e2b7e40571..ec63b97fe2 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txr.c +++ b/dpdk/drivers/net/bnxt/bnxt_txr.c -@@ -602,6 +602,9 @@ int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr) +@@ -551,6 +551,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + if (rc) + return rc; + ++ /* reset the previous stats for the tx_queue since the counters ++ * will be cleared when the queue is started. ++ */ ++ memset(&bp->prev_tx_ring_stats[tx_queue_id], 0, ++ sizeof(struct bnxt_ring_stats)); ++ + bnxt_free_hwrm_tx_ring(bp, tx_queue_id); + rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id); + if (rc) +@@ -602,6 +608,9 @@ int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr) cons = RING_CMPL(ring_mask, raw_cons); txcmp = (struct tx_cmpl *)&cp_desc_ring[cons]; @@ -20304,6 +37310,26 @@ index dd0a347058..5a99c7a06e 100644 * * NOTE: Also performs virt2phy address conversion by default thus is * can be expensive to invoke. +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h +index b27678dae9..2b02836a40 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h ++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h +@@ -203,13 +203,13 @@ ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, + * Flush all flows in the flow database. + * + * ulp_ctxt [in] Ptr to ulp context +- * tbl_idx [in] The index to table ++ * flow_type [in] - specify default or regular + * + * returns 0 on success or negative number on failure + */ + int32_t + ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, +- uint32_t idx); ++ enum bnxt_ulp_fdb_type flow_type); + + /* + * Flush all flows in the flow database that belong to a device function. 
diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c index f4274dd634..9edf3e8799 100644 --- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c @@ -20362,7 +37388,7 @@ index 8b104b6391..8222e3cd38 100644 slave_remove(struct bond_dev_private *internals, struct rte_eth_dev *slave_eth_dev); diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c -index ca50583d62..b3cddd8a20 100644 +index ca50583d62..e4eb113927 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -243,7 +243,7 @@ record_default(struct port *port) @@ -20416,7 +37442,22 @@ index ca50583d62..b3cddd8a20 100644 ACTOR_STATE_CLR(port, SYNCHRONIZATION); MODE4_DEBUG("Out of sync -> ATTACHED\n"); } -@@ -696,7 +696,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id) +@@ -652,12 +652,9 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) + } + + static uint16_t +-max_index(uint64_t *a, int n) ++max_index(uint64_t *a, uint16_t n) + { +- if (n <= 0) +- return -1; +- +- int i, max_i = 0; ++ uint16_t i, max_i = 0; + uint64_t max = a[0]; + + for (i = 1; i < n; ++i) { +@@ -696,7 +693,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t slave_id) /* Search for aggregator suitable for this port */ for (i = 0; i < slaves_count; ++i) { agg = &bond_mode_8023ad_ports[slaves[i]]; @@ -20425,7 +37466,7 @@ index ca50583d62..b3cddd8a20 100644 if (agg->aggregator_port_id != slaves[i]) continue; -@@ -921,7 +921,7 @@ bond_mode_8023ad_periodic_cb(void *arg) +@@ -921,7 +918,7 @@ bond_mode_8023ad_periodic_cb(void *arg) SM_FLAG_SET(port, BEGIN); @@ -20434,7 +37475,7 @@ index ca50583d62..b3cddd8a20 100644 if (SM_FLAG(port, LACP_ENABLED)) { /* If port was enabled set it to BEGIN state */ SM_FLAG_CLR(port, LACP_ENABLED); -@@ -1069,7 +1069,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, +@@ -1069,7 +1066,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION; port->sm_flags = SM_FLAGS_BEGIN; @@ -20444,7 +37485,7 @@ index ca50583d62..b3cddd8a20 100644 if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) { diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h -index 11a71a55e5..7eb392f8c8 100644 +index 11a71a55e5..025bd0ec54 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h +++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -68,7 +68,7 @@ struct port_params { @@ -20456,7 +37497,18 @@ index 11a71a55e5..7eb392f8c8 100644 uint16_t port_priority; /**< Priority of this (unused in current implementation) */ uint16_t port_number; -@@ -317,7 +317,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id); +@@ -197,10 +197,6 @@ int + rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id, + struct rte_eth_bond_8023ad_slave_info *conf); + +-#ifdef __cplusplus +-} +-#endif +- + /** + * Configure a slave port to start collecting. 
+ * +@@ -317,7 +313,7 @@ rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id); * @param port_id Bonding device id * * @return @@ -20465,6 +37517,16 @@ index 11a71a55e5..7eb392f8c8 100644 */ int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id); +@@ -331,4 +327,9 @@ rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id); + int + rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id, + enum rte_bond_8023ad_agg_selection agg_selection); ++ ++#ifdef __cplusplus ++} ++#endif ++ + #endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_alb.h b/dpdk/drivers/net/bonding/rte_eth_bond_alb.h index 386e70c594..4e9aeda9bc 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_alb.h @@ -20479,7 +37541,7 @@ index 386e70c594..4e9aeda9bc 100644 uint16_t bond_mode_alb_arp_upd(struct client_data *client_info, diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/dpdk/drivers/net/bonding/rte_eth_bond_api.c -index 84943cffe2..919c580fb8 100644 +index 84943cffe2..6cdef1cd2d 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_api.c +++ b/dpdk/drivers/net/bonding/rte_eth_bond_api.c @@ -375,7 +375,7 @@ eth_bond_slave_inherit_dev_info_rx_next(struct bond_dev_private *internals, @@ -20500,7 +37562,19 @@ index 84943cffe2..919c580fb8 100644 */ txconf_i->offloads = (txconf_i->offloads | txconf->offloads) & internals->tx_queue_offload_capa; -@@ -566,6 +566,12 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) +@@ -541,6 +541,11 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) + return ret; + } + ++ /* Bond mode Broadcast & 8023AD don't support MBUF_FAST_FREE offload. */ ++ if (internals->mode == BONDING_MODE_8023AD || ++ internals->mode == BONDING_MODE_BROADCAST) ++ internals->tx_offload_capa &= ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; ++ + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= + internals->flow_type_rss_offloads; + +@@ -566,6 +571,12 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id) slave_port_id); return -1; } @@ -20513,7 +37587,7 @@ index 84943cffe2..919c580fb8 100644 } /* Update all slave devices MACs */ -@@ -668,7 +674,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, +@@ -668,7 +679,7 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, } if (slave_idx < 0) { @@ -20522,11 +37596,126 @@ index 84943cffe2..919c580fb8 100644 internals->slave_count); return -1; } +@@ -698,6 +709,16 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id, + } + } + ++ /* Remove the dedicated queues flow */ ++ if (internals->mode == BONDING_MODE_8023AD && ++ internals->mode4.dedicated_queues.enabled == 1 && ++ internals->mode4.dedicated_queues.flow[slave_port_id] != NULL) { ++ rte_flow_destroy(slave_port_id, ++ internals->mode4.dedicated_queues.flow[slave_port_id], ++ &flow_error); ++ internals->mode4.dedicated_queues.flow[slave_port_id] = NULL; ++ } ++ + slave_eth_dev = &rte_eth_devices[slave_port_id]; + slave_remove(internals, slave_eth_dev); + slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE); +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_args.c b/dpdk/drivers/net/bonding/rte_eth_bond_args.c +index 5406e1c934..a20a461159 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_args.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_args.c +@@ -211,6 +211,12 @@ bond_ethdev_parse_socket_id_kvarg(const char *key __rte_unused, + if (*endptr != 0 || errno != 0) + return -1; + ++ /* SOCKET_ID_ANY also consider a valid socket id */ ++ if 
((int8_t)socket_id == SOCKET_ID_ANY) { ++ *(int *)extra_args = SOCKET_ID_ANY; ++ return 0; ++ } ++ + /* validate socket id value */ + if (socket_id >= 0 && socket_id < RTE_MAX_NUMA_NODES) { + *(int *)extra_args = (int)socket_id; diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c -index 84f4900ee5..9b3acde46c 100644 +index 84f4900ee5..7d957133e3 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c -@@ -1318,7 +1318,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, +@@ -82,7 +82,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + bufs + num_rx_total, nb_pkts); + num_rx_total += num_rx_slave; + nb_pkts -= num_rx_slave; +- if (++active_slave == slave_count) ++ if (++active_slave >= slave_count) + active_slave = 0; + } + +@@ -198,7 +198,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, + if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues || + slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) { + RTE_BOND_LOG(ERR, +- "%s: Slave %d capabilities doesn't allow to allocate additional queues", ++ "%s: Slave %d capabilities doesn't allow allocating additional queues", + __func__, slave_port); + return -1; + } +@@ -271,6 +271,24 @@ bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) { + return 0; + } + ++static bool ++is_bond_mac_addr(const struct rte_ether_addr *ea, ++ const struct rte_ether_addr *mac_addrs, uint32_t max_mac_addrs) ++{ ++ uint32_t i; ++ ++ for (i = 0; i < max_mac_addrs; i++) { ++ /* skip zero address */ ++ if (rte_is_zero_ether_addr(&mac_addrs[i])) ++ continue; ++ ++ if (rte_is_same_ether_addr(ea, &mac_addrs[i])) ++ return true; ++ } ++ ++ return false; ++} ++ + static inline uint16_t + rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, + bool dedicated_rxq) +@@ -331,8 +349,9 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, + /* Remove packet from array if: + * - it is slow packet but no dedicated rxq is present, + * - slave is not in collecting state, +- * - bonding interface is not in promiscuous mode: +- * - packet is unicast and address does not match, ++ * - bonding interface is not in promiscuous mode and ++ * packet address isn't in mac_addrs array: ++ * - packet is unicast, + * - packet is multicast and bonding interface + * is not in allmulti, + */ +@@ -342,12 +361,10 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, + bufs[j])) || + !collecting || + (!promisc && +- ((rte_is_unicast_ether_addr(&hdr->dst_addr) && +- !rte_is_same_ether_addr(bond_mac, +- &hdr->dst_addr)) || +- (!allmulti && +- rte_is_multicast_ether_addr(&hdr->dst_addr)))))) { +- ++ !is_bond_mac_addr(&hdr->dst_addr, bond_mac, ++ BOND_MAX_MAC_ADDRS) && ++ (rte_is_unicast_ether_addr(&hdr->dst_addr) || ++ !allmulti)))) { + if (hdr->ether_type == ether_type_slow_be) { + bond_mode_8023ad_handle_slow_pkt( + internals, slaves[idx], bufs[j]); +@@ -768,7 +785,7 @@ burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts, + ((char *)ipv4_hdr + + ip_hdr_offset); + if ((size_t)tcp_hdr + sizeof(*tcp_hdr) +- < pkt_end) ++ <= pkt_end) + l4hash = HASH_L4_PORTS(tcp_hdr); + } else if (ipv4_hdr->next_proto_id == + IPPROTO_UDP) { +@@ -1318,7 +1335,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs, /* Increment reference count on mbufs */ for (i = 0; i < nb_pkts; i++) @@ -20535,7 +37724,7 @@ index 84f4900ee5..9b3acde46c 
100644 /* Transmit burst on each active slave */ for (i = 0; i < num_of_slaves; i++) { -@@ -1554,7 +1554,7 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) +@@ -1554,7 +1571,7 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev) } int @@ -20544,7 +37733,7 @@ index 84f4900ee5..9b3acde46c 100644 { struct bond_dev_private *internals; -@@ -1678,14 +1678,10 @@ int +@@ -1678,14 +1695,10 @@ int slave_configure(struct rte_eth_dev *bonded_eth_dev, struct rte_eth_dev *slave_eth_dev) { @@ -20559,7 +37748,7 @@ index 84f4900ee5..9b3acde46c 100644 struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; -@@ -1711,19 +1707,32 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, +@@ -1711,19 +1724,23 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; slave_eth_dev->data->dev_conf.rxmode.mq_mode = bonded_eth_dev->data->dev_conf.rxmode.mq_mode; @@ -20582,25 +37771,16 @@ index 84f4900ee5..9b3acde46c 100644 slave_eth_dev->data->dev_conf.rxmode.mtu = bonded_eth_dev->data->dev_conf.rxmode.mtu; -+ slave_eth_dev->data->dev_conf.txmode.offloads |= -+ bonded_eth_dev->data->dev_conf.txmode.offloads; -+ -+ slave_eth_dev->data->dev_conf.txmode.offloads &= -+ (bonded_eth_dev->data->dev_conf.txmode.offloads | -+ ~internals->tx_offload_capa); -+ -+ slave_eth_dev->data->dev_conf.rxmode.offloads |= -+ bonded_eth_dev->data->dev_conf.rxmode.offloads; -+ -+ slave_eth_dev->data->dev_conf.rxmode.offloads &= -+ (bonded_eth_dev->data->dev_conf.rxmode.offloads | -+ ~internals->rx_offload_capa); ++ slave_eth_dev->data->dev_conf.txmode.offloads = ++ bonded_eth_dev->data->dev_conf.txmode.offloads; + ++ slave_eth_dev->data->dev_conf.rxmode.offloads = ++ bonded_eth_dev->data->dev_conf.rxmode.offloads; + nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; -@@ -1734,14 +1743,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, +@@ -1734,14 +1751,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, } } @@ -20615,7 +37795,7 @@ index 84f4900ee5..9b3acde46c 100644 /* Configure device */ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, nb_rx_queues, nb_tx_queues, -@@ -1752,6 +1753,27 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, +@@ -1752,6 +1761,27 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, return errval; } @@ -20643,7 +37823,7 @@ index 84f4900ee5..9b3acde46c 100644 /* Setup Rx Queues */ for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) { bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id]; -@@ -1799,10 +1821,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, +@@ -1799,10 +1829,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, return errval; } @@ -20659,7 +37839,7 @@ index 84f4900ee5..9b3acde46c 100644 errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev, slave_eth_dev->data->port_id); -@@ -1994,6 +2019,13 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) +@@ -1994,6 +2027,13 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) internals->slaves[i].port_id); goto out_err; } @@ -20673,7 +37853,26 @@ index 84f4900ee5..9b3acde46c 100644 /* We will need to poll for link status if any slave doesn't * support interrupts */ -@@ -2092,18 +2124,20 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) +@@ -2023,6 +2063,11 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) + internals->mode == BONDING_MODE_ALB) + bond_tlb_enable(internals); + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ 
eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + + out_err: +@@ -2089,23 +2134,34 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) + eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + eth_dev->data->dev_started = 0; + ++ if (internals->link_status_polling_enabled) { ++ rte_eal_alarm_cancel(bond_ethdev_slave_link_status_change_monitor, ++ (void *)&rte_eth_devices[internals->port_id]); ++ } internals->link_status_polling_enabled = 0; for (i = 0; i < internals->slave_count; i++) { uint16_t slave_id = internals->slaves[i].port_id; @@ -20702,8 +37901,26 @@ index 84f4900ee5..9b3acde46c 100644 - } } ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ return 0; -@@ -2128,6 +2162,7 @@ bond_ethdev_close(struct rte_eth_dev *dev) + } + +@@ -2121,6 +2177,10 @@ bond_ethdev_close(struct rte_eth_dev *dev) + return 0; + + RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name); ++ ++ /* Flush flows in all back-end devices before removing them */ ++ bond_flow_ops.flush(dev, &ferror); ++ + while (internals->slave_count != skipped) { + uint16_t port_id = internals->slaves[skipped].port_id; + +@@ -2128,6 +2188,7 @@ bond_ethdev_close(struct rte_eth_dev *dev) RTE_BOND_LOG(ERR, "Failed to stop device on port %u", port_id); skipped++; @@ -20711,7 +37928,60 @@ index 84f4900ee5..9b3acde46c 100644 } if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) { -@@ -2684,6 +2719,39 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev) +@@ -2137,7 +2198,6 @@ bond_ethdev_close(struct rte_eth_dev *dev) + skipped++; + } + } +- bond_flow_ops.flush(dev, &ferror); + bond_ethdev_free_queues(dev); + rte_bitmap_reset(internals->vlan_filter_bmp); + rte_bitmap_free(internals->vlan_filter_bmp); +@@ -2166,8 +2226,6 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + + uint16_t max_nb_rx_queues = UINT16_MAX; + uint16_t max_nb_tx_queues = UINT16_MAX; +- uint16_t max_rx_desc_lim = UINT16_MAX; +- uint16_t max_tx_desc_lim = UINT16_MAX; + + dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS; + +@@ -2201,12 +2259,6 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + + if (slave_info.max_tx_queues < max_nb_tx_queues) + max_nb_tx_queues = slave_info.max_tx_queues; +- +- if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim) +- max_rx_desc_lim = slave_info.rx_desc_lim.nb_max; +- +- if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim) +- max_tx_desc_lim = slave_info.tx_desc_lim.nb_max; + } + } + +@@ -2218,8 +2270,10 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + memcpy(&dev_info->default_txconf, &internals->default_txconf, + sizeof(dev_info->default_txconf)); + +- dev_info->rx_desc_lim.nb_max = max_rx_desc_lim; +- dev_info->tx_desc_lim.nb_max = max_tx_desc_lim; ++ memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim, ++ sizeof(dev_info->rx_desc_lim)); ++ memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim, ++ sizeof(dev_info->tx_desc_lim)); + + /** + * If dedicated hw queues enabled for link bonding device in LACP mode +@@ -2384,9 +2438,6 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg) + * event callback */ + if (slave_ethdev->data->dev_link.link_status != + 
internals->slaves[i].last_link_status) { +- internals->slaves[i].last_link_status = +- slave_ethdev->data->dev_link.link_status; +- + bond_ethdev_lsc_event_callback(internals->slaves[i].port_id, + RTE_ETH_EVENT_INTR_LSC, + &bonded_ethdev->data->port_id, +@@ -2684,6 +2735,39 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev) return ret; } @@ -20751,7 +38021,7 @@ index 84f4900ee5..9b3acde46c 100644 static int bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev) { -@@ -2797,6 +2865,39 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev) +@@ -2797,6 +2881,39 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev) return ret; } @@ -20791,7 +38061,24 @@ index 84f4900ee5..9b3acde46c 100644 static void bond_ethdev_delayed_lsc_propagation(void *arg) { -@@ -2886,6 +2987,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, +@@ -2819,7 +2936,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, + + uint8_t lsc_flag = 0; + int valid_slave = 0; +- uint16_t active_pos; ++ uint16_t active_pos, slave_idx; + uint16_t i; + + if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL) +@@ -2840,6 +2957,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, + for (i = 0; i < internals->slave_count; i++) { + if (internals->slaves[i].port_id == port_id) { + valid_slave = 1; ++ slave_idx = i; + break; + } + } +@@ -2886,6 +3004,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, lsc_flag = 1; mac_address_slaves_update(bonded_eth_dev); @@ -20800,7 +38087,7 @@ index 84f4900ee5..9b3acde46c 100644 } activate_slave(bonded_eth_dev, port_id); -@@ -2915,6 +3018,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, +@@ -2915,6 +3035,8 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, else internals->current_primary_port = internals->primary_port; mac_address_slaves_update(bonded_eth_dev); @@ -20809,7 +38096,40 @@ index 84f4900ee5..9b3acde46c 100644 } } -@@ -3293,7 +3398,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) +@@ -2924,6 +3046,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type, + * slaves + */ + bond_ethdev_link_update(bonded_eth_dev, 0); ++ internals->slaves[slave_idx].last_link_status = link.link_status; + + if (lsc_flag) { + /* Cancel any possible outstanding interrupts if delays are enabled */ +@@ -3213,7 +3336,7 @@ static int + bond_alloc(struct rte_vdev_device *dev, uint8_t mode) + { + const char *name = rte_vdev_device_name(dev); +- uint8_t socket_id = dev->device.numa_node; ++ int socket_id = dev->device.numa_node; + struct bond_dev_private *internals = NULL; + struct rte_eth_dev *eth_dev = NULL; + uint32_t vlan_filter_bmp_size; +@@ -3284,6 +3407,15 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) + memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim)); + memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim)); + ++ /* ++ * Do not restrict descriptor counts until ++ * the first back-end device gets attached. 
++ */ ++ internals->rx_desc_lim.nb_max = UINT16_MAX; ++ internals->tx_desc_lim.nb_max = UINT16_MAX; ++ internals->rx_desc_lim.nb_align = 1; ++ internals->tx_desc_lim.nb_align = 1; ++ + memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); + memset(internals->slaves, 0, sizeof(internals->slaves)); + +@@ -3293,7 +3425,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode) /* Set mode 4 default configuration */ bond_mode_8023ad_setup(eth_dev, NULL); if (bond_ethdev_mode_set(eth_dev, mode)) { @@ -20818,15 +38138,25 @@ index 84f4900ee5..9b3acde46c 100644 eth_dev->data->port_id, mode); goto err; } -@@ -3483,6 +3588,7 @@ bond_ethdev_configure(struct rte_eth_dev *dev) - const char *name = dev->device->name; - struct bond_dev_private *internals = dev->data->dev_private; - struct rte_kvargs *kvlist = internals->kvlist; -+ uint64_t offloads; - int arg_count; - uint16_t port_id = dev - rte_eth_devices; - uint8_t agg_mode; -@@ -3504,6 +3610,11 @@ bond_ethdev_configure(struct rte_eth_dev *dev) +@@ -3404,7 +3536,7 @@ bond_probe(struct rte_vdev_device *dev) + port_id = bond_alloc(dev, bonding_mode); + if (port_id < 0) { + RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on " +- "socket %u.", name, bonding_mode, socket_id); ++ "socket %d.", name, bonding_mode, socket_id); + goto parse_error; + } + internals = rte_eth_devices[port_id].data->dev_private; +@@ -3429,7 +3561,7 @@ bond_probe(struct rte_vdev_device *dev) + + rte_eth_dev_probing_finish(&rte_eth_devices[port_id]); + RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on " +- "socket %u.", name, port_id, bonding_mode, socket_id); ++ "socket %d.", name, port_id, bonding_mode, socket_id); + return 0; + + parse_error: +@@ -3504,6 +3636,11 @@ bond_ethdev_configure(struct rte_eth_dev *dev) if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) { struct rte_eth_rss_conf *rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; @@ -20838,7 +38168,7 @@ index 84f4900ee5..9b3acde46c 100644 if (rss_conf->rss_key != NULL) { if (internals->rss_key_len > rss_conf->rss_key_len) { RTE_BOND_LOG(ERR, "Invalid rss key length(%u)", -@@ -3515,13 +3626,18 @@ bond_ethdev_configure(struct rte_eth_dev *dev) +@@ -3515,13 +3652,18 @@ bond_ethdev_configure(struct rte_eth_dev *dev) internals->rss_key_len); } else { if (internals->rss_key_len > sizeof(default_rss_key)) { @@ -20863,24 +38193,7 @@ index 84f4900ee5..9b3acde46c 100644 } for (i = 0; i < RTE_DIM(internals->reta_conf); i++) { -@@ -3533,6 +3649,16 @@ bond_ethdev_configure(struct rte_eth_dev *dev) - } - } - -+ offloads = dev->data->dev_conf.txmode.offloads; -+ if ((offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) && -+ (internals->mode == BONDING_MODE_8023AD || -+ internals->mode == BONDING_MODE_BROADCAST)) { -+ RTE_BOND_LOG(WARNING, -+ "bond mode broadcast & 8023AD don't support MBUF_FAST_FREE offload, force disable it."); -+ offloads &= ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; -+ dev->data->dev_conf.txmode.offloads = offloads; -+ } -+ - /* set the max_rx_pktlen */ - internals->max_rx_pktlen = internals->candidate_max_rx_pktlen; - -@@ -3765,6 +3891,18 @@ bond_ethdev_configure(struct rte_eth_dev *dev) +@@ -3765,6 +3907,18 @@ bond_ethdev_configure(struct rte_eth_dev *dev) return -1; } @@ -21043,10 +38356,31 @@ index 5d603514c0..94b0dfcde7 100644 } diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h -index fe408907a6..5806392322 100644 +index fe408907a6..9d71a9352e 100644 --- a/dpdk/drivers/net/cnxk/cn10k_rx.h +++ b/dpdk/drivers/net/cnxk/cn10k_rx.h -@@ 
-363,7 +363,13 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, +@@ -244,6 +244,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, + uint64_t rearm, const uint16_t flags) + { + const rte_iova_t *iova_list; ++ uint16_t later_skip = 0; + struct rte_mbuf *head; + const rte_iova_t *eol; + uint8_t nb_segs; +@@ -270,10 +271,11 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, + nb_segs--; + + rearm = rearm & ~0xFFFF; ++ later_skip = (uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf; + + head = mbuf; + while (nb_segs) { +- mbuf->next = ((struct rte_mbuf *)*iova_list) - 1; ++ mbuf->next = (struct rte_mbuf *)(*iova_list - later_skip); + mbuf = mbuf->next; + + RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1); +@@ -363,7 +365,13 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, *(uint64_t *)(&mbuf->rearm_data) = val; if (flag & NIX_RX_MULTI_SEG_F) @@ -21061,7 +38395,16 @@ index fe408907a6..5806392322 100644 else mbuf->next = NULL; } -@@ -451,7 +457,6 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, +@@ -416,7 +424,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, + struct nix_cqe_hdr_s *cq; + struct rte_mbuf *mbuf; + uint64_t aura_handle; +- uint64_t sa_base; ++ uint64_t sa_base = 0; + uint16_t lmt_id; + uint64_t laddr; + +@@ -451,7 +459,6 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, flags); cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, (flags & NIX_RX_OFFLOAD_TSTAMP_F), @@ -21069,7 +38412,7 @@ index fe408907a6..5806392322 100644 (uint64_t *)((uint8_t *)mbuf + data_off)); rx_pkts[packets++] = mbuf; -@@ -481,10 +486,11 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, +@@ -481,10 +488,11 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, plt_write64((wdata | nb_pkts), rxq->cq_door); /* Free remaining meta buffers if any */ @@ -21084,6 +38427,58 @@ index fe408907a6..5806392322 100644 return nb_pkts; } +@@ -540,9 +548,9 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + uint8_t loff = 0, lnum = 0; + uint8x16_t f0, f1, f2, f3; + uint16_t lmt_id, d_off; ++ uintptr_t sa_base = 0; + uint16_t packets = 0; + uint16_t pkts_left; +- uintptr_t sa_base; + uint32_t head; + uintptr_t cq0; + +@@ -628,9 +636,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + mbuf23 = vqsubq_u64(mbuf23, data_off); + } else { + mbuf01 = +- vsubq_u64(vld1q_u64((uint64_t *)cq0), data_off); +- mbuf23 = vsubq_u64(vld1q_u64((uint64_t *)(cq0 + 16)), +- data_off); ++ vsubq_u64(vld1q_u64((uint64_t *)cq0), ++ vdupq_n_u64(sizeof(struct rte_mbuf))); ++ mbuf23 = ++ vsubq_u64(vld1q_u64((uint64_t *)(cq0 + 16)), ++ vdupq_n_u64(sizeof(struct rte_mbuf))); + } + + /* Move mbufs to scalar registers for future use */ +@@ -639,6 +649,12 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0); + mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1); + ++ /* Mark mempool obj as "get" as it is alloc'ed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1); ++ + /* Mask to get packet len from NIX_RX_SG_S */ + const uint8x16_t 
shuf_msk = { + 0xFF, 0xFF, /* pkt_type set as unknown */ +@@ -904,12 +920,6 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, + roc_prefetch_store_keep(mbuf2); + roc_prefetch_store_keep(mbuf3); + +- /* Mark mempool obj as "get" as it is alloc'ed by NIX */ +- RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1); +- RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1); +- RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1); +- RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1); +- + packets += NIX_DESCS_PER_LOOP; + + if (!(flags & NIX_RX_VWQE_F)) { diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.c b/dpdk/drivers/net/cnxk/cn10k_tx.c index 5e6c5ee111..4e1abf7804 100644 --- a/dpdk/drivers/net/cnxk/cn10k_tx.c @@ -21100,7 +38495,7 @@ index 5e6c5ee111..4e1abf7804 100644 void diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h -index 873e1871f9..6704d2d655 100644 +index 873e1871f9..bd9eba08e9 100644 --- a/dpdk/drivers/net/cnxk/cn10k_tx.h +++ b/dpdk/drivers/net/cnxk/cn10k_tx.h @@ -736,7 +736,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd, @@ -21142,7 +38537,35 @@ index 873e1871f9..6704d2d655 100644 uint8_t shft, c_shft; __uint128_t data128; uint16_t c_lmt_id; -@@ -2254,7 +2254,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -1465,10 +1465,12 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, + vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2); + vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1); + } ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0); + } else { + /* Store the prepared send desc to LMT lines */ + vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0); + vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1); ++ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0); + } + } + +@@ -1494,13 +1496,13 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + uint64x2_t sgdesc01_w1, sgdesc23_w1; + struct cn10k_eth_txq *txq = tx_queue; + rte_iova_t io_addr = txq->io_addr; ++ uint8_t lnum, shift = 0, loff; + uintptr_t laddr = txq->lmt_base; + uint8_t c_lnum, c_shft, c_loff; + uint64x2_t ltypes01, ltypes23; + uint64x2_t xtmp128, ytmp128; + uint64x2_t xmask01, xmask23; + uintptr_t c_laddr = laddr; +- uint8_t lnum, shift, loff; + rte_iova_t c_io_addr; + uint64_t sa_base; + union wdata { +@@ -2254,7 +2256,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, } if (flags & NIX_TX_OFFLOAD_TSTAMP_F) { @@ -21151,6 +38574,39 @@ index 873e1871f9..6704d2d655 100644 const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST, RTE_MBUF_F_TX_IEEE1588_TMST}; /* Set send mem alg to SUB. 
*/ +@@ -2350,28 +2352,28 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + mbuf3 = (uint64_t *)tx_pkts[3]; + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) +- vsetq_lane_u64(0x80000, xmask01, 0); ++ xmask01 = vsetq_lane_u64(0x80000, xmask01, 0); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf0)->pool, + (void **)&mbuf0, 1, 0); + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1)) +- vsetq_lane_u64(0x80000, xmask01, 1); ++ xmask01 = vsetq_lane_u64(0x80000, xmask01, 1); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf1)->pool, + (void **)&mbuf1, 1, 0); + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2)) +- vsetq_lane_u64(0x80000, xmask23, 0); ++ xmask23 = vsetq_lane_u64(0x80000, xmask23, 0); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf2)->pool, + (void **)&mbuf2, 1, 0); + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3)) +- vsetq_lane_u64(0x80000, xmask23, 1); ++ xmask23 = vsetq_lane_u64(0x80000, xmask23, 1); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf3)->pool, diff --git a/dpdk/drivers/net/cnxk/cn9k_rx.c b/dpdk/drivers/net/cnxk/cn9k_rx.c index 8d504c4a6d..60baf10b39 100644 --- a/dpdk/drivers/net/cnxk/cn9k_rx.c @@ -21218,7 +38674,7 @@ index f3f19fed97..f560286c97 100644 void diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h -index 435dde1317..8167313a15 100644 +index 435dde1317..fe44ff4290 100644 --- a/dpdk/drivers/net/cnxk/cn9k_tx.h +++ b/dpdk/drivers/net/cnxk/cn9k_tx.h @@ -304,7 +304,7 @@ cn9k_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc, @@ -21239,7 +38695,24 @@ index 435dde1317..8167313a15 100644 * address. */ send_mem->w0.cn9k.alg = -@@ -465,8 +465,8 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, +@@ -342,6 +342,16 @@ cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags) + roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags)); + } + ++static __rte_always_inline void ++cn9k_nix_sec_fc_wait_one(const struct cn9k_eth_txq *txq) ++{ ++ uint64_t nb_desc = txq->cpt_desc; ++ uint64_t *fc = txq->cpt_fc; ++ ++ while (nb_desc <= __atomic_load_n(fc, __ATOMIC_RELAXED)) ++ ; ++} ++ + static __rte_always_inline uint64_t + cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr) + { +@@ -465,8 +475,8 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, { struct cn9k_eth_txq *txq = tx_queue; const rte_iova_t io_addr = txq->io_addr; @@ -21249,7 +38722,7 @@ index 435dde1317..8167313a15 100644 uint16_t i; NIX_XMIT_FC_OR_RETURN(txq, pkts); -@@ -506,8 +506,8 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -506,8 +516,8 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, { struct cn9k_eth_txq *txq = tx_queue; const rte_iova_t io_addr = txq->io_addr; @@ -21259,7 +38732,7 @@ index 435dde1317..8167313a15 100644 uint16_t segdw; uint64_t i; -@@ -1531,7 +1531,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -1531,7 +1541,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, } if (flags & NIX_TX_OFFLOAD_TSTAMP_F) { @@ -21268,8 +38741,41 @@ index 435dde1317..8167313a15 100644 const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST, RTE_MBUF_F_TX_IEEE1588_TMST}; /* Set send mem alg to SUB. 
*/ +@@ -1625,28 +1635,28 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + mbuf3 = (uint64_t *)tx_pkts[3]; + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) +- vsetq_lane_u64(0x80000, xmask01, 0); ++ xmask01 = vsetq_lane_u64(0x80000, xmask01, 0); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf0)->pool, + (void **)&mbuf0, 1, 0); + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1)) +- vsetq_lane_u64(0x80000, xmask01, 1); ++ xmask01 = vsetq_lane_u64(0x80000, xmask01, 1); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf1)->pool, + (void **)&mbuf1, 1, 0); + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2)) +- vsetq_lane_u64(0x80000, xmask23, 0); ++ xmask23 = vsetq_lane_u64(0x80000, xmask23, 0); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf2)->pool, + (void **)&mbuf2, 1, 0); + + if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3)) +- vsetq_lane_u64(0x80000, xmask23, 1); ++ xmask23 = vsetq_lane_u64(0x80000, xmask23, 1); + else + RTE_MEMPOOL_CHECK_COOKIES( + ((struct rte_mbuf *)mbuf3)->pool, diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c -index 74f625553d..a1da90be57 100644 +index 74f625553d..94d1b17443 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c @@ -3,6 +3,8 @@ @@ -21290,6 +38796,15 @@ index 74f625553d..a1da90be57 100644 RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) { for (i = 0; i < mtr->rq_num; i++) { +@@ -565,7 +567,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, + first_skip += RTE_PKTMBUF_HEADROOM; + first_skip += rte_pktmbuf_priv_size(mp); + rq->first_skip = first_skip; +- rq->later_skip = sizeof(struct rte_mbuf); ++ rq->later_skip = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp); + rq->lpb_size = mp->elt_size; + + /* Enable Inline IPSec on RQ, will not be used for Poll mode */ @@ -597,6 +599,13 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, rxq_sp->qconf.mp = mp; @@ -21304,7 +38819,43 @@ index 74f625553d..a1da90be57 100644 /* Setup rq reference for inline dev if present */ rc = roc_nix_inl_dev_rq_get(rq); if (rc) -@@ -1122,6 +1131,10 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) +@@ -751,6 +760,27 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, + return flowkey_cfg; + } + ++static int ++nix_rxchan_cfg_disable(struct cnxk_eth_dev *dev) ++{ ++ struct roc_nix *nix = &dev->nix; ++ struct roc_nix_fc_cfg fc_cfg; ++ int rc; ++ ++ if (!roc_nix_is_lbk(nix)) ++ return 0; ++ ++ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg)); ++ fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG; ++ fc_cfg.rxchan_cfg.enable = false; ++ rc = roc_nix_fc_config_set(nix, &fc_cfg); ++ if (rc) { ++ plt_err("Failed to setup flow control, rc=%d(%s)", rc, roc_error_msg_get(rc)); ++ return rc; ++ } ++ return 0; ++} ++ + static void + nix_free_queue_mem(struct cnxk_eth_dev *dev) + { +@@ -1086,6 +1116,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) + goto fail_configure; + + roc_nix_tm_fini(nix); ++ nix_rxchan_cfg_disable(dev); + roc_nix_lf_free(nix); + } + +@@ -1122,6 +1153,10 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) goto fail_configure; } @@ -21315,11 +38866,39 @@ index 74f625553d..a1da90be57 100644 dev->npc.channel = roc_nix_get_base_chan(nix); nb_rxq = data->nb_rx_queues; +@@ -1299,6 +1334,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) + roc_nix_tm_fini(nix); + free_nix_lf: + nix_free_queue_mem(dev); ++ rc |= nix_rxchan_cfg_disable(dev); + rc |= roc_nix_lf_free(nix); 
+ fail_configure: + dev->configured = 0; +@@ -1780,6 +1816,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) + /* Free ROC RQ's, SQ's and CQ's memory */ + nix_free_queue_mem(dev); + ++ /* free nix bpid */ ++ rc = nix_rxchan_cfg_disable(dev); ++ if (rc) ++ plt_err("Failed to free nix bpid, rc=%d", rc); ++ + /* Free nix lf resources */ + rc = roc_nix_lf_free(nix); + if (rc) diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.h b/dpdk/drivers/net/cnxk/cnxk_ethdev.h -index 5bfda3d815..480cc6dfa4 100644 +index 5bfda3d815..ccdc5f04ee 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev.h +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.h -@@ -685,14 +685,11 @@ cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf, +@@ -444,6 +444,7 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev); + int cnxk_nix_remove(struct rte_pci_device *pci_dev); + int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); ++int cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev); + int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev, + struct rte_ether_addr *mc_addr_set, + uint32_t nb_mc_addr); +@@ -685,14 +686,11 @@ cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf, static __rte_always_inline void cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf, struct cnxk_timesync_info *tstamp, @@ -21424,10 +39003,71 @@ index 39d8563826..b6ccccdc39 100644 } diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -index ce5f1f7240..f1d13c5004 100644 +index ce5f1f7240..9662bb0a2c 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -@@ -517,7 +517,8 @@ cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev) +@@ -390,6 +390,44 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) + dev->dmac_filter_count--; + } + ++int ++cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev) ++{ ++ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); ++ struct rte_eth_dev_data *data = eth_dev->data; ++ int i, rc = 0; ++ ++ /* Flush all tx queues */ ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { ++ struct roc_nix_sq *sq = &dev->sqs[i]; ++ ++ if (eth_dev->data->tx_queues[i] == NULL) ++ continue; ++ ++ rc = roc_nix_tm_sq_aura_fc(sq, false); ++ if (rc) { ++ plt_err("Failed to disable sqb aura fc, rc=%d", rc); ++ goto exit; ++ } ++ ++ /* Wait for sq entries to be flushed */ ++ rc = roc_nix_tm_sq_flush_spin(sq); ++ if (rc) { ++ plt_err("Failed to drain sq, rc=%d\n", rc); ++ goto exit; ++ } ++ if (data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) { ++ rc = roc_nix_tm_sq_aura_fc(sq, true); ++ if (rc) { ++ plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", i, rc); ++ goto exit; ++ } ++ } ++ } ++exit: ++ return rc; ++} ++ + int + cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + { +@@ -433,6 +471,15 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + goto exit; + } + ++ /* if new MTU was smaller than old one, then flush all SQs before MTU change */ ++ if (old_frame_size > frame_size) { ++ if (data->dev_started) { ++ plt_err("Reducing MTU is not supported when device started"); ++ goto exit; ++ } ++ cnxk_nix_sq_flush(eth_dev); ++ } ++ + frame_size -= RTE_ETHER_CRC_LEN; + + /* Update mtu on Tx */ +@@ -517,7 +564,8 @@ cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); @@ -21437,7 +39077,7 @@ index ce5f1f7240..f1d13c5004 100644 } int -@@ -746,6 +747,8 @@ cnxk_nix_reta_update(struct rte_eth_dev *eth_dev, +@@ -746,6 +794,8 @@ 
cnxk_nix_reta_update(struct rte_eth_dev *eth_dev, goto fail; } @@ -21473,10 +39113,64 @@ index 139fea256c..359f9a30ae 100644 * using freq_mult and clk_delta calculated during configure stage. */ diff --git a/dpdk/drivers/net/cnxk/cnxk_rte_flow.c b/dpdk/drivers/net/cnxk/cnxk_rte_flow.c -index b08d7c34fa..32166ae764 100644 +index b08d7c34fa..0410f2d82e 100644 --- a/dpdk/drivers/net/cnxk/cnxk_rte_flow.c +++ b/dpdk/drivers/net/cnxk/cnxk_rte_flow.c -@@ -297,7 +297,14 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, +@@ -110,13 +110,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + struct roc_npc_action in_actions[], uint32_t *flowkey_cfg) + { + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); ++ const struct rte_flow_action_queue *act_q = NULL; + const struct rte_flow_action_port_id *port_act; +- const struct rte_flow_action_queue *act_q; + struct roc_npc *roc_npc_src = &dev->npc; + struct rte_eth_dev *portid_eth_dev; + char if_name[RTE_ETH_NAME_MAX_LEN]; + struct cnxk_eth_dev *hw_dst; + struct roc_npc *roc_npc_dst; ++ bool is_vf_action = false; + int i = 0, rc = 0; + int rq; + +@@ -150,6 +151,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + case RTE_FLOW_ACTION_TYPE_VF: + in_actions[i].type = ROC_NPC_ACTION_TYPE_VF; + in_actions[i].conf = actions->conf; ++ is_vf_action = true; + break; + + case RTE_FLOW_ACTION_TYPE_PORT_ID: +@@ -183,13 +185,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: +- act_q = (const struct rte_flow_action_queue *) +- actions->conf; +- rq = act_q->index; +- if (rq >= eth_dev->data->nb_rx_queues) { +- plt_npc_dbg("Invalid queue index"); +- goto err_exit; +- } ++ act_q = (const struct rte_flow_action_queue *)actions->conf; + in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE; + in_actions[i].conf = actions->conf; + break; +@@ -234,6 +230,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + } + i++; + } ++ ++ if (!is_vf_action && act_q) { ++ rq = act_q->index; ++ if (rq >= eth_dev->data->nb_rx_queues) { ++ plt_npc_dbg("Invalid queue index"); ++ goto err_exit; ++ } ++ } + in_actions[i].type = ROC_NPC_ACTION_TYPE_END; + return 0; + +@@ -297,7 +301,14 @@ cnxk_flow_validate(struct rte_eth_dev *eth_dev, return rc; } @@ -21773,6 +39467,30 @@ index 561d759dbc..7dbd4deb79 100644 return ret; } +diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +index e7ea76180f..b2c8c1a03f 100644 +--- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c ++++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +@@ -414,6 +414,7 @@ int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) + { + struct port_info *pi = eth_dev->data->dev_private; + struct adapter *adapter = pi->adapter; ++ uint16_t i; + + CXGBE_FUNC_TRACE(); + +@@ -429,6 +430,11 @@ int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) + t4_sge_eth_clear_queues(pi); + eth_dev->data->scattered_rx = 0; + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/cxgbe/cxgbe_flow.c b/dpdk/drivers/net/cxgbe/cxgbe_flow.c index edcbba9d7c..6e460dfe2e 100644 --- a/dpdk/drivers/net/cxgbe/cxgbe_flow.c @@ -21958,10 +39676,77 @@ index f623f3e684..566cd48406 100644 /* set offset to -1 to distinguish ingress queues without FL */ 
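The cnxk_rte_flow.c change above defers validation of a QUEUE action's index until all actions have been walked, because a flow that also carries a VF action is steered to the VF's queues and must not be checked against this port's Rx queue count. A sketch of the same deferred-validation idea, with hypothetical names rather than the PMD's internals:

#include <stdbool.h>
#include <stddef.h>

struct queue_action {
	unsigned int index;
};

/* Validate the queue index only once all actions are known: when a VF
 * redirect is present, the index refers to the VF's queues, so checking
 * it against this port's nb_rx_queues would be a false failure. */
static int
validate_queue_action(const struct queue_action *act_q, bool is_vf_action,
		      unsigned int nb_rx_queues)
{
	if (act_q == NULL || is_vf_action)
		return 0;
	return act_q->index < nb_rx_queues ? 0 : -1;
}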
diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -index e49f765434..9847ca1be1 100644 +index e49f765434..bae6c5abf2 100644 --- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c +++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -@@ -1030,7 +1030,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -133,6 +133,8 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { + }; + + static struct rte_dpaa_driver rte_dpaa_pmd; ++int dpaa_valid_dev; ++struct rte_mempool *dpaa_tx_sg_pool; + + static int + dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); +@@ -385,6 +387,7 @@ static void dpaa_interrupt_handler(void *param) + static int dpaa_eth_dev_start(struct rte_eth_dev *dev) + { + struct dpaa_if *dpaa_intf = dev->data->dev_private; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + +@@ -399,12 +402,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) + + fman_if_enable_rx(dev->process_private); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + + static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) + { + struct fman_if *fif = dev->process_private; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + dev->data->dev_started = 0; +@@ -413,6 +422,11 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) + fman_if_disable_rx(fif); + dev->tx_pkt_burst = dpaa_eth_tx_drop_all; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -978,8 +992,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + } else { + DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered" +- " mode has not been requested", +- max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM); ++ " mode has not been requested", max_rx_pktlen, buffsz); + } + + dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); +@@ -994,7 +1007,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + if (vsp_id >= 0) { + ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id, + DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid, +- fif); ++ fif, buffsz + RTE_PKTMBUF_HEADROOM); + if (ret) { + DPAA_PMD_ERR("dpaa_port_vsp_update failed"); + return ret; +@@ -1030,7 +1043,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, QM_FQCTRL_CTXASTASHING | QM_FQCTRL_PREFERINCACHE; opts.fqd.context_a.stashing.exclusive = 0; @@ -21970,7 +39755,7 @@ index e49f765434..9847ca1be1 100644 * So do not enable stashing in this case */ if (dpaa_svr_family != SVR_LS1046A_FAMILY) -@@ -1201,23 +1201,17 @@ int +@@ -1201,23 +1214,17 @@ int dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, int eth_rx_queue_id) { @@ -21998,7 +39783,7 @@ index e49f765434..9847ca1be1 100644 rxq->fqid, ret); } -@@ -1866,7 +1860,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) +@@ -1866,7 +1873,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_intf->name = dpaa_device->name; @@ -22007,7 +39792,7 @@ index e49f765434..9847ca1be1 100644 eth_dev->process_private = fman_intf; dpaa_intf->ifid = dev_id; dpaa_intf->cfg = cfg; -@@ -2169,7 +2163,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, +@@ -2169,7 +2176,7 @@ rte_dpaa_probe(struct 
rte_dpaa_driver *dpaa_drv, if (dpaa_svr_family == SVR_LS1043A_FAMILY) dpaa_push_mode_max_queue = 0; @@ -22016,11 +39801,194 @@ index e49f765434..9847ca1be1 100644 * only one queue per thread. */ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { +@@ -2215,7 +2222,20 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, + /* Invoke PMD device initialization function */ + diag = dpaa_dev_init(eth_dev); + if (diag == 0) { ++ if (!dpaa_tx_sg_pool) { ++ dpaa_tx_sg_pool = ++ rte_pktmbuf_pool_create("dpaa_mbuf_tx_sg_pool", ++ DPAA_POOL_SIZE, ++ DPAA_POOL_CACHE_SIZE, 0, ++ DPAA_MAX_SGS * sizeof(struct qm_sg_entry), ++ rte_socket_id()); ++ if (dpaa_tx_sg_pool == NULL) { ++ DPAA_PMD_ERR("SG pool creation failed\n"); ++ return -ENOMEM; ++ } ++ } + rte_eth_dev_probing_finish(eth_dev); ++ dpaa_valid_dev++; + return 0; + } + +@@ -2233,6 +2253,9 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) + + eth_dev = dpaa_dev->eth_dev; + dpaa_eth_dev_close(eth_dev); ++ dpaa_valid_dev--; ++ if (!dpaa_valid_dev) ++ rte_mempool_free(dpaa_tx_sg_pool); + ret = rte_eth_dev_release_port(eth_dev); + + return ret; +diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.h b/dpdk/drivers/net/dpaa/dpaa_ethdev.h +index 6fdd57dbc3..502c1c88b8 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.h ++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.h +@@ -33,6 +33,13 @@ + + #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */ + ++/* Maximum SG segments supported on all cores*/ ++#define DPAA_MAX_SGS 128 ++/* SG pool size */ ++#define DPAA_POOL_SIZE 2048 ++/* SG pool cache size */ ++#define DPAA_POOL_CACHE_SIZE 256 ++ + /* RX queue tail drop threshold (CGR Based) in frame count */ + #define CGR_RX_PERFQ_THRESH 256 + #define CGR_TX_CGR_THRESH 512 +@@ -103,6 +110,18 @@ + + #define FMC_FILE "/tmp/fmc.bin" + ++extern struct rte_mempool *dpaa_tx_sg_pool; ++ ++/* structure to free external and indirect ++ * buffers. 
++ */ ++struct dpaa_sw_buf_free { ++ /* To which packet this segment belongs */ ++ uint16_t pkt_id; ++ /* The actual segment */ ++ struct rte_mbuf *seg; ++}; ++ + /* Each network interface is represented by one of these */ + struct dpaa_if { + int valid; +diff --git a/dpdk/drivers/net/dpaa/dpaa_flow.c b/dpdk/drivers/net/dpaa/dpaa_flow.c +index 1ccd036027..690ba6bcb3 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_flow.c ++++ b/dpdk/drivers/net/dpaa/dpaa_flow.c +@@ -939,7 +939,7 @@ int dpaa_fm_term(void) + + static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf, + uint8_t vsp_id, t_handle fman_handle, +- struct fman_if *fif) ++ struct fman_if *fif, u32 mbuf_data_room_size) + { + t_fm_vsp_params vsp_params; + t_fm_buffer_prefix_content buf_prefix_cont; +@@ -976,10 +976,8 @@ static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf, + return -1; + } + vsp_params.ext_buf_pools.num_of_pools_used = 1; +- vsp_params.ext_buf_pools.ext_buf_pool[0].id = +- dpaa_intf->vsp_bpid[vsp_id]; +- vsp_params.ext_buf_pools.ext_buf_pool[0].size = +- RTE_MBUF_DEFAULT_BUF_SIZE; ++ vsp_params.ext_buf_pools.ext_buf_pool[0].id = dpaa_intf->vsp_bpid[vsp_id]; ++ vsp_params.ext_buf_pools.ext_buf_pool[0].size = mbuf_data_room_size; + + dpaa_intf->vsp_handle[vsp_id] = fm_vsp_config(&vsp_params); + if (!dpaa_intf->vsp_handle[vsp_id]) { +@@ -1023,7 +1021,7 @@ static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf, + + int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf, + bool fmc_mode, uint8_t vsp_id, uint32_t bpid, +- struct fman_if *fif) ++ struct fman_if *fif, u32 mbuf_data_room_size) + { + int ret = 0; + t_handle fman_handle; +@@ -1054,7 +1052,8 @@ int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf, + + dpaa_intf->vsp_bpid[vsp_id] = bpid; + +- return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif); ++ return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif, ++ mbuf_data_room_size); + } + + int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif) +diff --git a/dpdk/drivers/net/dpaa/dpaa_flow.h b/dpdk/drivers/net/dpaa/dpaa_flow.h +index f5e131acfa..4742b8dd0a 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_flow.h ++++ b/dpdk/drivers/net/dpaa/dpaa_flow.h +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2017,2019 NXP ++ * Copyright 2017,2019,2022 NXP + */ + + #ifndef __DPAA_FLOW_H__ +@@ -11,7 +11,8 @@ int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set); + int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf, struct fman_if *fif); + void dpaa_write_fm_config_to_file(void); + int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf, +- bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif); ++ bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif, ++ u32 mbuf_data_room_size); + int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif); + int dpaa_port_fmc_init(struct fman_if *fif, + uint32_t *fqids, int8_t *vspids, int max_nb_rxq); diff --git a/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/dpdk/drivers/net/dpaa/dpaa_rxtx.c -index ffac6ce3e2..956fe946fa 100644 +index ffac6ce3e2..463a5f4df5 100644 --- a/dpdk/drivers/net/dpaa/dpaa_rxtx.c +++ b/dpdk/drivers/net/dpaa/dpaa_rxtx.c -@@ -600,8 +600,8 @@ void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) +@@ -445,7 +445,7 @@ dpaa_free_mbuf(const struct qm_fd *fd) + bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); + format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; + if (unlikely(format == qm_fd_sg)) { +- struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; 
++ struct rte_mbuf *first_seg, *cur_seg; + struct qm_sg_entry *sgt, *sg_temp; + void *vaddr, *sg_vaddr; + int i = 0; +@@ -459,32 +459,25 @@ dpaa_free_mbuf(const struct qm_fd *fd) + sgt = vaddr + fd_offset; + sg_temp = &sgt[i++]; + hw_sg_to_cpu(sg_temp); +- temp = (struct rte_mbuf *) +- ((char *)vaddr - bp_info->meta_data_size); + sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, + qm_sg_entry_get64(sg_temp)); +- + first_seg = (struct rte_mbuf *)((char *)sg_vaddr - + bp_info->meta_data_size); + first_seg->nb_segs = 1; +- prev_seg = first_seg; + while (i < DPAA_SGT_MAX_ENTRIES) { + sg_temp = &sgt[i++]; + hw_sg_to_cpu(sg_temp); +- sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, ++ if (sg_temp->bpid != 0xFF) { ++ bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid); ++ sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, + qm_sg_entry_get64(sg_temp)); +- cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - ++ cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - + bp_info->meta_data_size); +- first_seg->nb_segs += 1; +- prev_seg->next = cur_seg; +- if (sg_temp->final) { +- cur_seg->next = NULL; +- break; ++ rte_pktmbuf_free_seg(cur_seg); + } +- prev_seg = cur_seg; ++ if (sg_temp->final) ++ break; + } +- +- rte_pktmbuf_free_seg(temp); + rte_pktmbuf_free_seg(first_seg); + return 0; + } +@@ -600,8 +593,8 @@ void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs) void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd)); /* In case of LS1046, annotation stashing is disabled due to L2 cache @@ -22031,6 +39999,164 @@ index ffac6ce3e2..956fe946fa 100644 * in cache when accessed. */ rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF)); +@@ -794,16 +787,18 @@ uint16_t dpaa_eth_queue_rx(void *q, + static int + dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + struct qm_fd *fd, +- struct dpaa_bp_info *bp_info) ++ struct dpaa_sw_buf_free *free_buf, ++ uint32_t *free_count, ++ uint32_t pkt_id) + { +- struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL; ++ struct rte_mbuf *cur_seg = mbuf; + struct rte_mbuf *temp, *mi; + struct qm_sg_entry *sg_temp, *sgt; + int i = 0; + + DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit"); + +- temp = rte_pktmbuf_alloc(bp_info->mp); ++ temp = rte_pktmbuf_alloc(dpaa_tx_sg_pool); + if (!temp) { + DPAA_PMD_ERR("Failure in allocation of mbuf"); + return -1; +@@ -839,7 +834,7 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + fd->format = QM_FD_SG; + fd->addr = temp->buf_iova; + fd->offset = temp->data_off; +- fd->bpid = bp_info ? 
bp_info->bpid : 0xff; ++ fd->bpid = DPAA_MEMPOOL_TO_BPID(dpaa_tx_sg_pool); + fd->length20 = mbuf->pkt_len; + + while (i < DPAA_SGT_MAX_ENTRIES) { +@@ -860,10 +855,11 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + sg_temp->bpid = + DPAA_MEMPOOL_TO_BPID(cur_seg->pool); + } +- cur_seg = cur_seg->next; + } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) { ++ free_buf[*free_count].seg = cur_seg; ++ free_buf[*free_count].pkt_id = pkt_id; ++ ++*free_count; + sg_temp->bpid = 0xff; +- cur_seg = cur_seg->next; + } else { + /* Get owner MBUF from indirect buffer */ + mi = rte_mbuf_from_indirect(cur_seg); +@@ -876,11 +872,11 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool); + rte_mbuf_refcnt_update(mi, 1); + } +- prev_seg = cur_seg; +- cur_seg = cur_seg->next; +- prev_seg->next = NULL; +- rte_pktmbuf_free(prev_seg); ++ free_buf[*free_count].seg = cur_seg; ++ free_buf[*free_count].pkt_id = pkt_id; ++ ++*free_count; + } ++ cur_seg = cur_seg->next; + if (cur_seg == NULL) { + sg_temp->final = 1; + cpu_to_hw_sg(sg_temp); +@@ -895,7 +891,10 @@ dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + static inline void + tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, + struct dpaa_bp_info *bp_info, +- struct qm_fd *fd_arr) ++ struct qm_fd *fd_arr, ++ struct dpaa_sw_buf_free *buf_to_free, ++ uint32_t *free_count, ++ uint32_t pkt_id) + { + struct rte_mbuf *mi = NULL; + +@@ -914,6 +913,9 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid); + } + } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) { ++ buf_to_free[*free_count].seg = mbuf; ++ buf_to_free[*free_count].pkt_id = pkt_id; ++ ++*free_count; + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, + bp_info ? bp_info->bpid : 0xff); + } else { +@@ -937,7 +939,9 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, + DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, + bp_info ? 
bp_info->bpid : 0xff); + } +- rte_pktmbuf_free(mbuf); ++ buf_to_free[*free_count].seg = mbuf; ++ buf_to_free[*free_count].pkt_id = pkt_id; ++ ++*free_count; + } + + if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) +@@ -948,16 +952,21 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf, + static inline uint16_t + tx_on_dpaa_pool(struct rte_mbuf *mbuf, + struct dpaa_bp_info *bp_info, +- struct qm_fd *fd_arr) ++ struct qm_fd *fd_arr, ++ struct dpaa_sw_buf_free *buf_to_free, ++ uint32_t *free_count, ++ uint32_t pkt_id) + { + DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf); + + if (mbuf->nb_segs == 1) { + /* Case for non-segmented buffers */ +- tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr); ++ tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr, ++ buf_to_free, free_count, pkt_id); + } else if (mbuf->nb_segs > 1 && + mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) { +- if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info)) { ++ if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, buf_to_free, ++ free_count, pkt_id)) { + DPAA_PMD_DEBUG("Unable to create Scatter Gather FD"); + return 1; + } +@@ -1061,7 +1070,8 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + uint16_t state; + int ret, realloc_mbuf = 0; + uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0}; +- struct rte_mbuf **orig_bufs = bufs; ++ struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES]; ++ uint32_t free_count = 0; + + if (unlikely(!DPAA_PER_LCORE_PORTAL)) { + ret = rte_dpaa_portal_init((void *)0); +@@ -1144,7 +1154,10 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + } + indirect_buf: + state = tx_on_dpaa_pool(mbuf, bp_info, +- &fd_arr[loop]); ++ &fd_arr[loop], ++ buf_to_free, ++ &free_count, ++ loop); + if (unlikely(state)) { + /* Set frames_to_send & nb_bufs so + * that packets are transmitted till +@@ -1169,13 +1182,9 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) + + DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q); + +- +- loop = 0; +- while (loop < sent) { +- if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs))) +- rte_pktmbuf_free(*orig_bufs); +- orig_bufs++; +- loop++; ++ for (loop = 0; loop < free_count; loop++) { ++ if (buf_to_free[loop].pkt_id < sent) ++ rte_pktmbuf_free_seg(buf_to_free[loop].seg); + } + + return sent; diff --git a/dpdk/drivers/net/dpaa/fmlib/fm_ext.h b/dpdk/drivers/net/dpaa/fmlib/fm_ext.h index 27c9fb471e..8e7153bdaf 100644 --- a/dpdk/drivers/net/dpaa/fmlib/fm_ext.h @@ -22152,10 +40278,20 @@ index 6f5479fbe1..bb2e00222e 100644 } t_fm_port_congestion_grps; diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c -index a3706439d5..b875139689 100644 +index a3706439d5..6dc5f1390f 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c -@@ -143,7 +143,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) +@@ -74,6 +74,9 @@ int dpaa2_timestamp_dynfield_offset = -1; + /* Enable error queue */ + bool dpaa2_enable_err_queue; + ++int dpaa2_valid_dev; ++struct rte_mempool *dpaa2_tx_sg_pool; ++ + struct rte_dpaa2_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint8_t page_id; /* dpni statistics page id */ +@@ -143,7 +146,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) PMD_INIT_FUNC_TRACE(); if (mask & RTE_ETH_VLAN_FILTER_MASK) { @@ -22164,7 +40300,7 @@ index a3706439d5..b875139689 100644 if (!priv->max_vlan_filters) { DPAA2_PMD_INFO("VLAN filter not available"); return -ENOTSUP; -@@ -395,6 +395,8 @@ 
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) +@@ -395,6 +398,8 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) if (dpaa2_enable_err_queue) { priv->rx_err_vq = rte_zmalloc("dpni_rx_err", sizeof(struct dpaa2_queue), 0); @@ -22173,7 +40309,7 @@ index a3706439d5..b875139689 100644 dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; dpaa2_q->q_storage = rte_malloc("err_dq_storage", -@@ -916,7 +918,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -916,7 +921,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; cong_notif_cfg.threshold_entry = nb_tx_desc; /* Notify that the queue is not congested when the data in @@ -22182,7 +40318,7 @@ index a3706439d5..b875139689 100644 */ cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10; cong_notif_cfg.message_ctx = 0; -@@ -1058,7 +1060,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -1058,7 +1063,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) * Dpaa2 link Interrupt handler * * @param param @@ -22191,7 +40327,19 @@ index a3706439d5..b875139689 100644 * * @return * void -@@ -1252,7 +1254,12 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) +@@ -1238,6 +1243,11 @@ dpaa2_dev_start(struct rte_eth_dev *dev) + if (priv->en_ordered) + dev->tx_pkt_burst = dpaa2_dev_tx_ordered; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -1252,7 +1262,13 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int ret; struct rte_eth_link link; @@ -22199,13 +40347,26 @@ index a3706439d5..b875139689 100644 + struct rte_device *rdev = dev->device; + struct rte_intr_handle *intr_handle; + struct rte_dpaa2_device *dpaa2_dev; ++ uint16_t i; + + dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); + intr_handle = dpaa2_dev->intr_handle; PMD_INIT_FUNC_TRACE(); -@@ -2236,7 +2243,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, +@@ -1284,6 +1300,11 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -2236,7 +2257,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, ocfg.oa = 1; /* Late arrival window size disabled */ ocfg.olws = 0; @@ -22214,7 +40375,7 @@ index a3706439d5..b875139689 100644 ocfg.oeane = 0; /* Loose ordering enabled */ ocfg.oloe = 1; -@@ -2720,13 +2727,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) +@@ -2720,13 +2741,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) } eth_dev->tx_pkt_burst = dpaa2_dev_tx; @@ -22230,7 +40391,7 @@ index a3706439d5..b875139689 100644 goto init_err; } priv->extract.qos_key_extract.key_info.ipv4_src_offset = -@@ -2744,7 +2751,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) +@@ -2744,7 +2765,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) priv->extract.tc_extract_param[i] = (size_t)rte_malloc(NULL, 256, 64); if (!priv->extract.tc_extract_param[i]) { @@ -22239,20 +40400,79 @@ index a3706439d5..b875139689 100644 ret); goto init_err; } +@@ -2857,7 +2878,20 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, + /* Invoke PMD device initialization function */ 
+ diag = dpaa2_dev_init(eth_dev); + if (diag == 0) { ++ if (!dpaa2_tx_sg_pool) { ++ dpaa2_tx_sg_pool = ++ rte_pktmbuf_pool_create("dpaa2_mbuf_tx_sg_pool", ++ DPAA2_POOL_SIZE, ++ DPAA2_POOL_CACHE_SIZE, 0, ++ DPAA2_MAX_SGS * sizeof(struct qbman_sge), ++ rte_socket_id()); ++ if (dpaa2_tx_sg_pool == NULL) { ++ DPAA2_PMD_ERR("SG pool creation failed\n"); ++ return -ENOMEM; ++ } ++ } + rte_eth_dev_probing_finish(eth_dev); ++ dpaa2_valid_dev++; + return 0; + } + +@@ -2873,6 +2907,9 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) + + eth_dev = dpaa2_dev->eth_dev; + dpaa2_dev_close(eth_dev); ++ dpaa2_valid_dev--; ++ if (!dpaa2_valid_dev) ++ rte_mempool_free(dpaa2_tx_sg_pool); + ret = rte_eth_dev_release_port(eth_dev); + + return ret; diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h -index c5e9267bf0..fd4eabed4e 100644 +index c5e9267bf0..e750fa4cbb 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h +++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h -@@ -62,7 +62,7 @@ +@@ -62,7 +62,10 @@ /* Disable RX tail drop, default is enable */ #define DPAA2_RX_TAILDROP_OFF 0x04 /* Tx confirmation enabled */ -#define DPAA2_TX_CONF_ENABLE 0x08 +#define DPAA2_TX_CONF_ENABLE 0x06 ++ ++/* DPDMUX index for DPMAC */ ++#define DPAA2_DPDMUX_DPMAC_IDX 0 #define DPAA2_RSS_OFFLOAD_ALL ( \ RTE_ETH_RSS_L2_PAYLOAD | \ -@@ -117,7 +117,7 @@ extern int dpaa2_timestamp_dynfield_offset; +@@ -104,6 +107,24 @@ + #define DPAA2_PKT_TYPE_VLAN_1 0x0160 + #define DPAA2_PKT_TYPE_VLAN_2 0x0260 + ++/* Global pool used by driver for SG list TX */ ++extern struct rte_mempool *dpaa2_tx_sg_pool; ++/* Maximum SG segments */ ++#define DPAA2_MAX_SGS 128 ++/* SG pool size */ ++#define DPAA2_POOL_SIZE 2048 ++/* SG pool cache size */ ++#define DPAA2_POOL_CACHE_SIZE 256 ++/* structure to free external and indirect ++ * buffers. 
++ */ ++struct sw_buf_free { ++ /* To which packet this segment belongs */ ++ uint16_t pkt_id; ++ /* The actual segment */ ++ struct rte_mbuf *seg; ++}; ++ + /* enable timestamp in mbuf*/ + extern bool dpaa2_enable_ts[]; + extern uint64_t dpaa2_timestamp_rx_dynflag; +@@ -117,7 +138,7 @@ extern int dpaa2_timestamp_dynfield_offset; #define DPAA2_FLOW_MAX_KEY_SIZE 16 @@ -22302,7 +40522,7 @@ index 84fe37a7c0..bf55eb70a3 100644 } } diff --git a/dpdk/drivers/net/dpaa2/dpaa2_mux.c b/dpdk/drivers/net/dpaa2/dpaa2_mux.c -index d347f4df51..54f53b7ea0 100644 +index d347f4df51..c339a0322f 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_mux.c +++ b/dpdk/drivers/net/dpaa2/dpaa2_mux.c @@ -95,7 +95,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, @@ -22323,6 +40543,15 @@ index d347f4df51..54f53b7ea0 100644 if (ret) { DPAA2_PMD_ERR("setting default interface failed in %s", __func__); +@@ -336,7 +336,7 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, + + ret = dpdmux_if_set_errors_behavior(&dpdmux_dev->dpdmux, + CMD_PRI_LOW, +- dpdmux_dev->token, dpdmux_id, ++ dpdmux_dev->token, DPAA2_DPDMUX_DPMAC_IDX, + &mux_err_cfg); + if (ret) { + DPAA2_PMD_ERR("dpdmux_if_set_errors_behavior %s err %d", diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ptp.c b/dpdk/drivers/net/dpaa2/dpaa2_ptp.c index 8d79e39244..3a4536dd69 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_ptp.c @@ -22344,7 +40573,7 @@ index 8d79e39244..3a4536dd69 100644 return 0; diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c -index c65589a5f3..9fb6c5f91d 100644 +index c65589a5f3..46c47fe36a 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -140,8 +140,10 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, @@ -22359,7 +40588,164 @@ index c65589a5f3..9fb6c5f91d 100644 #endif if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) { -@@ -714,7 +716,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -190,8 +192,12 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +- else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; ++ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | + L3_IP_1_MORE_FRAGMENT | +@@ -233,8 +239,12 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +- else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; ++ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; ++ else ++ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + + if (dpaa2_enable_ts[mbuf->port]) { + *dpaa2_timestamp_dynfield(mbuf) = annotation->word2; +@@ -379,9 +389,12 @@ eth_fd_to_mbuf(const struct qbman_fd *fd, + static int __rte_noinline __rte_hot + eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, +- struct rte_mempool *mp, uint16_t bpid) ++ struct sw_buf_free *free_buf, ++ uint32_t *free_count, ++ uint32_t pkt_id, ++ uint16_t bpid) + { +- struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp; ++ struct rte_mbuf *cur_seg = mbuf, *mi, *temp; + 
struct qbman_sge *sgt, *sge = NULL; + int i, offset = 0; + +@@ -405,12 +418,12 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + } + DPAA2_SET_FD_OFFSET(fd, offset); + } else { +- temp = rte_pktmbuf_alloc(mp); ++ temp = rte_pktmbuf_alloc(dpaa2_tx_sg_pool); + if (temp == NULL) { + DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); + return -ENOMEM; + } +- DPAA2_SET_ONLY_FD_BPID(fd, bpid); ++ DPAA2_SET_ONLY_FD_BPID(fd, mempool_to_bpid(dpaa2_tx_sg_pool)); + DPAA2_SET_FD_OFFSET(fd, temp->data_off); + } + DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp)); +@@ -450,10 +463,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + mempool_to_bpid(cur_seg->pool)); + } + } +- cur_seg = cur_seg->next; + } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) { ++ free_buf[*free_count].seg = cur_seg; ++ free_buf[*free_count].pkt_id = pkt_id; ++ ++*free_count; + DPAA2_SET_FLE_IVP(sge); +- cur_seg = cur_seg->next; + } else { + /* Get owner MBUF from indirect buffer */ + mi = rte_mbuf_from_indirect(cur_seg); +@@ -467,11 +481,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + mempool_to_bpid(mi->pool)); + rte_mbuf_refcnt_update(mi, 1); + } +- prev_seg = cur_seg; +- cur_seg = cur_seg->next; +- prev_seg->next = NULL; +- rte_pktmbuf_free(prev_seg); ++ free_buf[*free_count].seg = cur_seg; ++ free_buf[*free_count].pkt_id = pkt_id; ++ ++*free_count; + } ++ cur_seg = cur_seg->next; + } + DPAA2_SG_SET_FINAL(sge, true); + return 0; +@@ -479,11 +493,19 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + + static void + eth_mbuf_to_fd(struct rte_mbuf *mbuf, +- struct qbman_fd *fd, uint16_t bpid) __rte_unused; ++ struct qbman_fd *fd, ++ struct sw_buf_free *buf_to_free, ++ uint32_t *free_count, ++ uint32_t pkt_id, ++ uint16_t bpid) __rte_unused; + + static void __rte_noinline __rte_hot + eth_mbuf_to_fd(struct rte_mbuf *mbuf, +- struct qbman_fd *fd, uint16_t bpid) ++ struct qbman_fd *fd, ++ struct sw_buf_free *buf_to_free, ++ uint32_t *free_count, ++ uint32_t pkt_id, ++ uint16_t bpid) + { + DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); + +@@ -499,6 +521,9 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf, + rte_mbuf_refcnt_update(mbuf, -1); + } + } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) { ++ buf_to_free[*free_count].seg = mbuf; ++ buf_to_free[*free_count].pkt_id = pkt_id; ++ ++*free_count; + DPAA2_SET_FD_IVP(fd); + } else { + struct rte_mbuf *mi; +@@ -508,7 +533,10 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf, + DPAA2_SET_FD_IVP(fd); + else + rte_mbuf_refcnt_update(mi, 1); +- rte_pktmbuf_free(mbuf); ++ ++ buf_to_free[*free_count].seg = mbuf; ++ buf_to_free[*free_count].pkt_id = pkt_id; ++ ++*free_count; + } + } + +@@ -557,7 +585,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q) + /* Function receive frames for a given device and VQ */ + struct qbman_result *dq_storage; + uint32_t fqid = dpaa2_q->fqid; +- int ret, num_rx = 0, num_pulled; ++ int ret, num_rx = 0; + uint8_t pending, status; + struct qbman_swp *swp; + const struct qbman_fd *fd; +@@ -597,7 +625,6 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q) + while (!qbman_check_command_complete(dq_storage)) + ; + +- num_pulled = 0; + pending = 1; + do { + /* Loop until the dq_storage is updated with +@@ -632,7 +659,6 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q) + + dq_storage++; + num_rx++; +- num_pulled++; + } while (pending); + + dpaa2_q->err_pkts += num_rx; +@@ -714,7 +740,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) rte_prefetch0((void *)(size_t)(dq_storage + 1)); /* Prepare next pull descriptor. 
This will give space for the @@ -22368,7 +40754,7 @@ index c65589a5f3..9fb6c5f91d 100644 */ q_storage->toggle ^= 1; dq_storage1 = q_storage->dq_storage[q_storage->toggle]; -@@ -769,7 +771,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -769,7 +795,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) else bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id); #if defined(RTE_LIBRTE_IEEE1588) @@ -22380,7 +40766,7 @@ index c65589a5f3..9fb6c5f91d 100644 #endif if (eth_data->dev_conf.rxmode.offloads & -@@ -986,6 +991,13 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -986,6 +1015,13 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id); @@ -22394,7 +40780,7 @@ index c65589a5f3..9fb6c5f91d 100644 if (eth_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { rte_vlan_strip(bufs[num_rx]); -@@ -1021,6 +1033,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue) +@@ -1021,6 +1057,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue) struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; struct dpaa2_dev_priv *priv = eth_data->dev_private; struct dpaa2_annot_hdr *annotation; @@ -22403,7 +40789,7 @@ index c65589a5f3..9fb6c5f91d 100644 #endif if (unlikely(!DPAA2_PER_LCORE_DPIO)) { -@@ -1105,10 +1119,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue) +@@ -1105,10 +1143,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue) num_tx_conf++; num_pulled++; #if defined(RTE_LIBRTE_IEEE1588) @@ -22424,7 +40810,17 @@ index c65589a5f3..9fb6c5f91d 100644 #endif } while (pending); -@@ -1184,8 +1204,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -1163,7 +1207,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + uint32_t flags[MAX_TX_RING_SLOTS] = {0}; +- struct rte_mbuf **orig_bufs = bufs; ++ struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size]; ++ uint32_t free_count = 0; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); +@@ -1184,8 +1229,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) * corresponding to last packet transmitted for reading * the timestamp */ @@ -22438,7 +40834,91 @@ index c65589a5f3..9fb6c5f91d 100644 #endif /*Prepare enqueue descriptor*/ -@@ -1510,7 +1533,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -1250,13 +1298,20 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + + if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) { + if (unlikely((*bufs)->nb_segs > 1)) { ++ mp = (*bufs)->pool; + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], +- mp, 0)) ++ buf_to_free, ++ &free_count, ++ loop, ++ mempool_to_bpid(mp))) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, +- &fd_arr[loop], 0); ++ &fd_arr[loop], ++ buf_to_free, ++ &free_count, ++ loop, 0); + } + bufs++; + #ifdef RTE_LIBRTE_IEEE1588 +@@ -1301,11 +1356,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], +- mp, bpid)) ++ buf_to_free, ++ &free_count, ++ loop, ++ bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, +- &fd_arr[loop], bpid); ++ &fd_arr[loop], ++ buf_to_free, ++ &free_count, ++ loop, bpid); + } + } + #ifdef RTE_LIBRTE_IEEE1588 +@@ -1338,12 +1399,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf 
**bufs, uint16_t nb_pkts) + } + dpaa2_q->tx_pkts += num_tx; + +- loop = 0; +- while (loop < num_tx) { +- if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs))) +- rte_pktmbuf_free(*orig_bufs); +- orig_bufs++; +- loop++; ++ for (loop = 0; loop < free_count; loop++) { ++ if (buf_to_free[loop].pkt_id < num_tx) ++ rte_pktmbuf_free_seg(buf_to_free[loop].seg); + } + + return num_tx; +@@ -1373,12 +1431,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + skip_tx: + dpaa2_q->tx_pkts += num_tx; + +- loop = 0; +- while (loop < num_tx) { +- if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs))) +- rte_pktmbuf_free(*orig_bufs); +- orig_bufs++; +- loop++; ++ for (loop = 0; loop < free_count; loop++) { ++ if (buf_to_free[loop].pkt_id < num_tx) ++ rte_pktmbuf_free_seg(buf_to_free[loop].seg); + } + + return num_tx; +@@ -1464,6 +1519,8 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + int32_t ret; + uint16_t num_tx = 0; + uint16_t bpid; ++ struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size]; ++ uint32_t free_count = 0; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); +@@ -1510,7 +1567,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) if (*dpaa2_seqn(*bufs)) { /* Use only queue 0 for Tx in case of atomic/ * ordered packets as packets can get unordered @@ -22447,7 +40927,51 @@ index c65589a5f3..9fb6c5f91d 100644 */ dpaa2_set_enqueue_descriptor(order_sendq, (*bufs), -@@ -1738,7 +1761,7 @@ dpaa2_dev_loopback_rx(void *queue, +@@ -1576,12 +1633,17 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], +- mp, ++ buf_to_free, ++ &free_count, ++ loop, + bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, +- &fd_arr[loop], bpid); ++ &fd_arr[loop], ++ buf_to_free, ++ &free_count, ++ loop, bpid); + } + } + bufs++; +@@ -1610,6 +1672,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + nb_pkts -= loop; + } + dpaa2_q->tx_pkts += num_tx; ++ for (loop = 0; loop < free_count; loop++) { ++ if (buf_to_free[loop].pkt_id < num_tx) ++ rte_pktmbuf_free_seg(buf_to_free[loop].seg); ++ } ++ + return num_tx; + + send_n_return: +@@ -1634,6 +1701,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + } + skip_tx: + dpaa2_q->tx_pkts += num_tx; ++ for (loop = 0; loop < free_count; loop++) { ++ if (buf_to_free[loop].pkt_id < num_tx) ++ rte_pktmbuf_free_seg(buf_to_free[loop].seg); ++ } ++ + return num_tx; + } + +@@ -1738,7 +1810,7 @@ dpaa2_dev_loopback_rx(void *queue, rte_prefetch0((void *)(size_t)(dq_storage + 1)); /* Prepare next pull descriptor. This will give space for the @@ -22528,7 +41052,7 @@ index a548ae2ccb..718a9746ed 100644 struct e1000_filter_info { uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c -index 31c4870086..794496abfc 100644 +index 31c4870086..e983d5682a 100644 --- a/dpdk/drivers/net/e1000/em_ethdev.c +++ b/dpdk/drivers/net/e1000/em_ethdev.c @@ -1058,8 +1058,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) @@ -22542,7 +41066,7 @@ index 31c4870086..794496abfc 100644 * - Use just one TX queue. * - Allow cksum offload only for one TX queue. * - Don't allow TX cksum offload at all. 
-@@ -1068,7 +1068,7 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1068,13 +1068,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) * (Multiple Receive Queues are mutually exclusive with UDP * fragmentation and are not supported when a legacy receive * descriptor format is used). @@ -22551,6 +41075,14 @@ index 31c4870086..794496abfc 100644 * don't support extended RXD. * To avoid it we support just one RX queue for now (no RSS). */ + +- dev_info->max_rx_queues = 1; +- dev_info->max_tx_queues = 1; ++ dev_info->max_rx_queues = 2; ++ dev_info->max_tx_queues = 2; + + dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(); + dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() | @@ -1558,7 +1558,7 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev) } @@ -22570,7 +41102,7 @@ index 31c4870086..794496abfc 100644 * @return * void diff --git a/dpdk/drivers/net/e1000/em_rxtx.c b/dpdk/drivers/net/e1000/em_rxtx.c -index 39262502bb..cea5b490ba 100644 +index 39262502bb..2d40854d1d 100644 --- a/dpdk/drivers/net/e1000/em_rxtx.c +++ b/dpdk/drivers/net/e1000/em_rxtx.c @@ -141,7 +141,7 @@ union em_vlan_macip { @@ -22591,7 +41123,24 @@ index 39262502bb..cea5b490ba 100644 * hardware point of view... */ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); -@@ -1074,7 +1074,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -1030,6 +1030,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + * - RX port identifier, + * - hardware offload data, if any: + * - IP checksum flag, ++ * - VLAN TCI, if any, + * - error flags. + */ + first_seg->port = rxq->port_id; +@@ -1039,7 +1040,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + rx_desc_error_to_pkt_flags(rxd.errors); + + /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */ +- rxm->vlan_tci = rte_le_to_cpu_16(rxd.special); ++ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.special); + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch((char *)first_seg->buf_addr + +@@ -1074,7 +1075,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * register. * Update the RDT with the value of the last processed RX descriptor * minus 1, to guarantee that the RDT register is never equal to the @@ -22600,6 +41149,42 @@ index 39262502bb..cea5b490ba 100644 * hardware point of view... 
*/ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); +@@ -1575,6 +1576,8 @@ em_dev_clear_queues(struct rte_eth_dev *dev) + em_tx_queue_release_mbufs(txq); + em_reset_tx_queue(txq); + } ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { +@@ -1583,6 +1586,8 @@ em_dev_clear_queues(struct rte_eth_dev *dev) + em_rx_queue_release_mbufs(rxq); + em_reset_rx_queue(rxq); + } ++ ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1811,6 +1816,8 @@ eth_em_rx_init(struct rte_eth_dev *dev) + rxdctl |= E1000_RXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + /* + * Due to EM devices not having any sort of hardware + * limit for packet length, jumbo frame of any size +@@ -1945,6 +1952,8 @@ eth_em_tx_init(struct rte_eth_dev *dev) + txdctl |= (txq->wthresh & 0x3F) << 16; + txdctl |= E1000_TXDCTL_GRAN; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + /* Program the Transmit Control Register. */ diff --git a/dpdk/drivers/net/e1000/igb_ethdev.c b/dpdk/drivers/net/e1000/igb_ethdev.c index 3ee16c15fe..a9c18b27e8 100644 --- a/dpdk/drivers/net/e1000/igb_ethdev.c @@ -22721,7 +41306,7 @@ index fe355ef6b3..3f3fd0d61e 100644 rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT); E1000_WRITE_REG(hw, E1000_RAH(0), rah); diff --git a/dpdk/drivers/net/e1000/igb_rxtx.c b/dpdk/drivers/net/e1000/igb_rxtx.c -index 4a311a7b18..f32dee46df 100644 +index 4a311a7b18..6027cfbfb1 100644 --- a/dpdk/drivers/net/e1000/igb_rxtx.c +++ b/dpdk/drivers/net/e1000/igb_rxtx.c @@ -150,7 +150,7 @@ union igb_tx_offload { @@ -22778,7 +41363,23 @@ index 4a311a7b18..f32dee46df 100644 tx_id = sw_ring[tx_id].next_id; if (sw_ring[tx_id].mbuf) -@@ -2146,7 +2146,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev) +@@ -1853,6 +1853,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + igb_tx_queue_release_mbufs(txq); + igb_reset_tx_queue(txq, dev); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1861,6 +1862,7 @@ igb_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + igb_rx_queue_release_mbufs(rxq); + igb_reset_rx_queue(rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + } +@@ -2146,7 +2148,7 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev) igb_rss_disable(dev); @@ -22787,8 +41388,42 @@ index 4a311a7b18..f32dee46df 100644 rctl = E1000_READ_REG(hw, E1000_RCTL); rctl |= E1000_RCTL_VFE; E1000_WRITE_REG(hw, E1000_RCTL, rctl); +@@ -2441,6 +2443,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev) + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { +@@ -2605,6 +2608,7 @@ eth_igb_tx_init(struct rte_eth_dev *dev) + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + /* Program the Transmit Control Register. 
*/ +@@ -2740,6 +2744,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev) + else + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); ++ ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { +@@ -2811,6 +2817,8 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev) + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + } diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c -index 634c97acf6..770b101688 100644 +index 634c97acf6..1c61f793e6 100644 --- a/dpdk/drivers/net/ena/ena_ethdev.c +++ b/dpdk/drivers/net/ena/ena_ethdev.c @@ -38,11 +38,6 @@ @@ -22862,7 +41497,47 @@ index 634c97acf6..770b101688 100644 return -EFAULT; } -@@ -1408,7 +1419,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) +@@ -899,6 +910,7 @@ static int ena_start(struct rte_eth_dev *dev) + struct ena_adapter *adapter = dev->data->dev_private; + uint64_t ticks; + int rc = 0; ++ uint16_t i; + + /* Cannot allocate memory in secondary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +@@ -940,6 +952,11 @@ static int ena_start(struct rte_eth_dev *dev) + ++adapter->dev_stats.dev_start; + adapter->state = ENA_ADAPTER_STATE_RUNNING; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + + err_rss_init: +@@ -955,6 +972,7 @@ static int ena_stop(struct rte_eth_dev *dev) + struct ena_com_dev *ena_dev = &adapter->ena_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; ++ uint16_t i; + int rc; + + /* Cannot free memory in secondary process */ +@@ -986,6 +1004,11 @@ static int ena_stop(struct rte_eth_dev *dev) + adapter->state = ENA_ADAPTER_STATE_STOPPED; + dev->data->dev_started = 0; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -1408,7 +1431,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) ++rxq->rx_stats.refill_partial; } @@ -22871,7 +41546,7 @@ index 634c97acf6..770b101688 100644 if (likely(i > 0)) { /* ...let HW know that it can fill buffers with data. 
*/ ena_com_write_sq_doorbell(rxq->ena_com_io_sq); -@@ -1529,8 +1540,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) +@@ -1529,8 +1552,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= adapter->keep_alive_timeout)) { PMD_DRV_LOG(ERR, "Keep alive timeout\n"); @@ -22881,7 +41556,7 @@ index 634c97acf6..770b101688 100644 ++adapter->dev_stats.wd_expired; } } -@@ -1540,8 +1550,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) +@@ -1540,8 +1562,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) { if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { PMD_DRV_LOG(ERR, "ENA admin queue is not in running state\n"); @@ -22891,7 +41566,7 @@ index 634c97acf6..770b101688 100644 } } -@@ -1632,6 +1641,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, +@@ -1632,6 +1653,9 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, struct rte_eth_dev *dev = arg; struct ena_adapter *adapter = dev->data->dev_private; @@ -22901,7 +41576,7 @@ index 634c97acf6..770b101688 100644 check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_tx_completions(adapter); -@@ -1682,6 +1694,13 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, +@@ -1682,6 +1706,13 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, return 0; } @@ -22915,7 +41590,7 @@ index 634c97acf6..770b101688 100644 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); if (unlikely(rc)) { PMD_INIT_LOG(WARNING, -@@ -1694,13 +1713,6 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, +@@ -1694,13 +1725,6 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter, if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) return 0; @@ -22929,7 +41604,7 @@ index 634c97acf6..770b101688 100644 ena_dev->mem_bar = adapter->dev_mem_base; return 0; -@@ -2028,9 +2040,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev) +@@ -2028,9 +2052,6 @@ static int ena_dev_configure(struct rte_eth_dev *dev) */ adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2; @@ -22939,7 +41614,7 @@ index 634c97acf6..770b101688 100644 return 0; } -@@ -2325,14 +2334,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -2325,14 +2346,13 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rc); if (rc == ENA_COM_NO_SPACE) { ++rx_ring->rx_stats.bad_desc_num; @@ -22958,7 +41633,7 @@ index 634c97acf6..770b101688 100644 return 0; } -@@ -2732,9 +2740,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) +@@ -2732,9 +2752,8 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) if (unlikely(rc)) { PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc); ++tx_ring->tx_stats.prepare_ctx_err; @@ -23018,8 +41693,52 @@ index be4007e3f3..8193eaf6fc 100644 } int ena_rss_reta_update(struct rte_eth_dev *dev, +diff --git a/dpdk/drivers/net/enetc/enetc_ethdev.c b/dpdk/drivers/net/enetc/enetc_ethdev.c +index 7cdb8ce463..c3eebc6fa4 100644 +--- a/dpdk/drivers/net/enetc/enetc_ethdev.c ++++ b/dpdk/drivers/net/enetc/enetc_ethdev.c +@@ -17,6 +17,7 @@ enetc_dev_start(struct rte_eth_dev *dev) + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t val; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + val = enetc_port_rd(enetc_hw, 
ENETC_PM0_CMD_CFG); +@@ -42,6 +43,11 @@ enetc_dev_start(struct rte_eth_dev *dev) + ENETC_PM0_IFM_XGMII); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -52,6 +58,7 @@ enetc_dev_stop(struct rte_eth_dev *dev) + ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct enetc_hw *enetc_hw = &hw->hw; + uint32_t val; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + dev->data->dev_started = 0; +@@ -63,6 +70,11 @@ enetc_dev_stop(struct rte_eth_dev *dev) + enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, + val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN))); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/enetfec/enet_ethdev.c b/dpdk/drivers/net/enetfec/enet_ethdev.c -index 714f8ac7ec..c938e58204 100644 +index 714f8ac7ec..898aad1c37 100644 --- a/dpdk/drivers/net/enetfec/enet_ethdev.c +++ b/dpdk/drivers/net/enetfec/enet_ethdev.c @@ -2,9 +2,12 @@ @@ -23035,7 +41754,25 @@ index 714f8ac7ec..c938e58204 100644 #include "enet_pmd_logs.h" #include "enet_ethdev.h" #include "enet_regs.h" -@@ -454,6 +457,12 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev, +@@ -51,6 +54,7 @@ enetfec_restart(struct rte_eth_dev *dev) + uint32_t rcntl = OPT_FRAME_SIZE | 0x04; + uint32_t ecntl = ENETFEC_ETHEREN; + uint32_t val; ++ int i; + + /* Clear any outstanding interrupt. */ + writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR); +@@ -146,6 +150,9 @@ enetfec_restart(struct rte_eth_dev *dev) + /* And last, enable the transmit and receive processing */ + rte_write32(rte_cpu_to_le_32(ecntl), + (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR); ++ ++ for (i = 0; i < fep->max_rx_queues; i++) ++ rte_write32(0, fep->rx_queues[i]->bd.active_reg_desc); + rte_delay_us(10); + } + +@@ -454,6 +461,12 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev, return -EINVAL; } @@ -23061,6 +41798,104 @@ index a300c6f8bc..c9400957f8 100644 #define RX_BD_FIRST ((ushort)0x0400) /* Reserved */ #define RX_BD_LAST ((ushort)0x0800) /* last buffer in the frame */ #define RX_BD_INT 0x00800000 +diff --git a/dpdk/drivers/net/enetfec/enet_rxtx.c b/dpdk/drivers/net/enetfec/enet_rxtx.c +index 49b326315d..0aea8b240d 100644 +--- a/dpdk/drivers/net/enetfec/enet_rxtx.c ++++ b/dpdk/drivers/net/enetfec/enet_rxtx.c +@@ -39,11 +39,6 @@ enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts, + if (pkt_received >= nb_pkts) + break; + +- new_mbuf = rte_pktmbuf_alloc(pool); +- if (unlikely(new_mbuf == NULL)) { +- stats->rx_nombuf++; +- break; +- } + /* Check for errors. */ + status ^= RX_BD_LAST; + if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO | +@@ -72,6 +67,12 @@ enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts, + goto rx_processing_done; + } + ++ new_mbuf = rte_pktmbuf_alloc(pool); ++ if (unlikely(new_mbuf == NULL)) { ++ stats->rx_nombuf++; ++ break; ++ } ++ + /* Process the incoming frame. 
*/ + stats->ipackets++; + pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)); +@@ -193,7 +194,16 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + tx_st = 0; + break; + } ++ ++ mbuf = *(tx_pkts); ++ if (mbuf->nb_segs > 1) { ++ ENETFEC_DP_LOG(DEBUG, "SG not supported"); ++ return pkt_transmitted; ++ } ++ ++ tx_pkts++; + bdp = txq->bd.cur; ++ + /* First clean the ring */ + index = enet_get_bd_index(bdp, &txq->bd); + status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)); +@@ -207,9 +217,6 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + txq->tx_mbuf[index] = NULL; + } + +- mbuf = *(tx_pkts); +- tx_pkts++; +- + /* Fill in a Tx ring entry */ + last_bdp = bdp; + status &= ~TX_BD_STATS; +@@ -219,10 +226,6 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + stats->opackets++; + stats->obytes += buflen; + +- if (mbuf->nb_segs > 1) { +- ENETFEC_DP_LOG(DEBUG, "SG not supported"); +- return -1; +- } + status |= (TX_BD_LAST); + data = rte_pktmbuf_mtod(mbuf, void *); + for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE) +@@ -268,5 +271,5 @@ enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + */ + txq->bd.cur = bdp; + } +- return nb_pkts; ++ return pkt_transmitted; + } +diff --git a/dpdk/drivers/net/enic/enic_ethdev.c b/dpdk/drivers/net/enic/enic_ethdev.c +index 163be09809..426912bb83 100644 +--- a/dpdk/drivers/net/enic/enic_ethdev.c ++++ b/dpdk/drivers/net/enic/enic_ethdev.c +@@ -368,6 +368,7 @@ static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) + { + struct rte_eth_link link; + struct enic *enic = pmd_priv(eth_dev); ++ uint16_t i; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; +@@ -378,6 +379,11 @@ static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(eth_dev, &link); + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/enic/enic_flow.c b/dpdk/drivers/net/enic/enic_flow.c index 33147169ba..cf51793cfe 100644 --- a/dpdk/drivers/net/enic/enic_flow.c @@ -23202,7 +42037,7 @@ index ae43f36bc0..ab73cd8530 100644 if (ret) return ret; diff --git a/dpdk/drivers/net/enic/enic_main.c b/dpdk/drivers/net/enic/enic_main.c -index 7f84b5f935..97d97ea793 100644 +index 7f84b5f935..2ea26c0407 100644 --- a/dpdk/drivers/net/enic/enic_main.c +++ b/dpdk/drivers/net/enic/enic_main.c @@ -1137,7 +1137,7 @@ int enic_disable(struct enic *enic) @@ -23214,6 +42049,15 @@ index 7f84b5f935..97d97ea793 100644 * though, as we want the polling of link status to continue working. */ if (enic->rte_dev->data->dev_conf.intr_conf.lsc) +@@ -1639,7 +1639,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) + * packet length. + */ + if (!eth_dev->data->dev_started) +- goto set_mtu_done; ++ return rc; + + /* + * The device has started, re-do RQs on the fly. 
In the process, we diff --git a/dpdk/drivers/net/enic/enic_rxtx.c b/dpdk/drivers/net/enic/enic_rxtx.c index c44715bfd0..33e96b480e 100644 --- a/dpdk/drivers/net/enic/enic_rxtx.c @@ -23241,6 +42085,70 @@ index 3c754a5f66..05cf533896 100644 return ret; } +diff --git a/dpdk/drivers/net/failsafe/failsafe_ops.c b/dpdk/drivers/net/failsafe/failsafe_ops.c +index 55e21d635c..2c23d0e70a 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_ops.c ++++ b/dpdk/drivers/net/failsafe/failsafe_ops.c +@@ -6,6 +6,9 @@ + #include <stdbool.h> + #include <stdint.h> + #include <unistd.h> ++#ifdef RTE_EXEC_ENV_LINUX ++#include <sys/eventfd.h> ++#endif + + #include <rte_debug.h> + #include <rte_atomic.h> +@@ -387,28 +390,11 @@ fs_rx_queue_setup(struct rte_eth_dev *dev, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool) + { +- /* +- * FIXME: Add a proper interface in rte_eal_interrupts for +- * allocating eventfd as an interrupt vector. +- * For the time being, fake as if we are using MSIX interrupts, +- * this will cause rte_intr_efd_enable to allocate an eventfd for us. +- */ +- struct rte_intr_handle *intr_handle; + struct sub_device *sdev; + struct rxq *rxq; + uint8_t i; + int ret; + +- intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE); +- if (intr_handle == NULL) +- return -ENOMEM; +- +- if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_VFIO_MSIX)) +- return -rte_errno; +- +- if (rte_intr_efds_index_set(intr_handle, 0, -1)) +- return -rte_errno; +- + fs_lock(dev, 0); + if (rx_conf->rx_deferred_start) { + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { +@@ -442,12 +428,16 @@ fs_rx_queue_setup(struct rte_eth_dev *dev, + rxq->info.nb_desc = nb_rx_desc; + rxq->priv = PRIV(dev); + rxq->sdev = PRIV(dev)->subs; +- ret = rte_intr_efd_enable(intr_handle, 1); +- if (ret < 0) { ++#ifdef RTE_EXEC_ENV_LINUX ++ rxq->event_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); ++ if (rxq->event_fd < 0) { ++ ERROR("Failed to create an eventfd: %s", strerror(errno)); + fs_unlock(dev, 0); +- return ret; ++ return -errno; + } +- rxq->event_fd = rte_intr_efds_index_get(intr_handle, 0); ++#else ++ rxq->event_fd = -1; ++#endif + dev->data->rx_queues[rx_queue_id] = rxq; + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) { + ret = rte_eth_rx_queue_setup(PORT_ID(sdev), diff --git a/dpdk/drivers/net/fm10k/fm10k.h b/dpdk/drivers/net/fm10k/fm10k.h index 7cfa29faa8..17a7056c45 100644 --- a/dpdk/drivers/net/fm10k/fm10k.h @@ -23362,7 +42270,7 @@ index 1269250e23..10ce5a7582 100644 nb_pkts_recd += var; if (likely(var != RTE_FM10K_DESCS_PER_LOOP)) diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c -index 1853511c3b..e8d9aaba84 100644 +index 1853511c3b..992ba37b3f 100644 --- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +++ b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c @@ -255,7 +255,7 @@ static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask); @@ -23383,6 +42291,46 @@ index 1853511c3b..e8d9aaba84 100644 err = hinic_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK); if (err) { +@@ -980,6 +980,7 @@ static int hinic_dev_start(struct rte_eth_dev *dev) + int rc; + char *name; + struct hinic_nic_dev *nic_dev; ++ uint16_t i; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + name = dev->data->name; +@@ -1047,6 +1048,11 @@ static int hinic_dev_start(struct rte_eth_dev *dev) + + rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = 
RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + + en_port_fail: +@@ -1169,6 +1175,7 @@ static int hinic_dev_stop(struct rte_eth_dev *dev) + uint16_t port_id; + struct hinic_nic_dev *nic_dev; + struct rte_eth_link link; ++ uint16_t i; + + nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + name = dev->data->name; +@@ -1215,6 +1222,11 @@ static int hinic_dev_stop(struct rte_eth_dev *dev) + hinic_free_all_rx_mbuf(dev); + hinic_free_all_tx_mbuf(dev); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h index 5eca8b10b9..8e6251f69f 100644 --- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.h @@ -23432,7 +42380,7 @@ index 2688817f37..f09b1a6e1e 100644 &sqe_info, &off_info))) { txq->txq_stats.off_errs++; diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c -index 2ce59d8de6..3495e2acc1 100644 +index 2ce59d8de6..9ca2c0cbb9 100644 --- a/dpdk/drivers/net/hns3/hns3_cmd.c +++ b/dpdk/drivers/net/hns3/hns3_cmd.c @@ -466,7 +466,7 @@ hns3_mask_capability(struct hns3_hw *hw, @@ -23444,7 +42392,68 @@ index 2ce59d8de6..3495e2acc1 100644 i, hns3_get_caps_name(i)); } } -@@ -635,39 +635,6 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) +@@ -511,6 +511,8 @@ hns3_parse_capability(struct hns3_hw *hw, + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_RAS_IMP_B, 1); + if (hns3_get_bit(caps, HNS3_CAPS_TM_B)) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TM_B, 1); ++ if (hns3_get_bit(caps, HNS3_CAPS_GRO_B)) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1); + } + + static uint32_t +@@ -523,6 +525,41 @@ hns3_build_api_caps(void) + return rte_cpu_to_le_32(api_caps); + } + ++static void ++hns3_set_dcb_capability(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct rte_pci_device *pci_dev; ++ struct rte_eth_dev *eth_dev; ++ uint16_t device_id; ++ ++ if (hns->is_vf) ++ return; ++ ++ eth_dev = &rte_eth_devices[hw->data->port_id]; ++ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); ++ device_id = pci_dev->id.device_id; ++ ++ if (device_id == HNS3_DEV_ID_25GE_RDMA || ++ device_id == HNS3_DEV_ID_50GE_RDMA || ++ device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || ++ device_id == HNS3_DEV_ID_200G_RDMA) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); ++} ++ ++static void ++hns3_set_default_capability(struct hns3_hw *hw) ++{ ++ hns3_set_dcb_capability(hw); ++ ++ /* ++ * The firmware of the network engines with HIP08 do not report some ++ * capabilities, like GRO. Set default capabilities for it. ++ */ ++ if (hw->revision < PCI_REVISION_ID_HIP09_A) ++ hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_GRO_B, 1); ++} ++ + static int + hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) + { +@@ -540,6 +577,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) + return ret; + + hw->fw_version = rte_le_to_cpu_32(resp->firmware); ++ ++ hns3_set_default_capability(hw); ++ + /* + * Make sure mask the capability before parse capability because it + * may overwrite resp's data. 
+@@ -635,39 +675,6 @@ hns3_firmware_compat_config(struct hns3_hw *hw, bool is_init) struct hns3_cmd_desc desc; uint32_t compat = 0; @@ -23484,7 +42493,17 @@ index 2ce59d8de6..3495e2acc1 100644 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_FIRMWARE_COMPAT_CFG, false); req = (struct hns3_firmware_compat_cmd *)desc.data; -@@ -736,7 +703,7 @@ hns3_cmd_init(struct hns3_hw *hw) +@@ -696,9 +703,6 @@ hns3_cmd_init(struct hns3_hw *hw) + hw->cmq.csq.next_to_use = 0; + hw->cmq.crq.next_to_clean = 0; + hw->cmq.crq.next_to_use = 0; +- hw->mbx_resp.head = 0; +- hw->mbx_resp.tail = 0; +- hw->mbx_resp.lost = 0; + hns3_cmd_init_regs(hw); + + rte_spinlock_unlock(&hw->cmq.crq.lock); +@@ -736,7 +740,7 @@ hns3_cmd_init(struct hns3_hw *hw) return 0; /* @@ -23494,19 +42513,53 @@ index 2ce59d8de6..3495e2acc1 100644 * fails to take over the PHY. */ diff --git a/dpdk/drivers/net/hns3/hns3_cmd.h b/dpdk/drivers/net/hns3/hns3_cmd.h -index 81bc9e9d98..82c999061d 100644 +index 81bc9e9d98..e1fab05489 100644 --- a/dpdk/drivers/net/hns3/hns3_cmd.h +++ b/dpdk/drivers/net/hns3/hns3_cmd.h -@@ -323,7 +323,7 @@ enum HNS3_CAPS_BITS { +@@ -56,11 +56,6 @@ enum hns3_cmd_return_status { + HNS3_CMD_ROH_CHECK_FAIL = 12 + }; + +-struct hns3_misc_vector { +- uint8_t *addr; +- int vector_irq; +-}; +- + struct hns3_cmq { + struct hns3_cmq_ring csq; + struct hns3_cmq_ring crq; +@@ -323,7 +318,8 @@ enum HNS3_CAPS_BITS { HNS3_CAPS_UDP_TUNNEL_CSUM_B, HNS3_CAPS_RAS_IMP_B, HNS3_CAPS_RXD_ADV_LAYOUT_B = 15, - HNS3_CAPS_TM_B = 17, + HNS3_CAPS_TM_B = 19, ++ HNS3_CAPS_GRO_B = 20, }; /* Capabilities of VF dependent on the PF */ -@@ -603,7 +603,6 @@ struct hns3_cfg_gro_status_cmd { +@@ -394,20 +390,6 @@ struct hns3_pkt_buf_alloc { + struct hns3_shared_buf s_buf; + }; + +-#define HNS3_RX_COM_WL_EN_B 15 +-struct hns3_rx_com_wl_buf_cmd { +- uint16_t high_wl; +- uint16_t low_wl; +- uint8_t rsv[20]; +-}; +- +-#define HNS3_RX_PKT_EN_B 15 +-struct hns3_rx_pkt_buf_cmd { +- uint16_t high_pkt; +- uint16_t low_pkt; +- uint8_t rsv[20]; +-}; +- + #define HNS3_PF_STATE_DONE_B 0 + #define HNS3_PF_STATE_MAIN_B 1 + #define HNS3_PF_STATE_BOND_B 2 +@@ -603,7 +585,6 @@ struct hns3_cfg_gro_status_cmd { #define HNS3_RSS_HASH_KEY_OFFSET_B 4 @@ -23514,11 +42567,129 @@ index 81bc9e9d98..82c999061d 100644 #define HNS3_RSS_HASH_KEY_NUM 16 /* Configure the algorithm mode and Hash Key, opcode:0x0D01 */ struct hns3_rss_generic_config_cmd { +@@ -623,6 +604,7 @@ struct hns3_rss_input_tuple_cmd { + #define HNS3_RSS_CFG_TBL_SIZE_H 4 + #define HNS3_RSS_CFG_TBL_BW_H 2 + #define HNS3_RSS_CFG_TBL_BW_L 8 ++#define HNS3_RSS_CFG_TBL_BW_H_M 0x3 + + /* Configure the indirection table, opcode:0x0D07 */ + struct hns3_rss_indirection_table_cmd { +@@ -804,6 +786,12 @@ struct hns3_sfp_type { + #define HNS3_FIBER_LINK_SPEED_10M_BIT BIT(7) + #define HNS3_FIBER_LINK_SPEED_200G_BIT BIT(8) + ++#define HNS3_FIBER_FEC_AUTO_BIT BIT(0) ++#define HNS3_FIBER_FEC_BASER_BIT BIT(1) ++#define HNS3_FIBER_FEC_RS_BIT BIT(2) ++#define HNS3_FIBER_FEC_LLRS_BIT BIT(3) ++#define HNS3_FIBER_FEC_NOFEC_BIT BIT(4) ++ + struct hns3_sfp_info_cmd { + uint32_t sfp_speed; + uint8_t query_type; /* 0: sfp speed, 1: active */ +@@ -813,7 +801,8 @@ struct hns3_sfp_info_cmd { + uint8_t autoneg_ability; + uint32_t supported_speed; /* speed supported by current media */ + uint32_t module_type; +- uint8_t rsv1[8]; ++ uint8_t fec_ability; /* supported fec modes, see HNS3_FIBER_FEC_XXX_BIT */ ++ uint8_t rsv1[7]; + }; + + #define HNS3_MAC_CFG_FEC_AUTO_EN_B 0 +@@ -984,6 +973,12 @@ struct hns3_dev_specs_0_cmd { + uint32_t max_tm_rate; + }; + 
++struct hns3_dev_specs_1_cmd { ++ uint8_t rsv0[12]; ++ uint8_t min_tx_pkt_len; ++ uint8_t rsv1[11]; ++}; ++ + struct hns3_query_rpu_cmd { + uint32_t tc_queue_num; + uint32_t rsv1[2]; diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c -index eac2aa1040..78158401f2 100644 +index eac2aa1040..ace5be01d5 100644 --- a/dpdk/drivers/net/hns3/hns3_common.c +++ b/dpdk/drivers/net/hns3/hns3_common.c -@@ -216,7 +216,7 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) +@@ -11,6 +11,7 @@ + #include "hns3_logs.h" + #include "hns3_regs.h" + #include "hns3_rxtx.h" ++#include "hns3_dcb.h" + + int + hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, +@@ -69,8 +70,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | +- RTE_ETH_RX_OFFLOAD_RSS_HASH | +- RTE_ETH_RX_OFFLOAD_TCP_LRO); ++ RTE_ETH_RX_OFFLOAD_RSS_HASH); + info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | +@@ -90,13 +90,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) + info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; + ++ info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP | ++ RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP; + if (hns3_dev_get_support(hw, INDEP_TXRX)) +- info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | +- RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +- info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; ++ info->dev_capa |= RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | ++ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + if (hns3_dev_get_support(hw, PTP)) + info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP; ++ if (hns3_dev_get_support(hw, GRO)) ++ info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO; + + info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = HNS3_MAX_RING_DESC, +@@ -128,7 +131,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + }; + + info->reta_size = hw->rss_ind_tbl_size; +- info->hash_key_size = HNS3_RSS_KEY_SIZE; ++ info->hash_key_size = hw->rss_key_size; + info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT; + + info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE; +@@ -159,6 +162,9 @@ hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + ++ if (value == NULL || extra_args == NULL) ++ return 0; ++ + if (strcmp(value, "vec") == 0) + hint = HNS3_IO_FUNC_HINT_VEC; + else if (strcmp(value, "sve") == 0) +@@ -199,6 +205,9 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + ++ if (value == NULL || extra_args == NULL) ++ return 0; ++ + val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL); + *(uint64_t *)extra_args = val; + +@@ -212,11 +221,14 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + + RTE_SET_USED(key); + ++ if (value == NULL || extra_args == NULL) ++ return 0; ++ + val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL); /* * 500ms is empirical value in process of mailbox communication. If @@ -23527,7 +42698,7 @@ index eac2aa1040..78158401f2 100644 * communication may fail. 
*/ if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX) -@@ -236,6 +236,12 @@ hns3_parse_devargs(struct rte_eth_dev *dev) +@@ -236,6 +248,12 @@ hns3_parse_devargs(struct rte_eth_dev *dev) uint64_t dev_caps_mask = 0; struct rte_kvargs *kvlist; @@ -23540,7 +42711,40 @@ index eac2aa1040..78158401f2 100644 if (dev->device->devargs == NULL) return; -@@ -603,7 +609,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw) +@@ -332,7 +350,7 @@ hns3_set_mc_addr_chk_param(struct hns3_hw *hw, + hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) " + "invalid. valid range: 0~%d", + nb_mc_addr, HNS3_MC_MACADDR_NUM); +- return -EINVAL; ++ return -ENOSPC; + } + + /* Check if input mac addresses are valid */ +@@ -390,6 +408,7 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + uint32_t nb_mc_addr) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct rte_ether_addr *addr; + int cur_addr_num; + int set_addr_num; +@@ -397,6 +416,15 @@ hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev, + int ret; + int i; + ++ if (mc_addr_set == NULL || nb_mc_addr == 0) { ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_configure_all_mc_mac_addr(hns, true); ++ if (ret == 0) ++ hw->mc_addrs_num = 0; ++ rte_spinlock_unlock(&hw->lock); ++ return ret; ++ } ++ + /* Check if input parameters are valid */ + ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr); + if (ret) +@@ -603,7 +631,7 @@ hns3_init_ring_with_vector(struct hns3_hw *hw) hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num); for (i = 0; i < hw->intr_tqps_num; i++) { /* @@ -23549,8 +42753,132 @@ index eac2aa1040..78158401f2 100644 * configuration for interrupt coalesce of queue's interrupt. */ hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX, +@@ -761,3 +789,109 @@ hns3_restore_rx_interrupt(struct hns3_hw *hw) + + return 0; + } ++ ++int ++hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id) ++{ ++ struct rte_pci_device *pci_dev; ++ struct rte_eth_dev *eth_dev; ++ uint8_t revision; ++ int ret; ++ ++ eth_dev = &rte_eth_devices[hw->data->port_id]; ++ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); ++ ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, ++ HNS3_PCI_REVISION_ID); ++ if (ret != HNS3_PCI_REVISION_ID_LEN) { ++ hns3_err(hw, "failed to read pci revision id, ret = %d", ret); ++ return -EIO; ++ } ++ ++ *revision_id = revision; ++ ++ return 0; ++} ++ ++void ++hns3_set_default_dev_specifications(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ ++ hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; ++ hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; ++ hw->rss_key_size = HNS3_RSS_KEY_SIZE; ++ hw->intr.int_ql_max = HNS3_INTR_QL_NONE; ++ ++ if (hns->is_vf) ++ return; ++ ++ hw->max_tm_rate = HNS3_ETHER_MAX_RATE; ++} ++ ++static void ++hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct hns3_dev_specs_0_cmd *req0; ++ struct hns3_dev_specs_1_cmd *req1; ++ ++ req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; ++ req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data; ++ ++ hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; ++ hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); ++ hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); ++ hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); ++ hw->min_tx_pkt_len = req1->min_tx_pkt_len; ++ ++ if (hns->is_vf) ++ return; ++ ++ hw->max_tm_rate = 
rte_le_to_cpu_32(req0->max_tm_rate); ++} ++ ++static int ++hns3_check_dev_specifications(struct hns3_hw *hw) ++{ ++ if (hw->rss_ind_tbl_size == 0 || ++ hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { ++ hns3_err(hw, "the indirection table size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)", ++ hw->rss_ind_tbl_size, HNS3_RSS_IND_TBL_SIZE_MAX); ++ return -EINVAL; ++ } ++ ++ if (hw->rss_key_size == 0 || hw->rss_key_size > HNS3_RSS_KEY_SIZE_MAX) { ++ hns3_err(hw, "the RSS key size obtained (%u) is invalid, and should not be zero or exceed the maximum(%u)", ++ hw->rss_key_size, HNS3_RSS_KEY_SIZE_MAX); ++ return -EINVAL; ++ } ++ ++ if (hw->rss_key_size > HNS3_RSS_KEY_SIZE) ++ hns3_warn(hw, "the RSS key size obtained (%u) is greater than the default key size (%u)", ++ hw->rss_key_size, HNS3_RSS_KEY_SIZE); ++ ++ return 0; ++} ++ ++int ++hns3_query_dev_specifications(struct hns3_hw *hw) ++{ ++ struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; ++ int ret; ++ int i; ++ ++ for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { ++ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, ++ true); ++ desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); ++ } ++ hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); ++ ++ ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); ++ if (ret) ++ return ret; ++ ++ hns3_parse_dev_specifications(hw, desc); ++ ++ return hns3_check_dev_specifications(hw); ++} +diff --git a/dpdk/drivers/net/hns3/hns3_common.h b/dpdk/drivers/net/hns3/hns3_common.h +index 0dbb1c0413..47d6e34269 100644 +--- a/dpdk/drivers/net/hns3/hns3_common.h ++++ b/dpdk/drivers/net/hns3/hns3_common.h +@@ -58,4 +58,8 @@ int hns3_map_rx_interrupt(struct rte_eth_dev *dev); + void hns3_unmap_rx_interrupt(struct rte_eth_dev *dev); + int hns3_restore_rx_interrupt(struct hns3_hw *hw); + +-#endif /* _HNS3_COMMON_H_ */ ++int hns3_get_pci_revision_id(struct hns3_hw *hw, uint8_t *revision_id); ++void hns3_set_default_dev_specifications(struct hns3_hw *hw); ++int hns3_query_dev_specifications(struct hns3_hw *hw); ++ ++#endif /* HNS3_COMMON_H */ diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c -index 3d0159d787..e4417e87fd 100644 +index 3d0159d787..1a156cca7e 100644 --- a/dpdk/drivers/net/hns3/hns3_dcb.c +++ b/dpdk/drivers/net/hns3/hns3_dcb.c @@ -25,7 +25,7 @@ @@ -23573,7 +42901,41 @@ index 3d0159d787..e4417e87fd 100644 6 * 8, /* Port level */ 6 * 256 /* Qset level */ }; -@@ -1532,7 +1532,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) +@@ -237,9 +237,9 @@ hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr) + static int + hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw) + { +-#define DEFAULT_TC_WEIGHT 1 + #define DEFAULT_TC_OFFSET 14 + struct hns3_ets_tc_weight_cmd *ets_weight; ++ struct hns3_pg_info *pg_info; + struct hns3_cmd_desc desc; + uint8_t i; + +@@ -247,13 +247,6 @@ hns3_dcb_ets_tc_dwrr_cfg(struct hns3_hw *hw) + ets_weight = (struct hns3_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNS3_MAX_TC_NUM; i++) { +- struct hns3_pg_info *pg_info; +- +- ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT; +- +- if (!(hw->hw_tc_map & BIT(i))) +- continue; +- + pg_info = &hw->dcb_info.pg_info[hw->dcb_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } +@@ -1100,7 +1093,7 @@ hns3_dcb_map_cfg(struct hns3_hw *hw) + + ret = hns3_pg_to_pri_map(hw); + if (ret) { +- hns3_err(hw, "pri_to_pg mapping fail: %d", ret); ++ hns3_err(hw, "pg_to_pri mapping fail: %d", ret); + return ret; + 
} + +@@ -1532,7 +1525,7 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) ret = hns3_dcb_schd_setup_hw(hw); if (ret) { @@ -23582,7 +42944,7 @@ index 3d0159d787..e4417e87fd 100644 return ret; } -@@ -1737,7 +1737,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) +@@ -1737,7 +1730,7 @@ hns3_get_fc_mode(struct hns3_hw *hw, enum rte_eth_fc_mode mode) * hns3_dcb_pfc_enable - Enable priority flow control * @dev: pointer to ethernet device * @@ -23592,19 +42954,231 @@ index 3d0159d787..e4417e87fd 100644 int hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c -index 0bd12907d8..40a33549e0 100644 +index 0bd12907d8..a805046d19 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev.c -@@ -227,17 +227,11 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) +@@ -5,7 +5,6 @@ + #include <rte_alarm.h> + #include <rte_bus_pci.h> + #include <ethdev_pci.h> +-#include <rte_pci.h> + + #include "hns3_ethdev.h" + #include "hns3_common.h" +@@ -16,6 +15,7 @@ + #include "hns3_dcb.h" + #include "hns3_mp.h" + #include "hns3_flow.h" ++#include "hns3_ptp.h" + + #define HNS3_SERVICE_INTERVAL 1000000 /* us */ + #define HNS3_SERVICE_QUICK_INTERVAL 10 +@@ -43,6 +43,7 @@ + #define HNS3_VECTOR0_IMP_CMDQ_ERR_B 4U + #define HNS3_VECTOR0_IMP_RD_POISON_B 5U + #define HNS3_VECTOR0_ALL_MSIX_ERR_B 6U ++#define HNS3_VECTOR0_TRIGGER_IMP_RESET_B 7U + + #define HNS3_RESET_WAIT_MS 100 + #define HNS3_RESET_WAIT_CNT 200 +@@ -60,6 +61,19 @@ enum hns3_evt_cause { + HNS3_VECTOR0_EVENT_OTHER, + }; + ++struct hns3_intr_state { ++ uint32_t vector0_state; ++ uint32_t cmdq_state; ++ uint32_t hw_err_state; ++}; ++ ++#define HNS3_SPEEDS_SUPP_FEC (RTE_ETH_LINK_SPEED_10G | \ ++ RTE_ETH_LINK_SPEED_25G | \ ++ RTE_ETH_LINK_SPEED_40G | \ ++ RTE_ETH_LINK_SPEED_50G | \ ++ RTE_ETH_LINK_SPEED_100G | \ ++ RTE_ETH_LINK_SPEED_200G) ++ + static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { + { RTE_ETH_SPEED_NUM_10G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | + RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | +@@ -83,8 +97,7 @@ static const struct rte_eth_fec_capa speed_fec_capa_tbl[] = { + RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | + RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, + +- { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) | +- RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | ++ { RTE_ETH_SPEED_NUM_200G, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) | + RTE_ETH_FEC_MODE_CAPA_MASK(RS) } + }; + +@@ -120,63 +133,51 @@ hns3_pf_enable_irq0(struct hns3_hw *hw) + } + + static enum hns3_evt_cause +-hns3_proc_imp_reset_event(struct hns3_adapter *hns, bool is_delay, +- uint32_t *vec_val) ++hns3_proc_imp_reset_event(struct hns3_adapter *hns, uint32_t *vec_val) + { + struct hns3_hw *hw = &hns->hw; + + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending); + *vec_val = BIT(HNS3_VECTOR0_IMPRESET_INT_B); +- if (!is_delay) { +- hw->reset.stats.imp_cnt++; +- hns3_warn(hw, "IMP reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, "IMP reset detected, don't clear reset status"); +- } ++ hw->reset.stats.imp_cnt++; ++ hns3_warn(hw, "IMP reset detected, clear reset status"); + + return HNS3_VECTOR0_EVENT_RST; + } + + static enum hns3_evt_cause +-hns3_proc_global_reset_event(struct hns3_adapter *hns, bool is_delay, +- uint32_t *vec_val) ++hns3_proc_global_reset_event(struct hns3_adapter *hns, uint32_t *vec_val) + { + struct 
hns3_hw *hw = &hns->hw; + + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + hns3_atomic_set_bit(HNS3_GLOBAL_RESET, &hw->reset.pending); + *vec_val = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); +- if (!is_delay) { +- hw->reset.stats.global_cnt++; +- hns3_warn(hw, "Global reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, +- "Global reset detected, don't clear reset status"); +- } ++ hw->reset.stats.global_cnt++; ++ hns3_warn(hw, "Global reset detected, clear reset status"); + + return HNS3_VECTOR0_EVENT_RST; + } + ++static void ++hns3_query_intr_state(struct hns3_hw *hw, struct hns3_intr_state *state) ++{ ++ state->vector0_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); ++ state->cmdq_state = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); ++ state->hw_err_state = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); ++} ++ + static enum hns3_evt_cause + hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + { + struct hns3_hw *hw = &hns->hw; +- uint32_t vector0_int_stats; +- uint32_t cmdq_src_val; +- uint32_t hw_err_src_reg; ++ struct hns3_intr_state state; + uint32_t val; + enum hns3_evt_cause ret; +- bool is_delay; + +- /* fetch the events from their corresponding regs */ +- vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); +- cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); +- hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); ++ hns3_query_intr_state(hw, &state); + +- is_delay = clearval == NULL ? true : false; + /* + * Assumption: If by any chance reset and mailbox events are reported + * together then we will only process reset event and defer the +@@ -184,60 +185,77 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + * RX CMDQ event this time we would receive again another interrupt + * from H/W just for the mailbox. 
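[Editor's note] The reworked hns3_check_event_cause() around this comment enforces exactly the priority the comment describes: when reset and mailbox events arrive together, only the reset bit is written back for clearing, so the still-pending mailbox bit raises a fresh interrupt later. A compact sketch of that decode-by-priority idea, with hypothetical demo_* names standing in for the hns3 registers and event types:

#include <stdint.h>

enum demo_evt { DEMO_EVT_NONE, DEMO_EVT_RESET, DEMO_EVT_MBX };

#define DEMO_RESET_BIT (1u << 0)
#define DEMO_MBX_BIT   (1u << 1)

/*
 * Decode in strict priority order: a reset event puts only its own bit
 * into *clearval, deliberately leaving the mailbox bit pending.
 */
static enum demo_evt
demo_decode_cause(uint32_t status, uint32_t *clearval)
{
        if (status & DEMO_RESET_BIT) {
                *clearval = DEMO_RESET_BIT; /* DEMO_MBX_BIT stays pending */
                return DEMO_EVT_RESET;
        }
        if (status & DEMO_MBX_BIT) {
                *clearval = DEMO_MBX_BIT;
                return DEMO_EVT_MBX;
        }
        *clearval = status;
        return DEMO_EVT_NONE;
}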
+ */ +- if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int_stats) { /* IMP */ +- ret = hns3_proc_imp_reset_event(hns, is_delay, &val); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & state.vector0_state) { /* IMP */ ++ ret = hns3_proc_imp_reset_event(hns, &val); + goto out; + } + + /* Global reset */ +- if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int_stats) { +- ret = hns3_proc_global_reset_event(hns, is_delay, &val); ++ if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & state.vector0_state) { ++ ret = hns3_proc_global_reset_event(hns, &val); + goto out; + } + + /* Check for vector0 1588 event source */ +- if (BIT(HNS3_VECTOR0_1588_INT_B) & vector0_int_stats) { ++ if (BIT(HNS3_VECTOR0_1588_INT_B) & state.vector0_state) { + val = BIT(HNS3_VECTOR0_1588_INT_B); + ret = HNS3_VECTOR0_EVENT_PTP; + goto out; + } + + /* check for vector0 msix event source */ +- if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK || +- hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) { +- val = vector0_int_stats | hw_err_src_reg; ++ if (state.vector0_state & HNS3_VECTOR0_REG_MSIX_MASK || ++ state.hw_err_state & HNS3_RAS_REG_NFE_MASK) { ++ val = state.vector0_state | state.hw_err_state; + ret = HNS3_VECTOR0_EVENT_ERR; + goto out; + } + + /* check for vector0 mailbox(=CMDQ RX) event source */ +- if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_val) { +- cmdq_src_val &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); +- val = cmdq_src_val; ++ if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & state.cmdq_state) { ++ state.cmdq_state &= ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B); ++ val = state.cmdq_state; + ret = HNS3_VECTOR0_EVENT_MBX; + goto out; + } + +- val = vector0_int_stats; ++ val = state.vector0_state; + ret = HNS3_VECTOR0_EVENT_OTHER; +-out: + +- if (clearval) +- *clearval = val; ++out: ++ *clearval = val; return ret; } -static bool -hns3_is_1588_event_type(uint32_t event_type) --{ ++void ++hns3_clear_reset_event(struct hns3_hw *hw) + { - return (event_type == HNS3_VECTOR0_EVENT_PTP); --} -- ++ uint32_t clearval = 0; ++ ++ switch (hw->reset.level) { ++ case HNS3_IMP_RESET: ++ clearval = BIT(HNS3_VECTOR0_IMPRESET_INT_B); ++ break; ++ case HNS3_GLOBAL_RESET: ++ clearval = BIT(HNS3_VECTOR0_GLOBALRESET_INT_B); ++ break; ++ default: ++ break; ++ } ++ ++ if (clearval == 0) ++ return; ++ ++ hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, clearval); ++ ++ hns3_pf_enable_irq0(hw); + } + static void hns3_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) { @@ -23614,7 +43188,88 @@ index 0bd12907d8..40a33549e0 100644 hns3_write_dev(hw, HNS3_MISC_RESET_STS_REG, regclr); else if (event_type == HNS3_VECTOR0_EVENT_MBX) hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr); -@@ -324,7 +318,7 @@ hns3_interrupt_handler(void *param) +@@ -292,6 +310,47 @@ hns3_handle_mac_tnl(struct hns3_hw *hw) + } + } + ++static void ++hns3_delay_before_clear_event_cause(struct hns3_hw *hw, uint32_t event_type, uint32_t regclr) ++{ ++#define IMPRESET_WAIT_MS_TIME 5 ++ ++ if (event_type == HNS3_VECTOR0_EVENT_RST && ++ regclr & BIT(HNS3_VECTOR0_IMPRESET_INT_B) && ++ hw->revision >= PCI_REVISION_ID_HIP09_A) { ++ rte_delay_ms(IMPRESET_WAIT_MS_TIME); ++ hns3_dbg(hw, "wait firmware watchdog initialization completed."); ++ } ++} ++ ++static bool ++hns3_reset_event_valid(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ enum hns3_reset_level new_req = HNS3_NONE_RESET; ++ enum hns3_reset_level last_req; ++ uint32_t vector0_int; ++ ++ vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_int) ++ 
new_req = HNS3_IMP_RESET; ++ else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_int) ++ new_req = HNS3_GLOBAL_RESET; ++ if (new_req == HNS3_NONE_RESET) ++ return true; ++ ++ last_req = hns3_get_reset_level(hns, &hw->reset.pending); ++ if (last_req == HNS3_NONE_RESET) ++ return true; ++ ++ if (new_req > last_req) ++ return true; ++ ++ hns3_warn(hw, "last_req (%u) less than or equal to new_req (%u) ignore", ++ last_req, new_req); ++ return false; ++} ++ + static void + hns3_interrupt_handler(void *param) + { +@@ -299,24 +358,25 @@ hns3_interrupt_handler(void *param) + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + enum hns3_evt_cause event_cause; ++ struct hns3_intr_state state; + uint32_t clearval = 0; +- uint32_t vector0_int; +- uint32_t ras_int; +- uint32_t cmdq_int; ++ ++ if (!hns3_reset_event_valid(hw)) ++ return; + + /* Disable interrupt */ + hns3_pf_disable_irq0(hw); + + event_cause = hns3_check_event_cause(hns, &clearval); +- vector0_int = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); +- ras_int = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG); +- cmdq_int = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG); ++ hns3_query_intr_state(hw, &state); ++ hns3_delay_before_clear_event_cause(hw, event_cause, clearval); + hns3_clear_event_cause(hw, event_cause, clearval); + /* vector 0 interrupt is shared with reset and mailbox source events. */ + if (event_cause == HNS3_VECTOR0_EVENT_ERR) { + hns3_warn(hw, "received interrupt: vector0_int_stat:0x%x " + "ras_int_stat:0x%x cmdq_int_stat:0x%x", +- vector0_int, ras_int, cmdq_int); ++ state.vector0_state, state.hw_err_state, ++ state.cmdq_state); + hns3_handle_mac_tnl(hw); + hns3_handle_error(hns); + } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { +@@ -324,14 +384,19 @@ hns3_interrupt_handler(void *param) hns3_schedule_reset(hns); } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { hns3_dev_handle_mbx_msg(hw); @@ -23622,8 +43277,22 @@ index 0bd12907d8..40a33549e0 100644 + } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " "ras_int_stat:0x%x cmdq_int_stat:0x%x", - vector0_int, ras_int, cmdq_int); -@@ -574,7 +568,7 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, +- vector0_int, ras_int, cmdq_int); ++ state.vector0_state, state.hw_err_state, ++ state.cmdq_state); + } + + /* Enable interrupt if it is not cause by reset */ +- hns3_pf_enable_irq0(hw); ++ if (event_cause == HNS3_VECTOR0_EVENT_ERR || ++ event_cause == HNS3_VECTOR0_EVENT_MBX || ++ event_cause == HNS3_VECTOR0_EVENT_PTP || ++ event_cause == HNS3_VECTOR0_EVENT_OTHER) ++ hns3_pf_enable_irq0(hw); + } + + static int +@@ -574,7 +639,7 @@ hns3_set_vlan_rx_offload_cfg(struct hns3_adapter *hns, hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B, vcfg->vlan2_vlan_prionly ? 1 : 0); @@ -23632,7 +43301,7 @@ index 0bd12907d8..40a33549e0 100644 hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B, vcfg->strip_tag1_discard_en ? 1 : 0); hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B, -@@ -784,7 +778,7 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, +@@ -784,7 +849,7 @@ hns3_set_vlan_tx_offload_cfg(struct hns3_adapter *hns, vcfg->insert_tag2_en ? 1 : 0); hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0); @@ -23641,7 +43310,16 @@ index 0bd12907d8..40a33549e0 100644 hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B, vcfg->tag_shift_mode_en ? 
1 : 0); -@@ -2033,11 +2027,9 @@ hns3_dev_configure(struct rte_eth_dev *dev) +@@ -1678,7 +1743,7 @@ hns3_set_default_mac_addr(struct rte_eth_dev *dev, + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); + hns3_warn(hw, +- "Failed to roll back to del setted mac addr(%s): %d", ++ "Failed to roll back to del set mac addr(%s): %d", + mac_str, ret_val); + } + +@@ -2033,11 +2098,9 @@ hns3_dev_configure(struct rte_eth_dev *dev) goto cfg_err; } @@ -23653,7 +43331,7 @@ index 0bd12907d8..40a33549e0 100644 ret = hns3_dev_rss_hash_update(dev, &rss_conf); if (ret) goto cfg_err; -@@ -2093,7 +2085,6 @@ static int +@@ -2093,7 +2156,6 @@ static int hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) { struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); @@ -23661,7 +43339,7 @@ index 0bd12907d8..40a33549e0 100644 int err; int ret; -@@ -2103,22 +2094,20 @@ hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) +@@ -2103,22 +2165,20 @@ hns3_config_mtu(struct hns3_hw *hw, uint16_t mps) return ret; } @@ -23688,18 +43366,135 @@ index 0bd12907d8..40a33549e0 100644 return ret; } -@@ -2767,6 +2756,10 @@ hns3_get_capability(struct hns3_hw *hw) - } - hw->revision = revision; +@@ -2279,6 +2339,7 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + struct rte_eth_link new_link; + int ret; + ++ memset(&new_link, 0, sizeof(new_link)); + /* When port is stopped, report link down. */ + if (eth_dev->data->dev_started == 0) { + new_link.link_autoneg = mac->link_autoneg; +@@ -2302,7 +2363,6 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + rte_delay_ms(HNS3_LINK_CHECK_INTERVAL); + } while (retry_cnt--); + +- memset(&new_link, 0, sizeof(new_link)); + hns3_setup_linkstatus(eth_dev, &new_link); + + out: +@@ -2675,99 +2735,18 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed) + return 0; + } + +-static void +-hns3_set_default_dev_specifications(struct hns3_hw *hw) +-{ +- hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; +- hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; +- hw->rss_key_size = HNS3_RSS_KEY_SIZE; +- hw->max_tm_rate = HNS3_ETHER_MAX_RATE; +- hw->intr.int_ql_max = HNS3_INTR_QL_NONE; +-} +- +-static void +-hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +-{ +- struct hns3_dev_specs_0_cmd *req0; +- +- req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; +- +- hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; +- hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); +- hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); +- hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); +- hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); +-} +- +-static int +-hns3_check_dev_specifications(struct hns3_hw *hw) +-{ +- if (hw->rss_ind_tbl_size == 0 || +- hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { +- hns3_err(hw, "the size of hash lookup table configured (%u)" +- " exceeds the maximum(%u)", hw->rss_ind_tbl_size, +- HNS3_RSS_IND_TBL_SIZE_MAX); +- return -EINVAL; +- } +- +- return 0; +-} +- +-static int +-hns3_query_dev_specifications(struct hns3_hw *hw) +-{ +- struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; +- int ret; +- int i; +- +- for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, +- true); +- desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- } +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); +- +- ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); +- if (ret) +- return ret; +- +- 
hns3_parse_dev_specifications(hw, desc); +- +- return hns3_check_dev_specifications(hw); +-} +- + static int + hns3_get_capability(struct hns3_hw *hw) + { + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); +- struct rte_pci_device *pci_dev; + struct hns3_pf *pf = &hns->pf; +- struct rte_eth_dev *eth_dev; +- uint16_t device_id; +- uint8_t revision; + int ret; +- eth_dev = &rte_eth_devices[hw->data->port_id]; +- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); +- device_id = pci_dev->id.device_id; +- +- if (device_id == HNS3_DEV_ID_25GE_RDMA || +- device_id == HNS3_DEV_ID_50GE_RDMA || +- device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || +- device_id == HNS3_DEV_ID_200G_RDMA) +- hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); +- +- /* Get PCI revision id */ +- ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, +- HNS3_PCI_REVISION_ID); +- if (ret != HNS3_PCI_REVISION_ID_LEN) { +- PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", +- ret); +- return -EIO; +- } +- hw->revision = revision; + ret = hns3_query_mac_stats_reg_num(hw); + if (ret) + return ret; -+ - if (revision < PCI_REVISION_ID_HIP09_A) { + +- if (revision < PCI_REVISION_ID_HIP09_A) { ++ if (hw->revision < PCI_REVISION_ID_HIP09_A) { hns3_set_default_dev_specifications(hw); hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; -@@ -2820,11 +2813,8 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; +@@ -2795,7 +2774,6 @@ hns3_get_capability(struct hns3_hw *hw) + hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; + hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; + hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; +- hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; + pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; +@@ -2820,11 +2798,8 @@ hns3_check_media_type(struct hns3_hw *hw, uint8_t media_type) } break; case HNS3_MEDIA_TYPE_FIBER: @@ -23712,7 +43507,7 @@ index 0bd12907d8..40a33549e0 100644 break; default: PMD_INIT_LOG(ERR, "Unknown media type = %u!", media_type); -@@ -2855,7 +2845,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) +@@ -2855,7 +2830,6 @@ hns3_get_board_configuration(struct hns3_hw *hw) hw->mac.media_type = cfg.media_type; hw->rss_size_max = cfg.rss_size_max; @@ -23720,7 +43515,7 @@ index 0bd12907d8..40a33549e0 100644 memcpy(hw->mac.mac_addr, cfg.mac_addr, RTE_ETHER_ADDR_LEN); hw->mac.phy_addr = cfg.phy_addr; hw->num_tx_desc = cfg.tqp_desc_num; -@@ -3420,7 +3409,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw, +@@ -3420,7 +3394,7 @@ hns3_only_alloc_priv_buff(struct hns3_hw *hw, * hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs * @hw: pointer to struct hns3_hw * @buf_alloc: pointer to buffer calculation data @@ -23729,7 +43524,33 @@ index 0bd12907d8..40a33549e0 100644 */ static int hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc) -@@ -4280,14 +4269,11 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev) +@@ -3712,7 +3686,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) + + if (cmdq_resp) { + PMD_INIT_LOG(ERR, +- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", ++ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.", + cmdq_resp); + return -EIO; + } +@@ -4109,6 +4083,7 @@ hns3_get_sfp_info(struct hns3_hw *hw, struct hns3_mac *mac_info) + mac_info->support_autoneg = resp->autoneg_ability; + 
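[Editor's note] The block deleted above is the PF-local copy of the device-specification query; the PCI revision probe likewise moves into the shared hns3_get_pci_revision_id() helper. The wrapped pattern is easy to get wrong: rte_pci_read_config() returns the number of bytes read, so success is a length comparison, not a zero check — the diff's own error test reflects this. A sketch under that assumption; the demo_* names and the error code choice are illustrative:

#include <errno.h>
#include <stdint.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>

#define DEMO_PCI_REVISION_ID     0x08 /* standard PCI config-space offset */
#define DEMO_PCI_REVISION_ID_LEN 1

static int
demo_read_pci_revision(struct rte_eth_dev *eth_dev, uint8_t *revision)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        int ret;

        ret = rte_pci_read_config(pci_dev, revision,
                                  DEMO_PCI_REVISION_ID_LEN,
                                  DEMO_PCI_REVISION_ID);
        /* rte_pci_read_config() returns the byte count, not 0, on success. */
        if (ret != DEMO_PCI_REVISION_ID_LEN)
                return -EIO;

        return 0;
}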
mac_info->link_autoneg = (resp->autoneg == 0) ? RTE_ETH_LINK_FIXED + : RTE_ETH_LINK_AUTONEG; ++ mac_info->fec_capa = resp->fec_ability; + } else { + mac_info->query_type = HNS3_DEFAULT_QUERY; + } +@@ -4191,7 +4166,7 @@ hns3_update_fiber_link_info(struct hns3_hw *hw) + mac->supported_speed = mac_info.supported_speed; + mac->support_autoneg = mac_info.support_autoneg; + mac->link_autoneg = mac_info.link_autoneg; +- ++ mac->fec_capa = mac_info.fec_capa; + return 0; + } + +@@ -4280,14 +4255,11 @@ hns3_update_link_info(struct rte_eth_dev *eth_dev) { struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; @@ -23746,7 +43567,7 @@ index 0bd12907d8..40a33549e0 100644 } static int -@@ -4396,10 +4382,12 @@ hns3_service_handler(void *param) +@@ -4396,10 +4368,12 @@ hns3_service_handler(void *param) struct hns3_adapter *hns = eth_dev->data->dev_private; struct hns3_hw *hw = &hns->hw; @@ -23761,7 +43582,7 @@ index 0bd12907d8..40a33549e0 100644 rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, eth_dev); } -@@ -4410,6 +4398,10 @@ hns3_init_hardware(struct hns3_adapter *hns) +@@ -4410,6 +4384,10 @@ hns3_init_hardware(struct hns3_adapter *hns) struct hns3_hw *hw = &hns->hw; int ret; @@ -23772,7 +43593,20 @@ index 0bd12907d8..40a33549e0 100644 ret = hns3_map_tqp(hw); if (ret) { PMD_INIT_LOG(ERR, "Failed to map tqp: %d", ret); -@@ -4550,14 +4542,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw) +@@ -4483,6 +4461,12 @@ hns3_init_hardware(struct hns3_adapter *hns) + goto err_mac_init; + } + ++ ret = hns3_ptp_init(hw); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "Failed to init PTP, ret = %d", ret); ++ goto err_mac_init; ++ } ++ + return 0; + + err_mac_init: +@@ -4550,14 +4534,14 @@ hns3_set_firber_default_support_speed(struct hns3_hw *hw) } /* @@ -23789,7 +43623,7 @@ index 0bd12907d8..40a33549e0 100644 * HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained * through it. If unsupported, use the SFP's speed as the value of the * supported_speed. 
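[Editor's note] The hns3_service_handler() change above makes the handler skip its per-second work while a reset is pending, yet still re-arm itself, so the service task survives the reset window. The general shape, assuming only the documented one-shot behaviour of rte_eal_alarm_set() (the callback must re-arm); demo_* names are placeholders:

#include <stdbool.h>
#include <rte_alarm.h>

#define DEMO_SERVICE_INTERVAL_US 1000000 /* 1 s, as HNS3_SERVICE_INTERVAL */

extern bool demo_reset_pending(void *priv);   /* assumed state query */
extern void demo_do_periodic_work(void *priv);

static void
demo_service_handler(void *priv)
{
        /* Skip the body during a reset, but never skip the re-arm. */
        if (!demo_reset_pending(priv))
                demo_do_periodic_work(priv);

        (void)rte_eal_alarm_set(DEMO_SERVICE_INTERVAL_US,
                                demo_service_handler, priv);
}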
-@@ -4574,11 +4566,13 @@ hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) +@@ -4574,11 +4558,13 @@ hns3_get_port_supported_speed(struct rte_eth_dev *eth_dev) if (ret) return ret; @@ -23806,7 +43640,18 @@ index 0bd12907d8..40a33549e0 100644 */ if (mac->supported_speed == 0) mac->supported_speed = -@@ -4650,13 +4644,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4620,6 +4606,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) + /* Get hardware io base address from pcie BAR2 IO space */ + hw->io_base = pci_dev->mem_resource[2].addr; + ++ ret = hns3_get_pci_revision_id(hw, &hw->revision); ++ if (ret) ++ return ret; ++ + /* Firmware command queue initialize */ + ret = hns3_cmd_init_queue(hw); + if (ret) { +@@ -4650,13 +4640,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_cmd_init; } @@ -23820,7 +43665,18 @@ index 0bd12907d8..40a33549e0 100644 hns3_config_all_msix_error(hw, true); ret = rte_intr_callback_register(pci_dev->intr_handle, -@@ -4682,7 +4669,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4667,10 +4650,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) + goto err_intr_callback_register; + } + +- ret = hns3_ptp_init(hw); +- if (ret) +- goto err_get_config; +- + /* Enable interrupt */ + rte_intr_enable(pci_dev->intr_handle); + hns3_pf_enable_irq0(hw); +@@ -4682,7 +4661,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_get_config; } @@ -23829,25 +43685,28 @@ index 0bd12907d8..40a33549e0 100644 if (ret) goto err_get_config; -@@ -4728,7 +4715,7 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4727,8 +4706,9 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) + hns3_fdir_filter_uninit(hns); err_fdir: hns3_uninit_umv_space(hw); ++ hns3_ptp_uninit(hw); err_init_hw: - hns3_tqp_stats_uninit(hw); + hns3_stats_uninit(hw); err_get_config: hns3_pf_disable_irq0(hw); rte_intr_disable(pci_dev->intr_handle); -@@ -4762,7 +4749,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) +@@ -4762,7 +4742,8 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) hns3_flow_uninit(eth_dev); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); - hns3_tqp_stats_uninit(hw); ++ hns3_ptp_uninit(hw); + hns3_stats_uninit(hw); hns3_config_mac_tnl_int(hw, false); hns3_pf_disable_irq0(hw); rte_intr_disable(pci_dev->intr_handle); -@@ -4847,7 +4834,7 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) +@@ -4847,7 +4828,7 @@ hns3_check_port_speed(struct hns3_hw *hw, uint32_t link_speeds) if (mac->media_type == HNS3_MEDIA_TYPE_COPPER) speed_bit = hns3_convert_link_speeds2bitmap_copper(link_speeds); @@ -23856,7 +43715,7 @@ index 0bd12907d8..40a33549e0 100644 speed_bit = hns3_convert_link_speeds2bitmap_fiber(link_speeds); if (!(speed_bit & supported_speed)) { -@@ -4991,32 +4978,35 @@ hns3_set_fiber_port_link_speed(struct hns3_hw *hw, +@@ -4991,32 +4972,35 @@ hns3_set_fiber_port_link_speed(struct hns3_hw *hw, return hns3_cfg_mac_speed_dup(hw, cfg->speed, cfg->duplex); } @@ -23910,7 +43769,55 @@ index 0bd12907d8..40a33549e0 100644 } return 0; -@@ -5327,7 +5317,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw) +@@ -5079,7 +5063,7 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) + if (ret) + goto err_set_link_speed; + +- return 0; ++ return hns3_restore_filter(hns); + + err_set_link_speed: + (void)hns3_cfg_mac_mode(hw, false); +@@ -5096,12 +5080,6 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue) + return ret; + } + +-static void +-hns3_restore_filter(struct rte_eth_dev *dev) +-{ +- hns3_restore_rss_filter(dev); +-} +- + static int + hns3_dev_start(struct rte_eth_dev *dev) + { +@@ 
-5155,10 +5133,7 @@ hns3_dev_start(struct rte_eth_dev *dev) + rte_spinlock_unlock(&hw->lock); + + hns3_rx_scattered_calc(dev); +- hns3_set_rxtx_function(dev); +- hns3_mp_req_start_rxtx(dev); +- +- hns3_restore_filter(dev); ++ hns3_start_rxtx_datapath(dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); +@@ -5236,12 +5211,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) + dev->data->dev_started = 0; + + hw->adapter_state = HNS3_NIC_STOPPING; +- hns3_set_rxtx_function(dev); +- rte_wmb(); +- /* Disable datapath on secondary process. */ +- hns3_mp_req_stop_rxtx(dev); +- /* Prevent crashes when queues are still in use. */ +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(dev); + + rte_spinlock_lock(&hw->lock); + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { +@@ -5327,7 +5297,7 @@ hns3_get_autoneg_fc_mode(struct hns3_hw *hw) /* * Flow control auto-negotiation is not supported for fiber and @@ -23919,7 +43826,25 @@ index 0bd12907d8..40a33549e0 100644 */ case HNS3_MEDIA_TYPE_FIBER: case HNS3_MEDIA_TYPE_BACKPLANE: -@@ -5579,15 +5569,15 @@ hns3_reinit_dev(struct hns3_adapter *hns) +@@ -5415,16 +5385,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) + + if (!pf->support_fc_autoneg) { + if (autoneg != 0) { +- hns3_err(hw, "unsupported fc auto-negotiation setting."); +- return -EOPNOTSUPP; +- } +- +- /* +- * Flow control auto-negotiation of the NIC is not supported, +- * but other auto-negotiation features may be supported. +- */ +- if (autoneg != hw->mac.link_autoneg) { +- hns3_err(hw, "please use 'link_speeds' in struct rte_eth_conf to disable autoneg!"); ++ hns3_err(hw, "unsupported fc auto-negotiation."); + return -EOPNOTSUPP; + } + +@@ -5579,15 +5540,15 @@ hns3_reinit_dev(struct hns3_adapter *hns) return ret; } @@ -23939,7 +43864,259 @@ index 0bd12907d8..40a33549e0 100644 return ret; } -@@ -6191,7 +6181,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) +@@ -5633,23 +5594,60 @@ is_pf_reset_done(struct hns3_hw *hw) + return true; + } + ++static enum hns3_reset_level ++hns3_detect_reset_event(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ enum hns3_reset_level new_req = HNS3_NONE_RESET; ++ enum hns3_reset_level last_req; ++ uint32_t vector0_intr_state; ++ ++ last_req = hns3_get_reset_level(hns, &hw->reset.pending); ++ vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ new_req = HNS3_IMP_RESET; ++ } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ new_req = HNS3_GLOBAL_RESET; ++ } ++ ++ if (new_req == HNS3_NONE_RESET) ++ return HNS3_NONE_RESET; ++ ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); ++ } ++ ++ return new_req; ++} ++ + bool + hns3_is_reset_pending(struct hns3_adapter *hns) + { ++ enum hns3_reset_level new_req; + struct hns3_hw *hw = &hns->hw; +- enum hns3_reset_level reset; ++ enum hns3_reset_level last_req; + +- hns3_check_event_cause(hns, NULL); +- reset = hns3_get_reset_level(hns, &hw->reset.pending); +- if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is pending", reset); ++ /* ++ 
* Only primary can process can process the reset event, ++ * so don't check reset event in secondary. ++ */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return false; ++ ++ new_req = hns3_detect_reset_event(hw); ++ last_req = hns3_get_reset_level(hns, &hw->reset.pending); ++ if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && ++ new_req < last_req) { ++ hns3_warn(hw, "High level reset %d is pending", last_req); + return true; + } +- reset = hns3_get_reset_level(hns, &hw->reset.request); +- if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is request", reset); ++ last_req = hns3_get_reset_level(hns, &hw->reset.request); ++ if (last_req != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && ++ hw->reset.level < last_req) { ++ hns3_warn(hw, "High level reset %d is request", last_req); + return true; + } + return false; +@@ -5696,17 +5694,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) + return hns3_cmd_send(hw, &desc, 1); + } + +-static int +-hns3_imp_reset_cmd(struct hns3_hw *hw) +-{ +- struct hns3_cmd_desc desc; +- +- hns3_cmd_setup_basic_desc(&desc, 0xFFFE, false); +- desc.data[0] = 0xeedd; +- +- return hns3_cmd_send(hw, &desc, 1); +-} +- + static void + hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) + { +@@ -5724,7 +5711,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) + + switch (reset_level) { + case HNS3_IMP_RESET: +- hns3_imp_reset_cmd(hw); ++ val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG); ++ hns3_set_bit(val, HNS3_VECTOR0_TRIGGER_IMP_RESET_B, 1); ++ hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val); + hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", + tv.tv_sec, tv.tv_usec); + break; +@@ -5849,12 +5838,7 @@ hns3_stop_service(struct hns3_adapter *hns) + rte_eal_alarm_cancel(hns3_service_handler, eth_dev); + hns3_update_linkstatus_and_event(hw, false); + } +- +- hns3_set_rxtx_function(eth_dev); +- rte_wmb(); +- /* Disable datapath on secondary process. 
*/ +- hns3_mp_req_stop_rxtx(eth_dev); +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(eth_dev); + + rte_spinlock_lock(&hw->lock); + if (hns->hw.adapter_state == HNS3_NIC_STARTED || +@@ -5887,8 +5871,7 @@ hns3_start_service(struct hns3_adapter *hns) + hw->reset.level == HNS3_GLOBAL_RESET) + hns3_set_rst_done(hw); + eth_dev = &rte_eth_devices[hw->data->port_id]; +- hns3_set_rxtx_function(eth_dev); +- hns3_mp_req_start_rxtx(eth_dev); ++ hns3_start_rxtx_datapath(eth_dev); + if (hw->adapter_state == HNS3_NIC_STARTED) { + /* + * This API parent function already hold the hns3_hw.lock, the +@@ -5943,10 +5926,6 @@ hns3_restore_conf(struct hns3_adapter *hns) + if (ret) + goto err_promisc; + +- ret = hns3_restore_all_fdir_filter(hns); +- if (ret) +- goto err_promisc; +- + ret = hns3_restore_ptp(hns); + if (ret) + goto err_promisc; +@@ -6042,56 +6021,27 @@ hns3_reset_service(void *param) + hns3_msix_process(hns, reset_level); + } + +-static unsigned int +-hns3_get_speed_capa_num(uint16_t device_id) ++static uint32_t ++hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, ++ uint32_t speed_capa) + { +- unsigned int num; +- +- switch (device_id) { +- case HNS3_DEV_ID_25GE: +- case HNS3_DEV_ID_25GE_RDMA: +- num = 2; +- break; +- case HNS3_DEV_ID_100G_RDMA_MACSEC: +- case HNS3_DEV_ID_200G_RDMA: +- num = 1; +- break; +- default: +- num = 0; +- break; +- } ++ uint32_t speed_bit; ++ uint32_t num = 0; ++ uint32_t i; + +- return num; +-} ++ for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) { ++ speed_bit = ++ rte_eth_speed_bitflag(speed_fec_capa_tbl[i].speed, ++ RTE_ETH_LINK_FULL_DUPLEX); ++ if ((speed_capa & speed_bit) == 0) ++ continue; + +-static int +-hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa, +- uint16_t device_id) +-{ +- switch (device_id) { +- case HNS3_DEV_ID_25GE: +- /* fallthrough */ +- case HNS3_DEV_ID_25GE_RDMA: +- speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed; +- speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa; +- +- /* In HNS3 device, the 25G NIC is compatible with 10G rate */ +- speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed; +- speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa; +- break; +- case HNS3_DEV_ID_100G_RDMA_MACSEC: +- speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed; +- speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa; +- break; +- case HNS3_DEV_ID_200G_RDMA: +- speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed; +- speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa; +- break; +- default: +- return -ENOTSUP; ++ speed_fec_capa[num].speed = speed_fec_capa_tbl[i].speed; ++ speed_fec_capa[num].capa = speed_fec_capa_tbl[i].capa; ++ num++; + } + +- return 0; ++ return num; + } + + static int +@@ -6100,28 +6050,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, + unsigned int num) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); +- uint16_t device_id = pci_dev->id.device_id; +- unsigned int capa_num; +- int ret; ++ unsigned int speed_num; ++ uint32_t speed_capa; + +- capa_num = hns3_get_speed_capa_num(device_id); +- if (capa_num == 0) { +- hns3_err(hw, "device(0x%x) is not supported by hns3 PMD", +- device_id); ++ speed_capa = hns3_get_speed_capa(hw); ++ /* speed_num counts number of speed capabilities */ ++ speed_num = __builtin_popcount(speed_capa & HNS3_SPEEDS_SUPP_FEC); ++ if (speed_num == 0) + return -ENOTSUP; +- } + +- if (speed_fec_capa == NULL || num < capa_num) +- return capa_num; ++ if (speed_fec_capa == NULL) 
++ return speed_num; + +- ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id); +- if (ret) +- return -ENOTSUP; ++ if (num < speed_num) { ++ hns3_err(hw, "not enough array size(%u) to store FEC capabilities, should not be less than %u", ++ num, speed_num); ++ return -EINVAL; ++ } + +- return capa_num; ++ return hns3_get_speed_fec_capa(speed_fec_capa, speed_capa); + } + ++ + static int + get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) + { +@@ -6191,7 +6141,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) } /* @@ -23948,81 +44125,364 @@ index 0bd12907d8..40a33549e0 100644 * that defined in the ethdev library. So the sequence needs * to be converted. */ -diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h -index aa45b31261..134a33ee2f 100644 ---- a/dpdk/drivers/net/hns3/hns3_ethdev.h -+++ b/dpdk/drivers/net/hns3/hns3_ethdev.h -@@ -126,7 +126,7 @@ struct hns3_tc_info { - uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */ - uint8_t pgid; - uint32_t bw_limit; -- uint8_t up_to_tc_map; /* user priority maping on the TC */ -+ uint8_t up_to_tc_map; /* user priority mapping on the TC */ - }; +@@ -6259,62 +6209,53 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) + } - struct hns3_dcb_info { -@@ -502,8 +502,15 @@ struct hns3_hw { - struct hns3_tqp_stats tqp_stats; - /* Include Mac stats | Rx stats | Tx stats */ - struct hns3_mac_stats mac_stats; -+ uint32_t mac_stats_reg_num; - struct hns3_rx_missed_stats imissed_stats; - uint64_t oerror_stats; -+ /* -+ * The lock is used to protect statistics update in stats APIs and -+ * periodic task. -+ */ -+ rte_spinlock_t stats_lock; -+ - uint32_t fw_version; - uint16_t pf_vf_if_version; /* version of communication interface */ + static uint32_t +-get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa) +-{ +- struct hns3_mac *mac = &hw->mac; +- uint32_t cur_capa; ++hns3_parse_hw_fec_capa(uint8_t hw_fec_capa) ++{ ++ const struct { ++ uint32_t hw_fec_capa; ++ uint32_t fec_capa; ++ } fec_capa_map[] = { ++ { HNS3_FIBER_FEC_AUTO_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) }, ++ { HNS3_FIBER_FEC_BASER_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(BASER) }, ++ { HNS3_FIBER_FEC_RS_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(RS) }, ++ { HNS3_FIBER_FEC_NOFEC_BIT, RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) }, ++ }; ++ uint32_t capa = 0; ++ uint32_t i; -@@ -523,7 +530,6 @@ struct hns3_hw { +- switch (mac->link_speed) { +- case RTE_ETH_SPEED_NUM_10G: +- cur_capa = fec_capa[1].capa; +- break; +- case RTE_ETH_SPEED_NUM_25G: +- case RTE_ETH_SPEED_NUM_100G: +- case RTE_ETH_SPEED_NUM_200G: +- cur_capa = fec_capa[0].capa; +- break; +- default: +- cur_capa = 0; +- break; ++ for (i = 0; i < RTE_DIM(fec_capa_map); i++) { ++ if ((hw_fec_capa & fec_capa_map[i].hw_fec_capa) != 0) ++ capa |= fec_capa_map[i].fec_capa; + } - /* The configuration info of RSS */ - struct hns3_rss_conf rss_info; -- bool rss_dis_flag; /* disable rss flag. true: disable, false: enable */ - uint16_t rss_ind_tbl_size; - uint16_t rss_key_size; +- return cur_capa; ++ return capa; + } -@@ -571,12 +577,12 @@ struct hns3_hw { - /* - * vlan mode. - * value range: -- * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHFIT_AND_DISCARD_MODE -+ * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE - * - * - HNS3_SW_SHIFT_AND_DISCARD_MODE - * For some versions of hardware network engine, because of the - * hardware limitation, PMD needs to detect the PVID status -- * to work with haredware to implement PVID-related functions. 
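[Editor's note] The rewritten hns3_fec_get_capability() above sizes its result with __builtin_popcount() over the supported-speed mask and fills the caller's array by filtering a static speed/FEC table, replacing the old per-device-ID switch. A reduced sketch of that table-filter pattern; the speeds, capability bits, and demo_* helper are placeholders, not the driver's real values:

#include <stdint.h>

struct demo_fec_capa {
        uint32_t speed;
        uint32_t capa;
};

static const struct demo_fec_capa demo_capa_tbl[] = {
        { 10000,  0x1 }, /* illustrative speed/capability pairs */
        { 25000,  0x3 },
        { 100000, 0x7 },
};

extern uint32_t demo_speed_bit(uint32_t speed); /* assumed speed->bit map */

/* Copy only the table rows whose speed bit is set in speed_capa. */
static uint32_t
demo_fill_fec_capa(struct demo_fec_capa *out, uint32_t speed_capa)
{
        uint32_t num = 0;
        uint32_t i;

        for (i = 0; i < sizeof(demo_capa_tbl) / sizeof(demo_capa_tbl[0]); i++) {
                if ((speed_capa & demo_speed_bit(demo_capa_tbl[i].speed)) == 0)
                        continue;
                out[num++] = demo_capa_tbl[i];
        }
        /*
         * Callers can size "out" up front with
         * __builtin_popcount(speed_capa & SUPPORTED_MASK), as the driver does.
         */
        return num;
}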
-+ * to work with hardware to implement PVID-related functions. - * For example, driver need discard the stripped PVID tag to ensure - * the PVID will not report to mbuf and shift the inserted VLAN tag - * to avoid port based VLAN covering it. -@@ -724,7 +730,7 @@ enum hns3_mp_req_type { - HNS3_MP_REQ_MAX - }; +-static bool +-is_fec_mode_one_bit_set(uint32_t mode) ++static uint32_t ++hns3_get_current_speed_fec_cap(struct hns3_mac *mac) + { +- int cnt = 0; +- uint8_t i; ++ uint32_t i; --/* Pameters for IPC. */ -+/* Parameters for IPC. */ - struct hns3_mp_param { - enum hns3_mp_req_type type; - int port_id; -diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -index 805abd4543..0af4dcb324 100644 ---- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -+++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -@@ -242,7 +242,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, - if (ret == -EPERM) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - old_addr); -- hns3_warn(hw, "Has permanet mac addr(%s) for vf", +- for (i = 0; i < sizeof(mode); i++) +- if (mode >> i & 0x1) +- cnt++; ++ if (mac->fec_capa != 0) ++ return hns3_parse_hw_fec_capa(mac->fec_capa); ++ ++ for (i = 0; i < RTE_DIM(speed_fec_capa_tbl); i++) { ++ if (mac->link_speed == speed_fec_capa_tbl[i].speed) ++ return speed_fec_capa_tbl[i].capa; ++ } + +- return cnt == 1 ? true : false; ++ return 0; + } + + static int +-hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) ++hns3_fec_mode_valid(struct rte_eth_dev *dev, uint32_t mode) + { +-#define FEC_CAPA_NUM 2 + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); +- struct hns3_pf *pf = &hns->pf; +- +- struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM]; + uint32_t cur_capa; +- uint32_t num = FEC_CAPA_NUM; +- int ret; +- +- ret = hns3_fec_get_capability(dev, fec_capa, num); +- if (ret < 0) +- return ret; + +- /* HNS3 PMD only support one bit set mode, e.g. 0x1, 0x4 */ +- if (!is_fec_mode_one_bit_set(mode)) { +- hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, " +- "FEC mode should be only one bit set", mode); ++ if (__builtin_popcount(mode) != 1) { ++ hns3_err(hw, "FEC mode(0x%x) should be only one bit set", mode); + return -EINVAL; + } + +@@ -6322,12 +6263,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) + * Check whether the configured mode is within the FEC capability. + * If not, the configured mode will not be supported. 
+ */ +- cur_capa = get_current_speed_fec_cap(hw, fec_capa); +- if (!(cur_capa & mode)) { +- hns3_err(hw, "unsupported FEC mode = 0x%x", mode); ++ cur_capa = hns3_get_current_speed_fec_cap(&hw->mac); ++ if ((cur_capa & mode) == 0) { ++ hns3_err(hw, "unsupported FEC mode(0x%x)", mode); + return -EINVAL; + } + ++ return 0; ++} ++ ++static int ++hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) ++{ ++ struct hns3_adapter *hns = dev->data->dev_private; ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns); ++ struct hns3_pf *pf = &hns->pf; ++ int ret; ++ ++ ret = hns3_fec_mode_valid(dev, mode); ++ if (ret != 0) ++ return ret; ++ + rte_spinlock_lock(&hw->lock); + ret = hns3_set_fec_hw(hw, mode); + if (ret) { +@@ -6382,7 +6338,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + hns3_err(hw, +- "fail to get optical module exist state, ret = %d.\n", ++ "fail to get optical module exist state, ret = %d.", + ret); + return false; + } +@@ -6420,7 +6376,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, + + ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); + if (ret) { +- hns3_err(hw, "fail to get module EEPROM info, ret = %d.\n", ++ hns3_err(hw, "fail to get module EEPROM info, ret = %d.", + ret); + return ret; + } +@@ -6457,7 +6413,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, + return -ENOTSUP; + + if (!hns3_optical_module_existed(hw)) { +- hns3_err(hw, "fail to read module EEPROM: no module is connected.\n"); ++ hns3_err(hw, "fail to read module EEPROM: no module is connected."); + return -EIO; + } + +@@ -6520,7 +6476,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: +- hns3_err(hw, "unknown module, type = %u, extra_type = %u.\n", ++ hns3_err(hw, "unknown module, type = %u, extra_type = %u.", + sfp_type.type, sfp_type.ext_type); + return -EINVAL; + } +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h +index aa45b31261..5ba9503bf8 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev.h ++++ b/dpdk/drivers/net/hns3/hns3_ethdev.h +@@ -75,7 +75,6 @@ + #define HNS3_DEFAULT_MTU 1500UL + #define HNS3_DEFAULT_FRAME_LEN (HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD) + #define HNS3_HIP08_MIN_TX_PKT_LEN 33 +-#define HNS3_HIP09_MIN_TX_PKT_LEN 9 + + #define HNS3_BITS_PER_BYTE 8 + +@@ -126,7 +125,7 @@ struct hns3_tc_info { + uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */ + uint8_t pgid; + uint32_t bw_limit; +- uint8_t up_to_tc_map; /* user priority maping on the TC */ ++ uint8_t up_to_tc_map; /* user priority mapping on the TC */ + }; + + struct hns3_dcb_info { +@@ -217,6 +216,8 @@ struct hns3_mac { + uint32_t advertising; /* advertised capability in the local part */ + uint32_t lp_advertising; /* advertised capability in the link partner */ + uint8_t support_autoneg; ++ /* current supported fec modes. see HNS3_FIBER_FEC_XXX_BIT */ ++ uint32_t fec_capa; + }; + + struct hns3_fake_queue_data { +@@ -502,8 +503,15 @@ struct hns3_hw { + struct hns3_tqp_stats tqp_stats; + /* Include Mac stats | Rx stats | Tx stats */ + struct hns3_mac_stats mac_stats; ++ uint32_t mac_stats_reg_num; + struct hns3_rx_missed_stats imissed_stats; + uint64_t oerror_stats; ++ /* ++ * The lock is used to protect statistics update in stats APIs and ++ * periodic task. 
++ */ ++ rte_spinlock_t stats_lock; ++ + uint32_t fw_version; + uint16_t pf_vf_if_version; /* version of communication interface */ + +@@ -523,7 +531,6 @@ struct hns3_hw { + + /* The configuration info of RSS */ + struct hns3_rss_conf rss_info; +- bool rss_dis_flag; /* disable rss flag. true: disable, false: enable */ + uint16_t rss_ind_tbl_size; + uint16_t rss_key_size; + +@@ -548,7 +555,7 @@ struct hns3_hw { + * The minimum length of the packet supported by hardware in the Tx + * direction. + */ +- uint32_t min_tx_pkt_len; ++ uint8_t min_tx_pkt_len; + + struct hns3_queue_intr intr; + /* +@@ -571,12 +578,12 @@ struct hns3_hw { + /* + * vlan mode. + * value range: +- * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHFIT_AND_DISCARD_MODE ++ * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE + * + * - HNS3_SW_SHIFT_AND_DISCARD_MODE + * For some versions of hardware network engine, because of the + * hardware limitation, PMD needs to detect the PVID status +- * to work with haredware to implement PVID-related functions. ++ * to work with hardware to implement PVID-related functions. + * For example, driver need discard the stripped PVID tag to ensure + * the PVID will not report to mbuf and shift the inserted VLAN tag + * to avoid port based VLAN covering it. +@@ -724,7 +731,7 @@ enum hns3_mp_req_type { + HNS3_MP_REQ_MAX + }; + +-/* Pameters for IPC. */ ++/* Parameters for IPC. */ + struct hns3_mp_param { + enum hns3_mp_req_type type; + int port_id; +@@ -871,13 +878,6 @@ struct hns3_adapter { + struct hns3_ptype_table ptype_tbl __rte_cache_aligned; + }; + +-#define HNS3_DEVARG_RX_FUNC_HINT "rx_func_hint" +-#define HNS3_DEVARG_TX_FUNC_HINT "tx_func_hint" +- +-#define HNS3_DEVARG_DEV_CAPS_MASK "dev_caps_mask" +- +-#define HNS3_DEVARG_MBX_TIME_LIMIT_MS "mbx_time_limit_ms" +- + enum { + HNS3_DEV_SUPPORT_DCB_B, + HNS3_DEV_SUPPORT_COPPER_B, +@@ -891,6 +891,7 @@ enum { + HNS3_DEV_SUPPORT_RAS_IMP_B, + HNS3_DEV_SUPPORT_TM_B, + HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, ++ HNS3_DEV_SUPPORT_GRO_B, + }; + + #define hns3_dev_get_support(hw, _name) \ +@@ -996,15 +997,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) + #define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, (reg)) + +-#define NEXT_ITEM_OF_ACTION(act, actions, index) \ +- do { \ +- act = (actions) + (index); \ +- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ +- (index)++; \ +- act = actions + index; \ +- } \ +- } while (0) +- + static inline uint64_t + hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) + { +@@ -1043,22 +1035,8 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); + void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + uint32_t link_speed, uint8_t link_duplex); + void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); +- +-int hns3_restore_ptp(struct hns3_adapter *hns); +-int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, +- struct rte_eth_conf *conf); +-int hns3_ptp_init(struct hns3_hw *hw); +-int hns3_timesync_enable(struct rte_eth_dev *dev); +-int hns3_timesync_disable(struct rte_eth_dev *dev); +-int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev, +- struct timespec *timestamp, +- uint32_t flags __rte_unused); +-int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, +- struct timespec *timestamp); +-int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts); +-int hns3_timesync_write_time(struct rte_eth_dev *dev, +- const struct timespec *ts); +-int hns3_timesync_adjust_time(struct 
rte_eth_dev *dev, int64_t delta); ++void hns3_clear_reset_event(struct hns3_hw *hw); ++void hns3vf_clear_reset_event(struct hns3_hw *hw); + + static inline bool + is_reset_pending(struct hns3_adapter *hns) +@@ -1071,4 +1049,15 @@ is_reset_pending(struct hns3_adapter *hns) + return ret; + } + ++static inline void ++hns3_clear_reset_status(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ ++ if (hns->is_vf) ++ hns3vf_clear_reset_event(hw); ++ else ++ hns3_clear_reset_event(hw); ++} ++ + #endif /* _HNS3_ETHDEV_H_ */ +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +index 805abd4543..0d1d271f37 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +@@ -6,7 +6,6 @@ + #include <rte_alarm.h> + #include <ethdev_pci.h> + #include <rte_io.h> +-#include <rte_pci.h> + #include <rte_vfio.h> + + #include "hns3_ethdev.h" +@@ -242,7 +241,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + if (ret == -EPERM) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + old_addr); +- hns3_warn(hw, "Has permanet mac addr(%s) for vf", + hns3_warn(hw, "Has permanent mac addr(%s) for vf", mac_str); } else { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, -@@ -318,7 +318,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, +@@ -250,6 +249,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", + mac_str, ret); + } ++ rte_spinlock_unlock(&hw->lock); ++ return ret; + } + + rte_ether_addr_copy(mac_addr, +@@ -318,7 +319,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, * 1. The promiscuous/allmulticast mode can be configured successfully * only based on the trusted VF device. If based on the non trusted * VF device, configuring promiscuous/allmulticast mode will fail. @@ -24031,7 +44491,7 @@ index 805abd4543..0af4dcb324 100644 * kernel ethdev driver on the host by the following command: * "ip link set <eth num> vf <vf id> turst on" * 2. After the promiscuous mode is configured successfully, hns3 VF PMD -@@ -330,7 +330,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, +@@ -330,7 +331,7 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, * filter is still effective even in promiscuous mode. 
If upper * applications don't call rte_eth_dev_vlan_filter API function to * set vlan based on VF device, hns3 VF PMD will can't receive @@ -24040,7 +44500,7 @@ index 805abd4543..0af4dcb324 100644 */ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; -@@ -496,7 +496,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) +@@ -496,7 +497,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev) /* When RSS is not configured, redirect the packet queue 0 */ if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; @@ -24048,7 +44508,99 @@ index 805abd4543..0af4dcb324 100644 rss_conf = conf->rx_adv_conf.rss_conf; ret = hns3_dev_rss_hash_update(dev, &rss_conf); if (ret) -@@ -780,6 +779,14 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) +@@ -611,6 +611,19 @@ hns3vf_enable_irq0(struct hns3_hw *hw) + hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); + } + ++void ++hns3vf_clear_reset_event(struct hns3_hw *hw) ++{ ++ uint32_t clearval; ++ uint32_t cmdq_stat_reg; ++ ++ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); ++ clearval = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); ++ hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, clearval); ++ ++ hns3vf_enable_irq0(hw); ++} ++ + static enum hns3vf_evt_cause + hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + { +@@ -685,67 +698,10 @@ hns3vf_interrupt_handler(void *param) + break; + } + +- /* Enable interrupt */ +- hns3vf_enable_irq0(hw); +-} +- +-static void +-hns3vf_set_default_dev_specifications(struct hns3_hw *hw) +-{ +- hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT; +- hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE; +- hw->rss_key_size = HNS3_RSS_KEY_SIZE; +- hw->intr.int_ql_max = HNS3_INTR_QL_NONE; +-} +- +-static void +-hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) +-{ +- struct hns3_dev_specs_0_cmd *req0; +- +- req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; +- +- hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; +- hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); +- hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); +- hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); +-} +- +-static int +-hns3vf_check_dev_specifications(struct hns3_hw *hw) +-{ +- if (hw->rss_ind_tbl_size == 0 || +- hw->rss_ind_tbl_size > HNS3_RSS_IND_TBL_SIZE_MAX) { +- hns3_warn(hw, "the size of hash lookup table configured (%u)" +- " exceeds the maximum(%u)", hw->rss_ind_tbl_size, +- HNS3_RSS_IND_TBL_SIZE_MAX); +- return -EINVAL; +- } +- +- return 0; +-} +- +-static int +-hns3vf_query_dev_specifications(struct hns3_hw *hw) +-{ +- struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM]; +- int ret; +- int i; +- +- for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) { +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, +- true); +- desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT); +- } +- hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true); +- +- ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM); +- if (ret) +- return ret; +- +- hns3vf_parse_dev_specifications(hw, desc); +- +- return hns3vf_check_dev_specifications(hw); ++ /* Enable interrupt if it is not caused by reset */ ++ if (event_cause == HNS3VF_VECTOR0_EVENT_MBX || ++ event_cause == HNS3VF_VECTOR0_EVENT_OTHER) ++ hns3vf_enable_irq0(hw); + } + + void +@@ -780,6 +736,14 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) while (remain_ms > 0) { rte_delay_ms(HNS3_POLL_RESPONE_MS); @@ -24063,7 
+44615,53 @@ index 805abd4543..0af4dcb324 100644 if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != HNS3_PF_PUSH_LSC_CAP_UNKNOWN) break; -@@ -1031,7 +1038,6 @@ hns3vf_get_configuration(struct hns3_hw *hw) +@@ -810,26 +774,10 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) + static int + hns3vf_get_capability(struct hns3_hw *hw) + { +- struct rte_pci_device *pci_dev; +- struct rte_eth_dev *eth_dev; +- uint8_t revision; + int ret; + +- eth_dev = &rte_eth_devices[hw->data->port_id]; +- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); +- +- /* Get PCI revision id */ +- ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN, +- HNS3_PCI_REVISION_ID); +- if (ret != HNS3_PCI_REVISION_ID_LEN) { +- PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d", +- ret); +- return -EIO; +- } +- hw->revision = revision; +- +- if (revision < PCI_REVISION_ID_HIP09_A) { +- hns3vf_set_default_dev_specifications(hw); ++ if (hw->revision < PCI_REVISION_ID_HIP09_A) { ++ hns3_set_default_dev_specifications(hw); + hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; + hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; +@@ -840,7 +788,7 @@ hns3vf_get_capability(struct hns3_hw *hw) + return 0; + } + +- ret = hns3vf_query_dev_specifications(hw); ++ ret = hns3_query_dev_specifications(hw); + if (ret) { + PMD_INIT_LOG(ERR, + "failed to query dev specifications, ret = %d", +@@ -852,7 +800,6 @@ hns3vf_get_capability(struct hns3_hw *hw) + hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; + hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; + hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; +- hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; + +@@ -1031,7 +978,6 @@ hns3vf_get_configuration(struct hns3_hw *hw) int ret; hw->mac.media_type = HNS3_MEDIA_TYPE_NONE; @@ -24071,7 +44669,7 @@ index 805abd4543..0af4dcb324 100644 /* Get device capability */ ret = hns3vf_get_capability(hw); -@@ -1385,10 +1391,12 @@ hns3vf_service_handler(void *param) +@@ -1385,10 +1331,12 @@ hns3vf_service_handler(void *param) * Before querying the link status, check whether there is a reset * pending, and if so, abandon the query. 
*/ @@ -24086,7 +44684,18 @@ index 805abd4543..0af4dcb324 100644 rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, eth_dev); -@@ -1558,17 +1566,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) +@@ -1515,6 +1463,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) + /* Get hardware io base address from pcie BAR2 IO space */ + hw->io_base = pci_dev->mem_resource[2].addr; + ++ ret = hns3_get_pci_revision_id(hw, &hw->revision); ++ if (ret) ++ return ret; ++ + /* Firmware command queue initialize */ + ret = hns3_cmd_init_queue(hw); + if (ret) { +@@ -1558,17 +1510,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) goto err_get_config; } @@ -24105,7 +44714,7 @@ index 805abd4543..0af4dcb324 100644 ret = hns3vf_set_tc_queue_mapping(hns, hw->tqps_num, hw->tqps_num); if (ret) { PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret); -@@ -1596,7 +1597,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) +@@ -1596,7 +1541,7 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) return 0; err_set_tc_queue: @@ -24114,7 +44723,7 @@ index 805abd4543..0af4dcb324 100644 err_get_config: hns3vf_disable_irq0(hw); -@@ -1627,7 +1628,7 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) +@@ -1627,7 +1572,7 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev) (void)hns3vf_set_alive(hw, false); (void)hns3vf_set_promisc_mode(hw, false, false, false); hns3_flow_uninit(eth_dev); @@ -24123,7 +44732,69 @@ index 805abd4543..0af4dcb324 100644 hns3vf_disable_irq0(hw); rte_intr_disable(pci_dev->intr_handle); hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler, -@@ -1925,6 +1926,7 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) +@@ -1678,12 +1623,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) + dev->data->dev_started = 0; + + hw->adapter_state = HNS3_NIC_STOPPING; +- hns3_set_rxtx_function(dev); +- rte_wmb(); +- /* Disable datapath on secondary process. */ +- hns3_mp_req_stop_rxtx(dev); +- /* Prevent crashes when queues are still in use. */ +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(dev); + + rte_spinlock_lock(&hw->lock); + if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { +@@ -1785,16 +1725,12 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) + hns3_enable_rxd_adv_layout(hw); + + ret = hns3_init_queues(hns, reset_queue); +- if (ret) ++ if (ret) { + hns3_err(hw, "failed to init queues, ret = %d.", ret); ++ return ret; ++ } + +- return ret; +-} +- +-static void +-hns3vf_restore_filter(struct rte_eth_dev *dev) +-{ +- hns3_restore_rss_filter(dev); ++ return hns3_restore_filter(hns); + } + + static int +@@ -1843,10 +1779,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) + rte_spinlock_unlock(&hw->lock); + + hns3_rx_scattered_calc(dev); +- hns3_set_rxtx_function(dev); +- hns3_mp_req_start_rxtx(dev); +- +- hns3vf_restore_filter(dev); ++ hns3_start_rxtx_datapath(dev); + + /* Enable interrupt of all rx queues before enabling queues */ + hns3_dev_all_rx_queue_intr_enable(hw, true); +@@ -1911,7 +1844,13 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) + if (hw->reset.level == HNS3_VF_FULL_RESET) + return false; + +- /* Check the registers to confirm whether there is reset pending */ ++ /* ++ * Only primary can process can process the reset event, ++ * so don't check reset event in secondary. 
++ */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return false; ++ + hns3vf_check_event_cause(hns, NULL); + reset = hns3vf_get_reset_level(hw, &hw->reset.pending); + if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && +@@ -1925,6 +1864,7 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) static int hns3vf_wait_hardware_ready(struct hns3_adapter *hns) { @@ -24131,7 +44802,7 @@ index 805abd4543..0af4dcb324 100644 struct hns3_hw *hw = &hns->hw; struct hns3_wait_data *wait_data = hw->reset.wait_data; struct timeval tv; -@@ -1945,12 +1947,14 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) +@@ -1945,12 +1885,14 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) return 0; wait_data->check_completion = NULL; @@ -24148,7 +44819,43 @@ index 805abd4543..0af4dcb324 100644 return -EAGAIN; } else if (wait_data->result == HNS3_WAIT_TIMEOUT) { hns3_clock_gettime(&tv); -@@ -2472,7 +2476,6 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) +@@ -2006,11 +1948,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) + } + hw->mac.link_status = RTE_ETH_LINK_DOWN; + +- hns3_set_rxtx_function(eth_dev); +- rte_wmb(); +- /* Disable datapath on secondary process. */ +- hns3_mp_req_stop_rxtx(eth_dev); +- rte_delay_ms(hw->cfg_max_queues); ++ hns3_stop_rxtx_datapath(eth_dev); + + rte_spinlock_lock(&hw->lock); + if (hw->adapter_state == HNS3_NIC_STARTED || +@@ -2040,8 +1978,7 @@ hns3vf_start_service(struct hns3_adapter *hns) + struct rte_eth_dev *eth_dev; + + eth_dev = &rte_eth_devices[hw->data->port_id]; +- hns3_set_rxtx_function(eth_dev); +- hns3_mp_req_start_rxtx(eth_dev); ++ hns3_start_rxtx_datapath(eth_dev); + if (hw->adapter_state == HNS3_NIC_STARTED) { + hns3vf_start_poll_job(eth_dev); + +@@ -2268,8 +2205,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) + */ + if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO || + pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) { +- if (hns3vf_enable_msix(pci_dev, true)) ++ ret = hns3vf_enable_msix(pci_dev, true); ++ if (ret != 0) { + hns3_err(hw, "Failed to enable msix"); ++ return ret; ++ } + } + + rte_intr_enable(pci_dev->intr_handle); +@@ -2472,7 +2412,6 @@ hns3vf_dev_uninit(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) { @@ -24157,7 +44864,7 @@ index 805abd4543..0af4dcb324 100644 return 0; } diff --git a/dpdk/drivers/net/hns3/hns3_fdir.c b/dpdk/drivers/net/hns3/hns3_fdir.c -index d043f5786d..2426264138 100644 +index d043f5786d..a2dd25fb21 100644 --- a/dpdk/drivers/net/hns3/hns3_fdir.c +++ b/dpdk/drivers/net/hns3/hns3_fdir.c @@ -631,7 +631,7 @@ static bool hns3_fd_convert_tuple(struct hns3_hw *hw, @@ -24169,6 +44876,25 @@ index d043f5786d..2426264138 100644 } return true; } +@@ -975,7 +975,7 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, + rule->key_conf.spec.src_port, + rule->key_conf.spec.dst_port, ret); + else +- hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); ++ ret = hns3_remove_fdir_filter(hw, fdir_info, &rule->key_conf); + + return ret; + } +@@ -1069,6 +1069,9 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns) + bool err = false; + int ret; + ++ if (hns->is_vf) ++ return 0; ++ + /* + * This API is called in the reset recovery process, the parent function + * must hold hw->lock. diff --git a/dpdk/drivers/net/hns3/hns3_fdir.h b/dpdk/drivers/net/hns3/hns3_fdir.h index f9efff3b52..07b393393d 100644 --- a/dpdk/drivers/net/hns3/hns3_fdir.h @@ -24183,10 +44909,10 @@ index f9efff3b52..07b393393d 100644 /* * equal 0 when action is drop. 
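Note on the hns3_ethdev_vf.c hunks above: the backport makes hns3vf_is_reset_pending() bail out early in non-primary processes, because only the primary process owns the reset state machine and may consume reset events. Below is a minimal sketch of that multi-process guard pattern; rte_eal_process_type() and RTE_PROC_PRIMARY are the real DPDK EAL APIs, while the surrounding function is hypothetical and not part of this patch.

#include <stdbool.h>
#include <rte_eal.h>	/* rte_eal_process_type(), RTE_PROC_PRIMARY */

/* Hypothetical poll hook illustrating the guard added by the backport. */
static bool
demo_is_reset_pending(void)
{
	/*
	 * Reset bookkeeping lives in the primary process; a secondary
	 * process must not consume the event, so it reports "no reset".
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return false;

	/* Primary process: safe to inspect the reset status registers. */
	return false; /* placeholder for the real register check */
}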
diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c -index 9f2f9cb6cd..24caf8e870 100644 +index 9f2f9cb6cd..74b08451e1 100644 --- a/dpdk/drivers/net/hns3/hns3_flow.c +++ b/dpdk/drivers/net/hns3/hns3_flow.c -@@ -10,15 +10,6 @@ +@@ -10,13 +10,123 @@ #include "hns3_logs.h" #include "hns3_flow.h" @@ -24197,12 +44923,205 @@ index 9f2f9cb6cd..24caf8e870 100644 - 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, - 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, - 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA --}; -- ++#define NEXT_ITEM_OF_ACTION(act, actions, index) \ ++ do { \ ++ (act) = (actions) + (index); \ ++ while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \ ++ (index)++; \ ++ (act) = (actions) + (index); \ ++ } \ ++ } while (0) ++ ++#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \ ++ do { \ ++ (item) = (pattern) + (index); \ ++ while ((item)->type == RTE_FLOW_ITEM_TYPE_VOID) { \ ++ (index)++; \ ++ (item) = (pattern) + (index); \ ++ } \ ++ } while (0) ++ ++#define HNS3_HASH_HDR_ETH RTE_BIT64(0) ++#define HNS3_HASH_HDR_IPV4 RTE_BIT64(1) ++#define HNS3_HASH_HDR_IPV6 RTE_BIT64(2) ++#define HNS3_HASH_HDR_TCP RTE_BIT64(3) ++#define HNS3_HASH_HDR_UDP RTE_BIT64(4) ++#define HNS3_HASH_HDR_SCTP RTE_BIT64(5) ++ ++#define HNS3_HASH_VOID_NEXT_ALLOW BIT_ULL(RTE_FLOW_ITEM_TYPE_ETH) ++ ++#define HNS3_HASH_ETH_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV4) | \ ++ BIT_ULL(RTE_FLOW_ITEM_TYPE_IPV6)) ++ ++#define HNS3_HASH_IP_NEXT_ALLOW (BIT_ULL(RTE_FLOW_ITEM_TYPE_TCP) | \ ++ BIT_ULL(RTE_FLOW_ITEM_TYPE_UDP) | \ ++ BIT_ULL(RTE_FLOW_ITEM_TYPE_SCTP)) ++ ++static const uint64_t hash_pattern_next_allow_items[] = { ++ [RTE_FLOW_ITEM_TYPE_VOID] = HNS3_HASH_VOID_NEXT_ALLOW, ++ [RTE_FLOW_ITEM_TYPE_ETH] = HNS3_HASH_ETH_NEXT_ALLOW, ++ [RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_IP_NEXT_ALLOW, ++ [RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_IP_NEXT_ALLOW, ++}; ++ ++static const uint64_t hash_pattern_item_header[] = { ++ [RTE_FLOW_ITEM_TYPE_ETH] = HNS3_HASH_HDR_ETH, ++ [RTE_FLOW_ITEM_TYPE_IPV4] = HNS3_HASH_HDR_IPV4, ++ [RTE_FLOW_ITEM_TYPE_IPV6] = HNS3_HASH_HDR_IPV6, ++ [RTE_FLOW_ITEM_TYPE_TCP] = HNS3_HASH_HDR_TCP, ++ [RTE_FLOW_ITEM_TYPE_UDP] = HNS3_HASH_HDR_UDP, ++ [RTE_FLOW_ITEM_TYPE_SCTP] = HNS3_HASH_HDR_SCTP, ++}; ++ ++#define HNS3_HASH_IPV4 (HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV4) ++#define HNS3_HASH_IPV4_TCP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV4 | \ ++ HNS3_HASH_HDR_TCP) ++#define HNS3_HASH_IPV4_UDP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV4 | \ ++ HNS3_HASH_HDR_UDP) ++#define HNS3_HASH_IPV4_SCTP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV4 | \ ++ HNS3_HASH_HDR_SCTP) ++#define HNS3_HASH_IPV6 (HNS3_HASH_HDR_ETH | HNS3_HASH_HDR_IPV6) ++#define HNS3_HASH_IPV6_TCP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV6 | \ ++ HNS3_HASH_HDR_TCP) ++#define HNS3_HASH_IPV6_UDP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV6 | \ ++ HNS3_HASH_HDR_UDP) ++#define HNS3_HASH_IPV6_SCTP (HNS3_HASH_HDR_ETH | \ ++ HNS3_HASH_HDR_IPV6 | \ ++ HNS3_HASH_HDR_SCTP) ++ ++static const struct hns3_hash_map_info { ++ /* flow type specified, zero means action works for all flow types. 
*/ ++ uint64_t pattern_type; ++ uint64_t rss_pctype; /* packet type with prefix RTE_ETH_RSS_xxx */ ++ uint64_t l3l4_types; /* Supported L3/L4 RSS types for this packet type */ ++ uint64_t hw_pctype; /* packet type in driver */ ++ uint64_t tuple_mask; /* full tuples of the hw_pctype */ ++} hash_map_table[] = { ++ /* IPV4 */ ++ { HNS3_HASH_IPV4, ++ RTE_ETH_RSS_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { HNS3_HASH_IPV4, ++ RTE_ETH_RSS_NONFRAG_IPV4_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV4_NONF, HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { HNS3_HASH_IPV4, ++ RTE_ETH_RSS_FRAG_IPV4, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV4_FLAG, HNS3_RSS_TUPLE_IPV4_FLAG_M }, ++ { HNS3_HASH_IPV4_TCP, ++ RTE_ETH_RSS_NONFRAG_IPV4_TCP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV4_TCP, HNS3_RSS_TUPLE_IPV4_TCP_M }, ++ { HNS3_HASH_IPV4_UDP, ++ RTE_ETH_RSS_NONFRAG_IPV4_UDP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV4_UDP, HNS3_RSS_TUPLE_IPV4_UDP_M }, ++ { HNS3_HASH_IPV4_SCTP, ++ RTE_ETH_RSS_NONFRAG_IPV4_SCTP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV4_SCTP, HNS3_RSS_TUPLE_IPV4_SCTP_M }, ++ /* IPV6 */ ++ { HNS3_HASH_IPV6, ++ RTE_ETH_RSS_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { HNS3_HASH_IPV6, ++ RTE_ETH_RSS_NONFRAG_IPV6_OTHER, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV6_NONF, HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { HNS3_HASH_IPV6, ++ RTE_ETH_RSS_FRAG_IPV6, HNS3_RSS_SUPPORT_L3_SRC_DST, ++ HNS3_RSS_PCTYPE_IPV6_FLAG, HNS3_RSS_TUPLE_IPV6_FLAG_M }, ++ { HNS3_HASH_IPV6_TCP, ++ RTE_ETH_RSS_NONFRAG_IPV6_TCP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV6_TCP, HNS3_RSS_TUPLE_IPV6_TCP_M }, ++ { HNS3_HASH_IPV6_UDP, ++ RTE_ETH_RSS_NONFRAG_IPV6_UDP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV6_UDP, HNS3_RSS_TUPLE_IPV6_UDP_M }, ++ { HNS3_HASH_IPV6_SCTP, ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP, HNS3_RSS_SUPPORT_L3L4, ++ HNS3_RSS_PCTYPE_IPV6_SCTP, HNS3_RSS_TUPLE_IPV6_SCTP_M }, + }; + static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF }; - static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 }; +@@ -88,7 +198,7 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) + } + + /* +- * This function is used to find rss general action. ++ * This function is used to parse filter type. + * 1. As we know RSS is used to spread packets among several queues, the flow + * API provide the struct rte_flow_action_rss, user could config its field + * sush as: func/level/types/key/queue to control RSS function. +@@ -96,16 +206,18 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len) + * implemented by FDIR + RSS in hns3 hardware, user can create one FDIR rule + * which action is RSS queues region. + * 3. When action is RSS, we use the following rule to distinguish: +- * Case 1: pattern have ETH and action's queue_num > 0, indicate it is queue +- * region configuration. ++ * Case 1: pattern has ETH and all fields in RSS action except 'queues' are ++ * zero or default, indicate it is queue region configuration. + * Case other: an rss general action. 
+ */ +-static const struct rte_flow_action * +-hns3_find_rss_general_action(const struct rte_flow_item pattern[], +- const struct rte_flow_action actions[]) ++static void ++hns3_parse_filter_type(const struct rte_flow_item pattern[], ++ const struct rte_flow_action actions[], ++ struct hns3_filter_info *filter_info) + { ++ const struct rte_flow_action_rss *rss_act; + const struct rte_flow_action *act = NULL; +- const struct hns3_rss_conf *rss; ++ bool only_has_queues = false; + bool have_eth = false; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { +@@ -114,8 +226,10 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[], + break; + } + } +- if (!act) +- return NULL; ++ if (act == NULL) { ++ filter_info->type = RTE_ETH_FILTER_FDIR; ++ return; ++ } + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) { +@@ -124,19 +238,21 @@ hns3_find_rss_general_action(const struct rte_flow_item pattern[], + } + } + +- rss = act->conf; +- if (have_eth && rss->conf.queue_num) { ++ rss_act = act->conf; ++ only_has_queues = (rss_act->queue_num > 0) && ++ (rss_act->func == RTE_ETH_HASH_FUNCTION_DEFAULT && ++ rss_act->types == 0 && rss_act->key_len == 0); ++ if (have_eth && only_has_queues) { + /* +- * Pattern have ETH and action's queue_num > 0, indicate this is +- * queue region configuration. +- * Because queue region is implemented by FDIR + RSS in hns3 +- * hardware, it needs to enter FDIR process, so here return NULL +- * to avoid enter RSS process. ++ * Pattern has ETH and all fields in RSS action except 'queues' ++ * are zero or default, which indicates this is queue region ++ * configuration. + */ +- return NULL; ++ filter_info->type = RTE_ETH_FILTER_FDIR; ++ return; + } -@@ -338,7 +329,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev, +- return act; ++ filter_info->type = RTE_ETH_FILTER_HASH; + } + + static inline struct hns3_flow_counter * +@@ -338,7 +454,7 @@ hns3_handle_action_queue_region(struct rte_eth_dev *dev, * * @param actions[in] * @param rule[out] @@ -24211,7 +45130,7 @@ index 9f2f9cb6cd..24caf8e870 100644 * @param error[out] */ static int -@@ -369,7 +360,7 @@ hns3_handle_actions(struct rte_eth_dev *dev, +@@ -369,7 +485,7 @@ hns3_handle_actions(struct rte_eth_dev *dev, * Queue region is implemented by FDIR + RSS in hns3 hardware, * the FDIR's action is one queue region (start_queue_id and * queue_num), then RSS spread packets to the queue region by @@ -24220,7 +45139,16 @@ index 9f2f9cb6cd..24caf8e870 100644 */ case RTE_FLOW_ACTION_TYPE_RSS: ret = hns3_handle_action_queue_region(dev, actions, -@@ -940,7 +931,7 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, +@@ -764,7 +880,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, +- "Only support src & dst port in SCTP"); ++ "Only support src & dst port & v-tag in SCTP"); + if (sctp_mask->hdr.src_port) { + hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); + rule->key_conf.mask.src_port = +@@ -940,7 +1056,7 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, @@ -24229,7 +45157,7 @@ index 9f2f9cb6cd..24caf8e870 100644 /* TNI must be totally masked or not. 
*/ if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) && -@@ -985,7 +976,7 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, +@@ -985,7 +1101,7 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item, @@ -24238,96 +45166,1398 @@ index 9f2f9cb6cd..24caf8e870 100644 /* VNI must be totally masked or not. */ if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) && memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN)) -@@ -1238,6 +1229,7 @@ static bool - hns3_action_rss_same(const struct rte_flow_action_rss *comp, - const struct rte_flow_action_rss *with) +@@ -1208,7 +1324,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) { -+ bool rss_key_is_same; - bool func_is_same; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_fdir_rule_ele *fdir_rule_ptr; +- struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_flow_mem *flow_node; - /* -@@ -1251,13 +1243,19 @@ hns3_action_rss_same(const struct rte_flow_action_rss *comp, - if (comp->func == RTE_ETH_HASH_FUNCTION_MAX) - func_is_same = false; - else + fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); +@@ -1218,13 +1333,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) + fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); + } + +- rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); +- while (rss_filter_ptr) { +- TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); +- rte_free(rss_filter_ptr); +- rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); +- } +- + flow_node = TAILQ_FIRST(&hw->flow_list); + while (flow_node) { + TAILQ_REMOVE(&hw->flow_list, flow_node, entries); +@@ -1235,253 +1343,489 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) + } + + static bool +-hns3_action_rss_same(const struct rte_flow_action_rss *comp, +- const struct rte_flow_action_rss *with) ++hns3_flow_rule_key_same(const struct rte_flow_action_rss *comp, ++ const struct rte_flow_action_rss *with) + { +- bool func_is_same; ++ if (comp->key_len != with->key_len) ++ return false; + +- /* +- * When user flush all RSS rule, RSS func is set invalid with +- * RTE_ETH_HASH_FUNCTION_MAX. Then the user create a flow after +- * flushed, any validate RSS func is different with it before +- * flushed. Others, when user create an action RSS with RSS func +- * specified RTE_ETH_HASH_FUNCTION_DEFAULT, the func is the same +- * between continuous RSS flow. +- */ +- if (comp->func == RTE_ETH_HASH_FUNCTION_MAX) +- func_is_same = false; +- else - func_is_same = with->func ? (comp->func == with->func) : true; -+ func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ? 
-+ (comp->func == with->func) : true; ++ if (with->key_len == 0) ++ return true; ++ ++ if (comp->key == NULL && with->key == NULL) ++ return true; ++ ++ if (!(comp->key != NULL && with->key != NULL)) ++ return false; - return (func_is_same && -+ if (with->key_len == 0 || with->key == NULL) -+ rss_key_is_same = 1; -+ else -+ rss_key_is_same = comp->key_len == with->key_len && -+ !memcmp(comp->key, with->key, with->key_len); -+ -+ return (func_is_same && rss_key_is_same && - comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) && +- comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) && - comp->level == with->level && comp->key_len == with->key_len && -+ comp->level == with->level && - comp->queue_num == with->queue_num && +- comp->queue_num == with->queue_num && - !memcmp(comp->key, with->key, with->key_len) && - !memcmp(comp->queue, with->queue, - sizeof(*with->queue) * with->queue_num)); +- !memcmp(comp->queue, with->queue, +- sizeof(*with->queue) * with->queue_num)); ++ return !memcmp(comp->key, with->key, with->key_len); } -@@ -1309,7 +1307,7 @@ hns3_rss_input_tuple_supported(struct hns3_hw *hw, + +-static int +-hns3_rss_conf_copy(struct hns3_rss_conf *out, +- const struct rte_flow_action_rss *in) ++static bool ++hns3_flow_rule_queues_same(const struct rte_flow_action_rss *comp, ++ const struct rte_flow_action_rss *with) + { +- if (in->key_len > RTE_DIM(out->key) || +- in->queue_num > RTE_DIM(out->queue)) +- return -EINVAL; +- if (in->key == NULL && in->key_len) +- return -EINVAL; +- out->conf = (struct rte_flow_action_rss) { +- .func = in->func, +- .level = in->level, +- .types = in->types, +- .key_len = in->key_len, +- .queue_num = in->queue_num, +- }; +- out->conf.queue = memcpy(out->queue, in->queue, +- sizeof(*in->queue) * in->queue_num); +- if (in->key) +- out->conf.key = memcpy(out->key, in->key, in->key_len); ++ if (comp->queue_num != with->queue_num) ++ return false; + +- return 0; ++ if (with->queue_num == 0) ++ return true; ++ ++ if (comp->queue == NULL && with->queue == NULL) ++ return true; ++ ++ if (!(comp->queue != NULL && with->queue != NULL)) ++ return false; ++ ++ return !memcmp(comp->queue, with->queue, with->queue_num); ++} ++ ++static bool ++hns3_action_rss_same(const struct rte_flow_action_rss *comp, ++ const struct rte_flow_action_rss *with) ++{ ++ bool same_level; ++ bool same_types; ++ bool same_func; ++ ++ same_level = (comp->level == with->level); ++ same_types = (comp->types == with->types); ++ same_func = (comp->func == with->func); ++ ++ return same_level && same_types && same_func && ++ hns3_flow_rule_key_same(comp, with) && ++ hns3_flow_rule_queues_same(comp, with); } - /* + static bool +-hns3_rss_input_tuple_supported(struct hns3_hw *hw, +- const struct rte_flow_action_rss *rss) ++hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types) + { + /* +- * For IP packet, it is not supported to use src/dst port fields to RSS +- * hash for the following packet types. +- * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG +- * Besides, for Kunpeng920, the NIC HW is not supported to use src/dst +- * port fields to RSS hash for IPV6 SCTP packet type. However, the +- * Kunpeng930 and future kunpeng series support to use src/dst port +- * fields to RSS hash for IPv6 SCTP packet type. ++ * Some hardware don't support to use src/dst port fields to hash ++ * for IPV6 SCTP packet type. 
+ */ +- if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) && +- (rss->types & RTE_ETH_RSS_IP || +- (!hw->rss_info.ipv6_sctp_offload_supported && +- rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP))) ++ if (types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP && ++ types & HNS3_RSS_SUPPORT_L4_SRC_DST && ++ !hw->rss_info.ipv6_sctp_offload_supported) + return false; + + return true; + } + +-/* - * This function is used to parse rss action validatation. -+ * This function is used to parse rss action validation. - */ +- */ static int - hns3_parse_rss_filter(struct rte_eth_dev *dev, -@@ -1391,15 +1389,10 @@ hns3_disable_rss(struct hns3_hw *hw) +-hns3_parse_rss_filter(struct rte_eth_dev *dev, +- const struct rte_flow_action *actions, +- struct rte_flow_error *error) ++hns3_flow_parse_hash_func(const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) { +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_conf = &hw->rss_info; +- const struct rte_flow_action_rss *rss; +- const struct rte_flow_action *act; +- uint32_t act_index = 0; +- uint16_t n; ++ if (rss_act->func >= RTE_ETH_HASH_FUNCTION_MAX) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, "RSS hash func are not supported"); + +- NEXT_ITEM_OF_ACTION(act, actions, act_index); +- rss = act->conf; ++ rss_conf->conf.func = rss_act->func; ++ return 0; ++} + +- if (rss == NULL) { ++static int ++hns3_flow_parse_hash_key(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ if (rss_act->key_len != hw->rss_key_size) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, +- act, "no valid queues"); +- } ++ NULL, "invalid RSS key length"); ++ ++ if (rss_act->key != NULL) ++ memcpy(rss_conf->key, rss_act->key, rss_act->key_len); ++ else ++ memcpy(rss_conf->key, hns3_hash_key, ++ RTE_MIN(sizeof(hns3_hash_key), rss_act->key_len)); ++ /* Need to record if user sets hash key. 
*/ ++ rss_conf->conf.key = rss_act->key; ++ rss_conf->conf.key_len = rss_act->key_len; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_parse_queues(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ uint16_t i; + +- if (rss->queue_num > RTE_DIM(rss_conf->queue)) ++ if (rss_act->queue_num > hw->rss_ind_tbl_size) + return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "queue number configured exceeds " +- "queue buffer size driver supported"); ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, ++ "queue number can not exceed RSS indirection table."); + +- for (n = 0; n < rss->queue_num; n++) { +- if (rss->queue[n] < hw->alloc_rss_size) +- continue; +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "queue id must be less than queue number allocated to a TC"); ++ if (rss_act->queue_num > HNS3_RSS_QUEUES_BUFFER_NUM) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, ++ "queue number configured exceeds queue buffer size driver supported"); ++ ++ for (i = 0; i < rss_act->queue_num; i++) { ++ if (rss_act->queue[i] >= hw->alloc_rss_size) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, ++ "queue id must be less than queue number allocated to a TC"); ++ } ++ ++ memcpy(rss_conf->queue, rss_act->queue, ++ rss_act->queue_num * sizeof(rss_conf->queue[0])); ++ rss_conf->conf.queue = rss_conf->queue; ++ rss_conf->conf.queue_num = rss_act->queue_num; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_get_hw_pctype(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ const struct hns3_hash_map_info *map, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ uint64_t l3l4_src_dst, l3l4_refine, left_types; ++ ++ if (rss_act->types == 0) { ++ /* Disable RSS hash of this packet type if types is zero. */ ++ rss_conf->hw_pctypes |= map->hw_pctype; ++ return 0; + } + +- if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types) ++ /* ++ * Can not have extra types except rss_pctype and l3l4_type in this map. ++ */ ++ left_types = ~map->rss_pctype & rss_act->types; ++ if (left_types & ~map->l3l4_types) + return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, +- act, +- "Flow types is unsupported by " +- "hns3's RSS"); +- if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX) +- return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "RSS hash func are not supported"); +- if (rss->level) ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, ++ "cannot set extra types."); ++ ++ l3l4_src_dst = left_types; ++ /* L3/L4 SRC and DST shouldn't be specified at the same time. 
*/ ++ l3l4_refine = rte_eth_rss_hf_refine(l3l4_src_dst); ++ if (l3l4_refine != l3l4_src_dst) + return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "a nonzero RSS encapsulation level is not supported"); +- if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key)) ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, ++ "cannot specify L3_SRC/DST_ONLY or L4_SRC/DST_ONLY at the same."); ++ ++ if (!hns3_valid_ipv6_sctp_rss_types(hw, rss_act->types)) + return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, +- "RSS hash key must be exactly 40 bytes"); ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, ++ "hardware doesn't support to use L4 src/dst to hash for IPV6-SCTP."); + +- if (!hns3_rss_input_tuple_supported(hw, rss)) +- return rte_flow_error_set(error, EINVAL, ++ rss_conf->hw_pctypes |= map->hw_pctype; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_parse_rss_types_by_ptype(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ uint64_t pattern_type, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ const struct hns3_hash_map_info *map; ++ bool matched = false; ++ uint16_t i; ++ int ret; ++ ++ for (i = 0; i < RTE_DIM(hash_map_table); i++) { ++ map = &hash_map_table[i]; ++ if (map->pattern_type != pattern_type) { ++ /* ++ * If the target pattern type is already matched with ++ * the one before this pattern in the hash map table, ++ * no need to continue walk. ++ */ ++ if (matched) ++ break; ++ continue; ++ } ++ matched = true; ++ ++ /* ++ * If pattern type is matched and the 'types' is zero, all packet flow ++ * types related to this pattern type disable RSS hash. ++ * Otherwise, RSS types must match the pattern type and cannot have no ++ * extra or unsupported types. ++ */ ++ if (rss_act->types != 0 && !(map->rss_pctype & rss_act->types)) ++ continue; ++ ++ ret = hns3_flow_get_hw_pctype(hw, rss_act, map, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ if (rss_conf->hw_pctypes != 0) ++ return 0; ++ ++ if (matched) ++ return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, +- &rss->types, +- "input RSS types are not supported"); ++ NULL, "RSS types are unsupported"); ++ ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, "Pattern specified is unsupported"); ++} + +- act_index++; ++static uint64_t ++hns3_flow_get_all_hw_pctypes(uint64_t types) ++{ ++ uint64_t hw_pctypes = 0; ++ uint16_t i; + +- /* Check if the next not void action is END */ +- NEXT_ITEM_OF_ACTION(act, actions, act_index); +- if (act->type != RTE_FLOW_ACTION_TYPE_END) { +- memset(rss_conf, 0, sizeof(struct hns3_rss_conf)); +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, +- act, "Not supported action."); ++ for (i = 0; i < RTE_DIM(hash_map_table); i++) { ++ if (types & hash_map_table[i].rss_pctype) ++ hw_pctypes |= hash_map_table[i].hw_pctype; + } + +- return 0; ++ return hw_pctypes; ++} ++ ++static int ++hns3_flow_parse_rss_types(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *rss_act, ++ uint64_t pattern_type, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) ++{ ++ rss_conf->conf.types = rss_act->types; ++ ++ /* no pattern specified to set global RSS types. 
*/ ++ if (pattern_type == 0) { ++ if (!hns3_check_rss_types_valid(hw, rss_act->types)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ NULL, "RSS types is invalid."); ++ rss_conf->hw_pctypes = ++ hns3_flow_get_all_hw_pctypes(rss_act->types); ++ return 0; ++ } ++ ++ return hns3_flow_parse_rss_types_by_ptype(hw, rss_act, pattern_type, ++ rss_conf, error); + } + + static int +-hns3_disable_rss(struct hns3_hw *hw) ++hns3_flow_parse_hash_global_conf(struct rte_eth_dev *dev, ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) + { ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; - /* Redirected the redirection table to queue 0 */ - ret = hns3_rss_reset_indir_table(hw); -+ ret = hns3_set_rss_tuple_by_rss_hf(hw, 0); - if (ret) +- if (ret) ++ ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error); ++ if (ret != 0) return ret; - /* Disable RSS */ - hw->rss_info.conf.types = 0; - hw->rss_dis_flag = true; -- - return 0; ++ if (rss_act->queue_num > 0) { ++ ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ if (rss_act->key_len > 0) { ++ ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } + +- return 0; ++ return hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type, ++ rss_conf, error); + } + +-static void +-hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf) ++static int ++hns3_flow_parse_pattern_type(const struct rte_flow_item pattern[], ++ uint64_t *ptype, struct rte_flow_error *error) + { +- if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) { +- hns3_warn(hw, "Default RSS hash key to be set"); +- rss_conf->key = hns3_hash_key; +- rss_conf->key_len = HNS3_RSS_KEY_SIZE; ++ enum rte_flow_item_type pre_type = RTE_FLOW_ITEM_TYPE_VOID; ++ const char *message = "Pattern specified isn't supported"; ++ uint64_t item_hdr, pattern_hdrs = 0; ++ enum rte_flow_item_type cur_type; ++ ++ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { ++ if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ++ continue; ++ if (pattern->mask || pattern->spec || pattern->last) { ++ message = "Header info shouldn't be specified"; ++ goto unsup; ++ } ++ ++ /* Check the sub-item allowed by the previous item . */ ++ if (pre_type >= RTE_DIM(hash_pattern_next_allow_items) || ++ !(hash_pattern_next_allow_items[pre_type] & ++ BIT_ULL(pattern->type))) ++ goto unsup; ++ ++ cur_type = pattern->type; ++ /* Unsupported for current type being greater than array size. */ ++ if (cur_type >= RTE_DIM(hash_pattern_item_header)) ++ goto unsup; ++ ++ /* The value is zero, which means unsupported current header. */ ++ item_hdr = hash_pattern_item_header[cur_type]; ++ if (item_hdr == 0) ++ goto unsup; ++ ++ /* Have duplicate pattern header. 
*/ ++ if (item_hdr & pattern_hdrs) ++ goto unsup; ++ pre_type = cur_type; ++ pattern_hdrs |= item_hdr; + } +-} + +-static int +-hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func, +- uint8_t *hash_algo) +-{ +- enum rte_eth_hash_function algo_func = *func; +- switch (algo_func) { +- case RTE_ETH_HASH_FUNCTION_DEFAULT: +- /* Keep *hash_algo as what it used to be */ +- algo_func = hw->rss_info.conf.func; +- break; +- case RTE_ETH_HASH_FUNCTION_TOEPLITZ: +- *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; +- break; +- case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: +- *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE; +- break; +- case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: +- *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP; +- break; +- default: +- hns3_err(hw, "Invalid RSS algorithm configuration(%d)", +- algo_func); +- return -EINVAL; ++ if (pattern_hdrs != 0) { ++ *ptype = pattern_hdrs; ++ return 0; + } +- *func = algo_func; + +- return 0; ++unsup: ++ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, ++ pattern, message); } -@@ -1445,7 +1438,6 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func, static int - hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) +-hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) ++hns3_flow_parse_pattern_act(struct rte_eth_dev *dev, ++ const struct rte_flow_item pattern[], ++ const struct rte_flow_action_rss *rss_act, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) { - struct hns3_rss_tuple_cfg *tuple; ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; - hns3_parse_rss_key(hw, rss_config); -@@ -1461,8 +1453,7 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config) +- hns3_parse_rss_key(hw, rss_config); ++ ret = hns3_flow_parse_hash_func(rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; + +- ret = hns3_parse_rss_algorithm(hw, &rss_config->func, +- &hw->rss_info.hash_algo); +- if (ret) ++ if (rss_act->key_len > 0) { ++ ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ if (rss_act->queue_num > 0) { ++ ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); ++ if (ret != 0) ++ return ret; ++ } ++ ++ ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type, ++ error); ++ if (ret != 0) + return ret; + +- ret = hns3_rss_set_algo_key(hw, rss_config->key); +- if (ret) ++ ret = hns3_flow_parse_rss_types(hw, rss_act, rss_conf->pattern_type, ++ rss_conf, error); ++ if (ret != 0) + return ret; - hw->rss_info.conf.func = rss_config->func; +- hw->rss_info.conf.func = rss_config->func; ++ if (rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT || ++ rss_act->key_len > 0 || rss_act->queue_num > 0) ++ hns3_warn(hw, "hash func, key and queues are global config, which work for all flow types. 
" ++ "Recommend: don't set them together with pattern."); - tuple = &hw->rss_info.rss_tuple_sets; - ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types); -+ ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_config->types); +- if (ret) +- hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret); ++ return 0; ++} + +- return ret; ++static bool ++hns3_rss_action_is_dup(struct hns3_hw *hw, ++ const struct hns3_flow_rss_conf *conf) ++{ ++ struct hns3_rss_conf_ele *filter; ++ ++ TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { ++ if (conf->pattern_type != filter->filter_info.pattern_type) ++ continue; ++ ++ if (hns3_action_rss_same(&filter->filter_info.conf, &conf->conf)) ++ return true; ++ } ++ ++ return false; + } + ++/* ++ * This function is used to parse rss action validation. ++ */ + static int +-hns3_update_indir_table(struct rte_eth_dev *dev, +- const struct rte_flow_action_rss *conf, uint16_t num) ++hns3_parse_rss_filter(struct rte_eth_dev *dev, ++ const struct rte_flow_item pattern[], ++ const struct rte_flow_action *actions, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow_error *error) + { + struct hns3_adapter *hns = dev->data->dev_private; ++ const struct rte_flow_action_rss *rss_act; ++ const struct rte_flow_action *act; ++ const struct rte_flow_item *pat; + struct hns3_hw *hw = &hns->hw; ++ uint32_t index = 0; ++ int ret; ++ ++ NEXT_ITEM_OF_ACTION(act, actions, index); ++ if (actions[1].type != RTE_FLOW_ACTION_TYPE_END) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, ++ &actions[1], ++ "Only support one action for RSS."); ++ ++ rss_act = (const struct rte_flow_action_rss *)act->conf; ++ if (rss_act == NULL) { ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ act, "lost RSS action configuration"); ++ } ++ ++ if (rss_act->level != 0) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ act, ++ "RSS level is not supported"); ++ ++ index = 0; ++ NEXT_ITEM_OF_PATTERN(pat, pattern, index); ++ if (pat[0].type == RTE_FLOW_ITEM_TYPE_END) { ++ rss_conf->pattern_type = 0; ++ ret = hns3_flow_parse_hash_global_conf(dev, rss_act, ++ rss_conf, error); ++ } else { ++ ret = hns3_flow_parse_pattern_act(dev, pat, rss_act, ++ rss_conf, error); ++ } ++ if (ret != 0) ++ return ret; ++ ++ if (hns3_rss_action_is_dup(hw, rss_conf)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION_CONF, ++ act, "duplicate RSS rule"); ++ ++ return 0; ++} ++ ++static int ++hns3_update_indir_table(struct hns3_hw *hw, ++ const struct rte_flow_action_rss *conf, uint16_t num) ++{ + uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; + uint16_t j; + uint32_t i; + + /* Fill in redirection table */ +- memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl, +- sizeof(hw->rss_info.rss_indirection_tbl)); + for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) { + j %= num; + if (conf->queue[j] >= hw->alloc_rss_size) { +@@ -1496,101 +1840,106 @@ hns3_update_indir_table(struct rte_eth_dev *dev, + return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size); + } + ++static uint64_t ++hns3_flow_get_pctype_tuple_mask(uint64_t hw_pctype) ++{ ++ uint64_t tuple_mask = 0; ++ uint16_t i; ++ ++ for (i = 0; i < RTE_DIM(hash_map_table); i++) { ++ if (hw_pctype == hash_map_table[i].hw_pctype) { ++ tuple_mask = hash_map_table[i].tuple_mask; ++ break; ++ } ++ } ++ ++ return tuple_mask; ++} ++ + static int +-hns3_config_rss_filter(struct rte_eth_dev *dev, +- const struct hns3_rss_conf *conf, bool add) 
++hns3_flow_set_rss_ptype_tuple(struct hns3_hw *hw, ++ struct hns3_flow_rss_conf *rss_conf) + { +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_rss_conf_ele *rss_filter_ptr; +- struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_info; +- uint64_t flow_types; +- uint16_t num; ++ uint64_t old_tuple_fields, new_tuple_fields; ++ uint64_t hw_pctypes, tuples, tuple_mask = 0; ++ bool cfg_global_tuple; + int ret; + +- struct rte_flow_action_rss rss_flow_conf = { +- .func = conf->conf.func, +- .level = conf->conf.level, +- .types = conf->conf.types, +- .key_len = conf->conf.key_len, +- .queue_num = conf->conf.queue_num, +- .key = conf->conf.key_len ? +- (void *)(uintptr_t)conf->conf.key : NULL, +- .queue = conf->conf.queue, +- }; +- +- /* Filter the unsupported flow types */ +- flow_types = conf->conf.types ? +- rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT : +- hw->rss_info.conf.types; +- if (flow_types != rss_flow_conf.types) +- hns3_warn(hw, "modified RSS types based on hardware support, " +- "requested:0x%" PRIx64 " configured:0x%" PRIx64, +- rss_flow_conf.types, flow_types); +- /* Update the useful flow types */ +- rss_flow_conf.types = flow_types; +- +- rss_info = &hw->rss_info; +- if (!add) { +- if (!conf->valid) +- return 0; +- +- ret = hns3_disable_rss(hw); +- if (ret) { +- hns3_err(hw, "RSS disable failed(%d)", ret); ++ cfg_global_tuple = (rss_conf->pattern_type == 0); ++ if (!cfg_global_tuple) { ++ /* ++ * To ensure that different packets do not affect each other, ++ * we have to first read all tuple fields, and then only modify ++ * the tuples for the specified packet type. ++ */ ++ ret = hns3_get_rss_tuple_field(hw, &old_tuple_fields); ++ if (ret != 0) + return ret; +- } + +- if (rss_flow_conf.queue_num) { +- /* +- * Due the content of queue pointer have been reset to +- * 0, the rss_info->conf.queue should be set to NULL +- */ +- rss_info->conf.queue = NULL; +- rss_info->conf.queue_num = 0; ++ new_tuple_fields = old_tuple_fields; ++ hw_pctypes = rss_conf->hw_pctypes; ++ while (hw_pctypes > 0) { ++ uint32_t idx = rte_bsf64(hw_pctypes); ++ uint64_t pctype = BIT_ULL(idx); ++ ++ tuple_mask = hns3_flow_get_pctype_tuple_mask(pctype); ++ tuples = hns3_rss_calc_tuple_filed(rss_conf->conf.types); ++ new_tuple_fields &= ~tuple_mask; ++ new_tuple_fields |= tuples; ++ hw_pctypes &= ~pctype; + } +- +- /* set RSS func invalid after flushed */ +- rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX; +- return 0; ++ } else { ++ new_tuple_fields = ++ hns3_rss_calc_tuple_filed(rss_conf->conf.types); + } + +- /* Set rx queues to use */ +- num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num); +- if (rss_flow_conf.queue_num > num) +- hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated", +- rss_flow_conf.queue_num); +- hns3_info(hw, "Max of contiguous %u PF queues are configured", num); ++ ret = hns3_set_rss_tuple_field(hw, new_tuple_fields); ++ if (ret != 0) ++ return ret; + +- rte_spinlock_lock(&hw->lock); +- if (num) { +- ret = hns3_update_indir_table(dev, &rss_flow_conf, num); +- if (ret) +- goto rss_config_err; +- } ++ if (!cfg_global_tuple) ++ hns3_info(hw, "RSS tuple fields changed from 0x%" PRIx64 " to 0x%" PRIx64, ++ old_tuple_fields, new_tuple_fields); + +- /* Set hash algorithm and flow types by the user's config */ +- ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf); +- if (ret) +- goto rss_config_err; ++ return 0; ++} + +- ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf); +- if (ret) { +- hns3_err(hw, "RSS config init fail(%d)", ret); +- 
goto rss_config_err; ++static int ++hns3_config_rss_filter(struct hns3_hw *hw, ++ struct hns3_flow_rss_conf *rss_conf) ++{ ++ struct rte_flow_action_rss *rss_act; ++ int ret; ++ ++ rss_act = &rss_conf->conf; ++ if (rss_act->queue_num > 0) { ++ ret = hns3_update_indir_table(hw, rss_act, rss_act->queue_num); ++ if (ret) { ++ hns3_err(hw, "set queues action failed, ret = %d", ret); ++ return ret; ++ } + } + +- /* +- * When create a new RSS rule, the old rule will be overlaid and set +- * invalid. +- */ +- TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries) +- rss_filter_ptr->filter_info.valid = false; ++ if (rss_act->key_len > 0 || ++ rss_act->func != RTE_ETH_HASH_FUNCTION_DEFAULT) { ++ ret = hns3_update_rss_algo_key(hw, rss_act->func, rss_conf->key, ++ rss_act->key_len); ++ if (ret != 0) { ++ hns3_err(hw, "set func or hash key action failed, ret = %d", ++ ret); ++ return ret; ++ } ++ } + +-rss_config_err: +- rte_spinlock_unlock(&hw->lock); ++ if (rss_conf->hw_pctypes > 0) { ++ ret = hns3_flow_set_rss_ptype_tuple(hw, rss_conf); ++ if (ret != 0) { ++ hns3_err(hw, "set types action failed, ret = %d", ret); ++ return ret; ++ } ++ } + +- return ret; ++ return 0; + } + + static int +@@ -1599,61 +1948,60 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev) + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_hw *hw = &hns->hw; +- int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */ +- int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */ +- int ret = 0; + + rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); + while (rss_filter_ptr) { + TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); +- ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info, +- false); +- if (ret) +- rss_rule_fail_cnt++; +- else +- rss_rule_succ_cnt++; + rte_free(rss_filter_ptr); + rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); + } + +- if (rss_rule_fail_cnt) { +- hns3_err(hw, "fail to delete all RSS filters, success num = %d " +- "fail num = %d", rss_rule_succ_cnt, +- rss_rule_fail_cnt); +- ret = -EIO; ++ return hns3_config_rss(hns); ++} ++ ++static int ++hns3_reconfig_all_rss_filter(struct hns3_hw *hw) ++{ ++ struct hns3_rss_conf_ele *filter; ++ uint32_t rule_no = 0; ++ int ret; ++ ++ TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { ++ ret = hns3_config_rss_filter(hw, &filter->filter_info); ++ if (ret != 0) { ++ hns3_err(hw, "config %uth RSS filter failed, ret = %d", ++ rule_no, ret); ++ return ret; ++ } ++ rule_no++; + } + +- return ret; ++ return 0; + } + +-int +-hns3_restore_rss_filter(struct rte_eth_dev *dev) ++static int ++hns3_restore_rss_filter(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; ++ int ret; + +- /* When user flush all rules, it doesn't need to restore RSS rule */ +- if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX) +- return 0; ++ pthread_mutex_lock(&hw->flows_lock); ++ ret = hns3_reconfig_all_rss_filter(hw); ++ pthread_mutex_unlock(&hw->flows_lock); + +- return hns3_config_rss_filter(dev, &hw->rss_info, true); ++ return ret; + } + +-static int +-hns3_flow_parse_rss(struct rte_eth_dev *dev, +- const struct hns3_rss_conf *conf, bool add) ++int ++hns3_restore_filter(struct hns3_adapter *hns) + { +- struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; +- bool ret; ++ int ret; + +- ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf); +- if (ret) { +- hns3_err(hw, "Enter duplicate RSS 
configuration : %d", ret); +- return -EINVAL; +- } ++ ret = hns3_restore_all_fdir_filter(hns); ++ if (ret != 0) ++ return ret; + +- return hns3_config_rss_filter(dev, conf, add); ++ return hns3_restore_rss_filter(hw); + } + + static int +@@ -1682,27 +2030,153 @@ hns3_flow_args_check(const struct rte_flow_attr *attr, + + /* + * Check if the flow rule is supported by hns3. +- * It only checkes the format. Don't guarantee the rule can be programmed into ++ * It only checks the format. Don't guarantee the rule can be programmed into + * the HW. Because there can be no enough room for the rule. + */ + static int + hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], +- struct rte_flow_error *error) ++ struct rte_flow_error *error, ++ struct hns3_filter_info *filter_info) + { +- struct hns3_fdir_rule fdir_rule; ++ union hns3_filter_conf *conf; + int ret; + + ret = hns3_flow_args_check(attr, pattern, actions, error); if (ret) - hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret); + return ret; + +- if (hns3_find_rss_general_action(pattern, actions)) +- return hns3_parse_rss_filter(dev, actions, error); ++ hns3_parse_filter_type(pattern, actions, filter_info); ++ conf = &filter_info->conf; ++ if (filter_info->type == RTE_ETH_FILTER_HASH) ++ return hns3_parse_rss_filter(dev, pattern, actions, ++ &conf->rss_conf, error); ++ ++ return hns3_parse_fdir_filter(dev, pattern, actions, ++ &conf->fdir_conf, error); ++} ++ ++static int ++hns3_flow_rebuild_all_rss_filter(struct hns3_adapter *hns) ++{ ++ struct hns3_hw *hw = &hns->hw; ++ int ret; ++ ++ ret = hns3_config_rss(hns); ++ if (ret != 0) { ++ hns3_err(hw, "restore original RSS configuration failed, ret = %d.", ++ ret); ++ return ret; ++ } ++ ret = hns3_reconfig_all_rss_filter(hw); ++ if (ret != 0) ++ hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret); ++ ++ return ret; ++} ++ ++static int ++hns3_flow_create_rss_rule(struct rte_eth_dev *dev, ++ struct hns3_flow_rss_conf *rss_conf, ++ struct rte_flow *flow) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct hns3_rss_conf_ele *rss_filter_ptr; ++ struct hns3_flow_rss_conf *new_conf; ++ struct rte_flow_action_rss *rss_act; ++ int ret; ++ ++ rss_filter_ptr = rte_zmalloc("hns3 rss filter", ++ sizeof(struct hns3_rss_conf_ele), 0); ++ if (rss_filter_ptr == NULL) { ++ hns3_err(hw, "failed to allocate hns3_rss_filter memory"); ++ return -ENOMEM; ++ } ++ ++ new_conf = &rss_filter_ptr->filter_info; ++ memcpy(new_conf, rss_conf, sizeof(*new_conf)); ++ rss_act = &new_conf->conf; ++ if (rss_act->queue_num > 0) ++ new_conf->conf.queue = new_conf->queue; ++ /* ++ * There are two ways to deliver hash key action: ++ * 1> 'key_len' is greater than zero and 'key' isn't NULL. ++ * 2> 'key_len' is greater than zero, but 'key' is NULL. ++ * For case 2, we need to keep 'key' of the new_conf is NULL so as to ++ * inherit the configuration from user in case of failing to verify ++ * duplicate rule later. 
++ */ ++ if (rss_act->key_len > 0 && rss_act->key != NULL) ++ new_conf->conf.key = new_conf->key; ++ ++ ret = hns3_config_rss_filter(hw, new_conf); ++ if (ret != 0) { ++ rte_free(rss_filter_ptr); ++ (void)hns3_flow_rebuild_all_rss_filter(hns); ++ return ret; ++ } ++ ++ TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); ++ flow->rule = rss_filter_ptr; ++ flow->filter_type = RTE_ETH_FILTER_HASH; ++ ++ return 0; ++} ++ ++static int ++hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, ++ struct hns3_fdir_rule *fdir_rule, ++ struct rte_flow_error *error, ++ struct rte_flow *flow) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ struct hns3_fdir_rule_ele *fdir_rule_ptr; ++ int ret; ++ ++ if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) { ++ ret = hns3_counter_new(dev, 0, ++ fdir_rule->act_cnt.id, error); ++ if (ret != 0) ++ return ret; ++ ++ flow->counter_id = fdir_rule->act_cnt.id; ++ } ++ ++ fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", ++ sizeof(struct hns3_fdir_rule_ele), 0); ++ if (fdir_rule_ptr == NULL) { ++ hns3_err(hw, "failed to allocate fdir_rule memory."); ++ ret = -ENOMEM; ++ goto err_malloc; ++ } ++ ++ /* ++ * After all the preceding tasks are successfully configured, configure ++ * rules to the hardware to simplify the rollback of rules in the ++ * hardware. ++ */ ++ ret = hns3_fdir_filter_program(hns, fdir_rule, false); ++ if (ret != 0) ++ goto err_fdir_filter; ++ ++ memcpy(&fdir_rule_ptr->fdir_conf, fdir_rule, ++ sizeof(struct hns3_fdir_rule)); ++ TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); ++ flow->rule = fdir_rule_ptr; ++ flow->filter_type = RTE_ETH_FILTER_FDIR; ++ ++ return 0; ++ ++err_fdir_filter: ++ rte_free(fdir_rule_ptr); ++err_malloc: ++ if (fdir_rule->flags & HNS3_RULE_FLAG_COUNTER) ++ hns3_counter_release(dev, fdir_rule->act_cnt.id); + +- memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); +- return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); ++ return ret; + } + + /* +@@ -1718,17 +2192,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + struct rte_flow_error *error) + { + struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; +- const struct hns3_rss_conf *rss_conf; +- struct hns3_fdir_rule_ele *fdir_rule_ptr; +- struct hns3_rss_conf_ele *rss_filter_ptr; ++ struct hns3_filter_info filter_info = {0}; + struct hns3_flow_mem *flow_node; +- const struct rte_flow_action *act; ++ struct hns3_hw *hw = &hns->hw; ++ union hns3_filter_conf *conf; + struct rte_flow *flow; +- struct hns3_fdir_rule fdir_rule; + int ret; + +- ret = hns3_flow_validate(dev, attr, pattern, actions, error); ++ ret = hns3_flow_validate(dev, attr, pattern, actions, error, ++ &filter_info); + if (ret) + return NULL; + +@@ -1748,79 +2220,22 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + } + + flow_node->flow = flow; ++ conf = &filter_info.conf; + TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries); +- +- act = hns3_find_rss_general_action(pattern, actions); +- if (act) { +- rss_conf = act->conf; +- +- ret = hns3_flow_parse_rss(dev, rss_conf, true); +- if (ret) +- goto err; +- +- rss_filter_ptr = rte_zmalloc("hns3 rss filter", +- sizeof(struct hns3_rss_conf_ele), +- 0); +- if (rss_filter_ptr == NULL) { +- hns3_err(hw, +- "Failed to allocate hns3_rss_filter memory"); +- ret = -ENOMEM; +- goto err; +- } +- hns3_rss_conf_copy(&rss_filter_ptr->filter_info, +- 
&rss_conf->conf); +- rss_filter_ptr->filter_info.valid = true; +- TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); +- +- flow->rule = rss_filter_ptr; +- flow->filter_type = RTE_ETH_FILTER_HASH; +- return flow; +- } +- +- memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); +- ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); +- if (ret) +- goto out; +- +- if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) { +- ret = hns3_counter_new(dev, 0, fdir_rule.act_cnt.id, error); +- if (ret) +- goto out; +- +- flow->counter_id = fdir_rule.act_cnt.id; +- } +- +- fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", +- sizeof(struct hns3_fdir_rule_ele), +- 0); +- if (fdir_rule_ptr == NULL) { +- hns3_err(hw, "failed to allocate fdir_rule memory."); +- ret = -ENOMEM; +- goto err_fdir; +- } +- +- ret = hns3_fdir_filter_program(hns, &fdir_rule, false); +- if (!ret) { +- memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, +- sizeof(struct hns3_fdir_rule)); +- TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); +- flow->rule = fdir_rule_ptr; +- flow->filter_type = RTE_ETH_FILTER_FDIR; +- ++ if (filter_info.type == RTE_ETH_FILTER_HASH) ++ ret = hns3_flow_create_rss_rule(dev, &conf->rss_conf, flow); ++ else ++ ret = hns3_flow_create_fdir_rule(dev, &conf->fdir_conf, ++ error, flow); ++ if (ret == 0) + return flow; +- } + +- rte_free(fdir_rule_ptr); +-err_fdir: +- if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) +- hns3_counter_release(dev, fdir_rule.act_cnt.id); +-err: + rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow"); +-out: + TAILQ_REMOVE(&hw->flow_list, flow_node, entries); + rte_free(flow_node); + rte_free(flow); ++ + return NULL; + } + +@@ -1864,16 +2279,10 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, + break; + case RTE_ETH_FILTER_HASH: + rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; +- ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info, +- false); +- if (ret) +- return rte_flow_error_set(error, EIO, +- RTE_FLOW_ERROR_TYPE_HANDLE, +- flow, +- "Destroy RSS fail.Try again"); + TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); + rte_free(rss_filter_ptr); + rss_filter_ptr = NULL; ++ (void)hns3_flow_rebuild_all_rss_filter(hns); + break; + default: + return rte_flow_error_set(error, EINVAL, +@@ -1980,10 +2389,12 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev, + struct rte_flow_error *error) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct hns3_filter_info filter_info = {0}; + int ret; + + pthread_mutex_lock(&hw->flows_lock); +- ret = hns3_flow_validate(dev, attr, pattern, actions, error); ++ ret = hns3_flow_validate(dev, attr, pattern, actions, error, ++ &filter_info); + pthread_mutex_unlock(&hw->flows_lock); + + return ret; +diff --git a/dpdk/drivers/net/hns3/hns3_flow.h b/dpdk/drivers/net/hns3/hns3_flow.h +index 2eb451b720..a700de73c7 100644 +--- a/dpdk/drivers/net/hns3/hns3_flow.h ++++ b/dpdk/drivers/net/hns3/hns3_flow.h +@@ -7,6 +7,8 @@ + + #include <rte_flow.h> + ++#include "hns3_fdir.h" ++ + struct hns3_flow_counter { + LIST_ENTRY(hns3_flow_counter) next; /* Pointer to the next counter. */ + uint32_t shared:1; /* Share counter ID with other flow rules. 
*/ +@@ -21,10 +23,18 @@ struct rte_flow { + uint32_t counter_id; + }; + ++struct hns3_flow_rss_conf { ++ struct rte_flow_action_rss conf; ++ uint8_t key[HNS3_RSS_KEY_SIZE_MAX]; /* Hash key */ ++ uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ ++ uint64_t pattern_type; ++ uint64_t hw_pctypes; /* packet types in driver */ ++}; ++ + /* rss filter list structure */ + struct hns3_rss_conf_ele { + TAILQ_ENTRY(hns3_rss_conf_ele) entries; +- struct hns3_rss_conf filter_info; ++ struct hns3_flow_rss_conf filter_info; + }; -@@ -1682,7 +1673,7 @@ hns3_flow_args_check(const struct rte_flow_attr *attr, + /* hns3_flow memory list structure */ +@@ -33,6 +43,17 @@ struct hns3_flow_mem { + struct rte_flow *flow; + }; - /* - * Check if the flow rule is supported by hns3. -- * It only checkes the format. Don't guarantee the rule can be programmed into -+ * It only checks the format. Don't guarantee the rule can be programmed into - * the HW. Because there can be no enough room for the rule. - */ - static int ++ ++union hns3_filter_conf { ++ struct hns3_fdir_rule fdir_conf; ++ struct hns3_flow_rss_conf rss_conf; ++}; ++ ++struct hns3_filter_info { ++ enum rte_filter_type type; ++ union hns3_filter_conf conf; ++}; ++ + TAILQ_HEAD(hns3_rss_filter_list, hns3_rss_conf_ele); + TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); + +@@ -40,5 +61,6 @@ int hns3_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops); + void hns3_flow_init(struct rte_eth_dev *dev); + void hns3_flow_uninit(struct rte_eth_dev *dev); ++int hns3_restore_filter(struct hns3_adapter *hns); + + #endif /* _HNS3_FLOW_H_ */ +diff --git a/dpdk/drivers/net/hns3/hns3_intr.c b/dpdk/drivers/net/hns3/hns3_intr.c +index 66dc509086..b049774e9a 100644 +--- a/dpdk/drivers/net/hns3/hns3_intr.c ++++ b/dpdk/drivers/net/hns3/hns3_intr.c +@@ -2419,8 +2419,8 @@ hns3_schedule_reset(struct hns3_adapter *hns) + if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == + SCHEDULE_DEFERRED) + rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); +- else +- __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, ++ ++ __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, + __ATOMIC_RELAXED); + + rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns); +@@ -2730,6 +2730,7 @@ hns3_reset_post(struct hns3_adapter *hns) + /* IMP will wait ready flag before reset */ + hns3_notify_reset_ready(hw, false); + hns3_clear_reset_level(hw, &hw->reset.pending); ++ hns3_clear_reset_status(hw); + __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED); + hw->reset.attempts = 0; + hw->reset.stats.success_cnt++; +@@ -2844,6 +2845,7 @@ hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level) + return ret; + err: + hns3_clear_reset_level(hw, &hw->reset.pending); ++ hns3_clear_reset_event(hw); + if (hns3_reset_err_handle(hns)) { + hw->reset.stage = RESET_STAGE_PREWAIT; + hns3_schedule_reset(hns); diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c -index b3563d4694..02028dcd9c 100644 +index b3563d4694..f1743c195e 100644 --- a/dpdk/drivers/net/hns3/hns3_mbx.c +++ b/dpdk/drivers/net/hns3/hns3_mbx.c -@@ -78,14 +78,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +@@ -40,23 +40,6 @@ hns3_resp_to_errno(uint16_t resp_code) + return -EIO; + } + +-static void +-hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode) +-{ +- if (hw->mbx_resp.matching_scheme == +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) { +- 
hw->mbx_resp.lost++; +- hns3_err(hw, +- "VF could not get mbx(%u,%u) head(%u) tail(%u) " +- "lost(%u) from PF", +- code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail, +- hw->mbx_resp.lost); +- return; +- } +- +- hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode); +-} +- + static int + hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + uint8_t *resp_data, uint16_t resp_len) +@@ -67,7 +50,6 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_mbx_resp_status *mbx_resp; + uint32_t wait_time = 0; +- bool received; + + if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) { + hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)", +@@ -78,14 +60,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS; while (wait_time < mbx_time_limit) { if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { @@ -24344,8 +46574,146 @@ index b3563d4694..02028dcd9c 100644 "reset pending"); return -EIO; } +@@ -93,20 +75,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + hns3_dev_handle_mbx_msg(hw); + rte_delay_us(HNS3_WAIT_RESP_US); + +- if (hw->mbx_resp.matching_scheme == +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) +- received = (hw->mbx_resp.head == +- hw->mbx_resp.tail + hw->mbx_resp.lost); +- else +- received = hw->mbx_resp.received_match_resp; +- if (received) ++ if (hw->mbx_resp.received_match_resp) + break; + + wait_time += HNS3_WAIT_RESP_US; + } + hw->mbx_resp.req_msg_data = 0; + if (wait_time >= mbx_time_limit) { +- hns3_mbx_proc_timeout(hw, code, subcode); ++ hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode); + return -ETIME; + } + rte_io_rmb(); +@@ -132,7 +108,6 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) + * we get the exact scheme which is used. + */ + hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode; +- hw->mbx_resp.head++; + + /* Update match_id and ensure the value of match_id is not zero */ + hw->mbx_resp.match_id++; +@@ -185,7 +160,6 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + req->match_id = hw->mbx_resp.match_id; + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { +- hw->mbx_resp.head--; + rte_spinlock_unlock(&hw->mbx_resp.lock); + hns3_err(hw, "VF failed(=%d) to send mbx message to PF", + ret); +@@ -254,41 +228,10 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); + } + +-/* +- * Case1: receive response after timeout, req_msg_data +- * is 0, not equal resp_msg, do lost-- +- * Case2: receive last response during new send_mbx_msg, +- * req_msg_data is different with resp_msg, let +- * lost--, continue to wait for response. 
+- */ +-static void +-hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg) +-{ +- struct hns3_mbx_resp_status *resp = &hw->mbx_resp; +- uint32_t tail = resp->tail + 1; +- +- if (tail > resp->head) +- tail = resp->head; +- if (resp->req_msg_data != resp_msg) { +- if (resp->lost) +- resp->lost--; +- hns3_warn(hw, "Received a mismatched response req_msg(%x) " +- "resp_msg(%x) head(%u) tail(%u) lost(%u)", +- resp->req_msg_data, resp_msg, resp->head, tail, +- resp->lost); +- } else if (tail + resp->lost > resp->head) { +- resp->lost--; +- hns3_warn(hw, "Received a new response again resp_msg(%x) " +- "head(%u) tail(%u) lost(%u)", resp_msg, +- resp->head, tail, resp->lost); +- } +- rte_io_wmb(); +- resp->tail = tail; +-} +- + static void + hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + { ++#define HNS3_MBX_RESP_CODE_OFFSET 16 + struct hns3_mbx_resp_status *resp = &hw->mbx_resp; + uint32_t msg_data; + +@@ -298,12 +241,6 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + * match_id to its response. So VF could use the match_id + * to match the request. + */ +- if (resp->matching_scheme != +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) { +- resp->matching_scheme = +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID; +- hns3_info(hw, "detect mailbox support match id!"); +- } + if (req->match_id == resp->match_id) { + resp->resp_status = hns3_resp_to_errno(req->msg[3]); + memcpy(resp->additional_info, &req->msg[4], +@@ -319,11 +256,19 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + * support copy request's match_id to its response. So VF follows the + * original scheme to process. + */ ++ msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; ++ if (resp->req_msg_data != msg_data) { ++ hns3_warn(hw, ++ "received response tag (%u) is mismatched with requested tag (%u)", ++ msg_data, resp->req_msg_data); ++ return; ++ } ++ + resp->resp_status = hns3_resp_to_errno(req->msg[3]); + memcpy(resp->additional_info, &req->msg[4], + HNS3_MBX_MAX_RESP_DATA_SIZE); +- msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2]; +- hns3_update_resp_position(hw, msg_data); ++ rte_io_wmb(); ++ resp->received_match_resp = true; + } + + static void +@@ -429,15 +374,17 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) + * Clear opcode to inform intr thread don't process + * again. + */ +- crq->desc[crq->next_to_use].opcode = 0; ++ crq->desc[next_to_use].opcode = 0; + } + + scan_next: + next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num; + } + +- crq->next_to_use = next_to_use; +- hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); ++ /* ++ * Note: the crq->next_to_use field should not updated, otherwise, ++ * mailbox messages may be discarded. 
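++ * Consuming the ring is left to the interrupt thread, which
++ * advances next_to_use only after it has processed the descriptors;
++ * this out-of-interrupt scan only clears the opcode of the entries
++ * it has already handled so they are not processed twice.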
++ */ + } + + void diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h -index d637bd2b23..0172a2e288 100644 +index d637bd2b23..1d9a788df5 100644 --- a/dpdk/drivers/net/hns3/hns3_mbx.h +++ b/dpdk/drivers/net/hns3/hns3_mbx.h @@ -22,7 +22,7 @@ enum HNS3_MBX_OPCODE { @@ -24357,8 +46725,30 @@ index d637bd2b23..0172a2e288 100644 HNS3_MBX_GET_BDNUM, /* (VF -> PF) get BD num */ HNS3_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */ HNS3_MBX_GET_STREAMID, /* (VF -> PF) get stream id */ +@@ -89,21 +89,11 @@ enum hns3_mbx_link_fail_subcode { + #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 + #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 + +-enum { +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL = 0, +- HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID +-}; +- + struct hns3_mbx_resp_status { + rte_spinlock_t lock; /* protects against contending sync cmd resp */ + +- uint8_t matching_scheme; +- + /* The following fields used in the matching scheme for original */ + uint32_t req_msg_data; +- uint32_t head; +- uint32_t tail; +- uint32_t lost; + + /* The following fields used in the matching scheme for match_id */ + uint16_t match_id; diff --git a/dpdk/drivers/net/hns3/hns3_mp.c b/dpdk/drivers/net/hns3/hns3_mp.c -index 999b407f7d..e74ddea195 100644 +index 999b407f7d..c3005b943f 100644 --- a/dpdk/drivers/net/hns3/hns3_mp.c +++ b/dpdk/drivers/net/hns3/hns3_mp.c @@ -74,7 +74,6 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) @@ -24369,7 +46759,19 @@ index 999b407f7d..e74ddea195 100644 struct rte_eth_dev *dev; int ret; -@@ -98,14 +97,12 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) +@@ -88,24 +87,22 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + case HNS3_MP_REQ_START_RXTX: + PMD_INIT_LOG(INFO, "port %u starting datapath", + dev->data->port_id); +- hns3_set_rxtx_function(dev); ++ hns3_start_rxtx_datapath(dev); + break; + case HNS3_MP_REQ_STOP_RXTX: + PMD_INIT_LOG(INFO, "port %u stopping datapath", + dev->data->port_id); +- hns3_set_rxtx_function(dev); ++ hns3_stop_rxtx_datapath(dev); + break; case HNS3_MP_REQ_START_TX: PMD_INIT_LOG(INFO, "port %u starting Tx datapath", dev->data->port_id); @@ -24387,10 +46789,54 @@ index 999b407f7d..e74ddea195 100644 default: rte_errno = EINVAL; diff --git a/dpdk/drivers/net/hns3/hns3_ptp.c b/dpdk/drivers/net/hns3/hns3_ptp.c -index 9a829d7011..0b0061bba5 100644 +index 9a829d7011..894ac6dd71 100644 --- a/dpdk/drivers/net/hns3/hns3_ptp.c +++ b/dpdk/drivers/net/hns3/hns3_ptp.c -@@ -81,7 +81,7 @@ hns3_timesync_configure(struct hns3_adapter *hns, bool en) +@@ -7,7 +7,7 @@ + #include <rte_time.h> + + #include "hns3_ethdev.h" +-#include "hns3_regs.h" ++#include "hns3_ptp.h" + #include "hns3_logs.h" + + uint64_t hns3_timestamp_rx_dynflag; +@@ -56,9 +56,23 @@ hns3_ptp_int_en(struct hns3_hw *hw, bool en) + return ret; + } + ++static void ++hns3_ptp_timesync_write_time(struct hns3_hw *hw, const struct timespec *ts) ++{ ++ uint64_t sec = ts->tv_sec; ++ uint64_t ns = ts->tv_nsec; ++ ++ /* Set the timecounters to a new value. 
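++ * The 64-bit seconds value is staged across the SYNC_H/SYNC_M
++ * register pair and the nanoseconds in SYNC_L; the final write to
++ * SYNC_RDY hands the staged value to the hardware clock. For
++ * example, ts = {5, 1500} yields SYNC_H = 0, SYNC_M = 5,
++ * SYNC_L = 1500, then SYNC_RDY = 1.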
*/ ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_H, upper_32_bits(sec)); ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_M, lower_32_bits(sec)); ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_L, lower_32_bits(ns)); ++ hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_RDY, 1); ++} ++ + int + hns3_ptp_init(struct hns3_hw *hw) + { ++ struct timespec sys_time; + int ret; + + if (!hns3_dev_get_support(hw, PTP)) +@@ -71,6 +85,10 @@ hns3_ptp_init(struct hns3_hw *hw) + /* Start PTP timer */ + hns3_write_dev(hw, HNS3_CFG_TIME_CYC_EN, 1); + ++ /* Initializing the RTC. */ ++ clock_gettime(CLOCK_REALTIME, &sys_time); ++ hns3_ptp_timesync_write_time(hw, &sys_time); ++ + return 0; + } + +@@ -81,7 +99,7 @@ hns3_timesync_configure(struct hns3_adapter *hns, bool en) struct hns3_hw *hw = &hns->hw; struct hns3_pf *pf = &hns->pf; struct hns3_cmd_desc desc; @@ -24399,16 +46845,179 @@ index 9a829d7011..0b0061bba5 100644 int ret; hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_PTP_MODE, false); -@@ -125,6 +125,7 @@ hns3_timesync_enable(struct rte_eth_dev *dev) +@@ -216,17 +234,21 @@ hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + int + hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) + { ++#define HNS3_PTP_SEC_H_OFFSET 32 ++#define HNS3_PTP_SEC_H_MASK 0xFFFF ++ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ uint32_t sec_hi, sec_lo; + uint64_t ns, sec; - if (pf->ptp_enable) - return 0; -+ hns3_warn(hw, "note: please ensure Rx/Tx burst mode is simple or common when enabling PTP!"); + if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; - rte_spinlock_lock(&hw->lock); - ret = hns3_timesync_configure(hns, true); +- sec = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L); +- sec |= (uint64_t)(hns3_read_dev(hw, HNS3_CURR_TIME_OUT_H) & 0xFFFF) +- << 32; +- + ns = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_NS); ++ sec_hi = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_H) & HNS3_PTP_SEC_H_MASK; ++ sec_lo = hns3_read_dev(hw, HNS3_CURR_TIME_OUT_L); ++ sec = ((uint64_t)sec_hi << HNS3_PTP_SEC_H_OFFSET) | sec_lo; ++ + ns += sec * NSEC_PER_SEC; + *ts = rte_ns_to_timespec(ns); + +@@ -237,17 +259,11 @@ int + hns3_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) + { + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- uint64_t sec = ts->tv_sec; +- uint64_t ns = ts->tv_nsec; + + if (!hns3_dev_get_support(hw, PTP)) + return -ENOTSUP; + +- /* Set the timecounters to a new value. */ +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_H, upper_32_bits(sec)); +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_M, lower_32_bits(sec)); +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_L, lower_32_bits(ns)); +- hns3_write_dev(hw, HNS3_CFG_TIME_SYNC_RDY, 1); ++ hns3_ptp_timesync_write_time(hw, ts); + + return 0; + } +@@ -290,3 +306,21 @@ hns3_restore_ptp(struct hns3_adapter *hns) + + return ret; + } ++ ++void ++hns3_ptp_uninit(struct hns3_hw *hw) ++{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); ++ int ret; ++ ++ if (!hns3_dev_get_support(hw, PTP)) ++ return; ++ ++ ret = hns3_ptp_int_en(hw, false); ++ if (ret != 0) ++ hns3_err(hw, "disable PTP interrupt failed, ret = %d.", ret); ++ ++ ret = hns3_timesync_configure(hns, false); ++ if (ret != 0) ++ hns3_err(hw, "disable timesync failed, ret = %d.", ret); ++} +diff --git a/dpdk/drivers/net/hns3/hns3_ptp.h b/dpdk/drivers/net/hns3/hns3_ptp.h +new file mode 100644 +index 0000000000..2b8717fa3c +--- /dev/null ++++ b/dpdk/drivers/net/hns3/hns3_ptp.h +@@ -0,0 +1,48 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright(c) 2023 HiSilicon Limited. 
++ */ ++ ++#ifndef HNS3_PTP_H ++#define HNS3_PTP_H ++ ++/* Register bit for 1588 event */ ++#define HNS3_VECTOR0_1588_INT_B 0 ++ ++#define HNS3_PTP_BASE_ADDRESS 0x29000 ++ ++#define HNS3_TX_1588_SEQID_BACK (HNS3_PTP_BASE_ADDRESS + 0x0) ++#define HNS3_TX_1588_TSP_BACK_0 (HNS3_PTP_BASE_ADDRESS + 0x4) ++#define HNS3_TX_1588_TSP_BACK_1 (HNS3_PTP_BASE_ADDRESS + 0x8) ++#define HNS3_TX_1588_TSP_BACK_2 (HNS3_PTP_BASE_ADDRESS + 0xc) ++ ++#define HNS3_TX_1588_BACK_TSP_CNT (HNS3_PTP_BASE_ADDRESS + 0x30) ++ ++#define HNS3_CFG_TIME_SYNC_H (HNS3_PTP_BASE_ADDRESS + 0x50) ++#define HNS3_CFG_TIME_SYNC_M (HNS3_PTP_BASE_ADDRESS + 0x54) ++#define HNS3_CFG_TIME_SYNC_L (HNS3_PTP_BASE_ADDRESS + 0x58) ++#define HNS3_CFG_TIME_SYNC_RDY (HNS3_PTP_BASE_ADDRESS + 0x5c) ++ ++#define HNS3_CFG_TIME_CYC_EN (HNS3_PTP_BASE_ADDRESS + 0x70) ++ ++#define HNS3_CURR_TIME_OUT_H (HNS3_PTP_BASE_ADDRESS + 0x74) ++#define HNS3_CURR_TIME_OUT_L (HNS3_PTP_BASE_ADDRESS + 0x78) ++#define HNS3_CURR_TIME_OUT_NS (HNS3_PTP_BASE_ADDRESS + 0x7c) ++ ++int hns3_restore_ptp(struct hns3_adapter *hns); ++int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, ++ struct rte_eth_conf *conf); ++int hns3_ptp_init(struct hns3_hw *hw); ++void hns3_ptp_uninit(struct hns3_hw *hw); ++int hns3_timesync_enable(struct rte_eth_dev *dev); ++int hns3_timesync_disable(struct rte_eth_dev *dev); ++int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev, ++ struct timespec *timestamp, ++ uint32_t flags __rte_unused); ++int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, ++ struct timespec *timestamp); ++int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts); ++int hns3_timesync_write_time(struct rte_eth_dev *dev, ++ const struct timespec *ts); ++int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); ++ ++#endif /* HNS3_PTP_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_regs.c b/dpdk/drivers/net/hns3/hns3_regs.c +index 86a4cf74d5..95b93af733 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.c ++++ b/dpdk/drivers/net/hns3/hns3_regs.c +@@ -294,8 +294,9 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + uint32_t *origin_data_ptr = data; + uint32_t reg_offset; +- int reg_num; +- int i, j; ++ size_t reg_num; ++ uint16_t j; ++ size_t i; + + /* fetching per-PF registers values from PF PCIe register space */ + reg_num = sizeof(cmdq_reg_addrs) / sizeof(uint32_t); +diff --git a/dpdk/drivers/net/hns3/hns3_regs.h b/dpdk/drivers/net/hns3/hns3_regs.h +index 5812eb39db..d5f9d0ae9f 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.h ++++ b/dpdk/drivers/net/hns3/hns3_regs.h +@@ -121,29 +121,6 @@ + #define HNS3_TQP_INTR_RL_DEFAULT 0 + #define HNS3_TQP_INTR_QL_DEFAULT 0 + +-/* Register bit for 1588 event */ +-#define HNS3_VECTOR0_1588_INT_B 0 +- +-#define HNS3_PTP_BASE_ADDRESS 0x29000 +- +-#define HNS3_TX_1588_SEQID_BACK (HNS3_PTP_BASE_ADDRESS + 0x0) +-#define HNS3_TX_1588_TSP_BACK_0 (HNS3_PTP_BASE_ADDRESS + 0x4) +-#define HNS3_TX_1588_TSP_BACK_1 (HNS3_PTP_BASE_ADDRESS + 0x8) +-#define HNS3_TX_1588_TSP_BACK_2 (HNS3_PTP_BASE_ADDRESS + 0xc) +- +-#define HNS3_TX_1588_BACK_TSP_CNT (HNS3_PTP_BASE_ADDRESS + 0x30) +- +-#define HNS3_CFG_TIME_SYNC_H (HNS3_PTP_BASE_ADDRESS + 0x50) +-#define HNS3_CFG_TIME_SYNC_M (HNS3_PTP_BASE_ADDRESS + 0x54) +-#define HNS3_CFG_TIME_SYNC_L (HNS3_PTP_BASE_ADDRESS + 0x58) +-#define HNS3_CFG_TIME_SYNC_RDY (HNS3_PTP_BASE_ADDRESS + 0x5c) +- +-#define HNS3_CFG_TIME_CYC_EN (HNS3_PTP_BASE_ADDRESS + 0x70) +- +-#define HNS3_CURR_TIME_OUT_H 
(HNS3_PTP_BASE_ADDRESS + 0x74) +-#define HNS3_CURR_TIME_OUT_L (HNS3_PTP_BASE_ADDRESS + 0x78) +-#define HNS3_CURR_TIME_OUT_NS (HNS3_PTP_BASE_ADDRESS + 0x7c) +- + /* gl_usec convert to hardware count, as writing each 1 represents 2us */ + #define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1) + /* rl_usec convert to hardware count, as writing each 1 represents 4us */ diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c -index 3a4b699ae2..980fbe74e8 100644 +index 3a4b699ae2..b587954508 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.c +++ b/dpdk/drivers/net/hns3/hns3_rss.c @@ -9,10 +9,8 @@ @@ -24420,74 +47029,801 @@ index 3a4b699ae2..980fbe74e8 100644 - */ -static const uint8_t hns3_hash_key[] = { +/* Default hash keys */ -+const uint8_t hns3_hash_key[] = { ++const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE] = { 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, -@@ -152,10 +150,6 @@ static const struct { - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, +@@ -20,215 +18,301 @@ static const uint8_t hns3_hash_key[] = { + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA + }; + +-enum hns3_tuple_field { +- /* IPV4_TCP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D = 0, +- HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S, +- HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D, +- HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S, +- +- /* IPV4_UDP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D = 8, +- HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S, +- HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D, +- HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S, +- +- /* IPV4_SCTP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D = 16, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, +- +- /* IPV4 ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, +- HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S, +- HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D, +- HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S, +- +- /* IPV6_TCP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D = 32, +- HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S, +- HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D, +- HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S, +- +- /* IPV6_UDP ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D = 40, +- HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S, +- HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D, +- HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S, +- +- /* IPV6_SCTP ENABLE FIELD */ +- HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D = 48, +- HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, +- +- /* IPV6 ENABLE FIELD */ +- HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, +- HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S, +- HNS3_RSS_FIELD_IPV6_FRAG_IP_D, +- HNS3_RSS_FIELD_IPV6_FRAG_IP_S ++const uint8_t hns3_hash_func_map[] = { ++ [RTE_ETH_HASH_FUNCTION_DEFAULT] = HNS3_RSS_HASH_ALGO_TOEPLITZ, ++ [RTE_ETH_HASH_FUNCTION_TOEPLITZ] = HNS3_RSS_HASH_ALGO_TOEPLITZ, ++ [RTE_ETH_HASH_FUNCTION_SIMPLE_XOR] = HNS3_RSS_HASH_ALGO_SIMPLE, ++ [RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ] = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP, ++}; ++ ++enum hns3_rss_tuple_type { ++ HNS3_RSS_IP_TUPLE, ++ HNS3_RSS_IP_L4_TUPLE, + }; + + static const struct { + uint64_t rss_types; ++ uint16_t tuple_type; + uint64_t rss_field; ++ uint64_t tuple_mask; + } hns3_set_tuple_table[] = { ++ /* IPV4-FRAG */ + { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY, +- 
BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) }, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV4_FLAG_M }, + { RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) }, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_FLAG_M }, ++ { RTE_ETH_RSS_FRAG_IPV4, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_FLAG_M }, ++ ++ /* IPV4 */ ++ { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_SRC_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_L3_DST_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { RTE_ETH_RSS_IPV4, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ ++ /* IPV4-OTHER */ ++ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV4_OTHER, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV4_NONF_M }, ++ ++ /* IPV4-TCP */ + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L4_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV4_TCP, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV4_TCP_M }, ++ ++ /* IPV4-UDP */ + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_L4_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D), ++ 
HNS3_RSS_TUPLE_IPV4_UDP_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV4_UDP, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV4_UDP_M }, ++ ++ /* IPV4-SCTP */ + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV4_SCTP | RTE_ETH_RSS_L4_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) }, +- { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) }, +- { RTE_ETH_RSS_NONFRAG_IPV4_OTHER | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER), ++ HNS3_RSS_TUPLE_IPV4_SCTP_M }, ++ ++ /* IPV6-FRAG */ + { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) }, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV6_FLAG_M }, + { RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_FLAG_M }, ++ { RTE_ETH_RSS_FRAG_IPV6, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_FLAG_M }, ++ ++ /* IPV6 */ ++ { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_L3_SRC_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_L3_DST_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { RTE_ETH_RSS_IPV6, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ ++ /* IPV6-OTHER */ ++ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV6_OTHER, ++ HNS3_RSS_IP_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D), ++ HNS3_RSS_TUPLE_IPV6_NONF_M }, ++ ++ /* IPV6-TCP */ + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ 
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_L4_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV6_TCP, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D), ++ HNS3_RSS_TUPLE_IPV6_TCP_M }, ++ ++ /* IPV6-UDP */ + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_L4_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV6_UDP, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D), ++ HNS3_RSS_TUPLE_IPV6_UDP_M }, ++ ++ /* IPV6-SCTP */ + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) }, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + { RTE_ETH_RSS_NONFRAG_IPV6_SCTP | RTE_ETH_RSS_L4_DST_ONLY, +- BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) }, +- { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_SRC_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) }, +- { RTE_ETH_RSS_NONFRAG_IPV6_OTHER | RTE_ETH_RSS_L3_DST_ONLY, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, +-}; +- +-static const struct { +- uint64_t rss_types; +- uint64_t rss_field; +-} hns3_set_rss_types[] = { +- { RTE_ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) }, +- { RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | +- 
BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, - { RTE_ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, - { RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) | -@@ -241,31 +235,6 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) +- { RTE_ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, +- { RTE_ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) }, +- { RTE_ETH_RSS_NONFRAG_IPV4_OTHER, +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, +- { RTE_ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, +- { RTE_ETH_RSS_NONFRAG_IPV6_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, +- { RTE_ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, +- { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, ++ { RTE_ETH_RSS_NONFRAG_IPV6_SCTP, ++ HNS3_RSS_IP_L4_TUPLE, ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) | +- BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) | +- BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) }, +- { RTE_ETH_RSS_NONFRAG_IPV6_OTHER, +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) } ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER), ++ HNS3_RSS_TUPLE_IPV6_SCTP_M }, + }; + + /* + * rss_generic_config command function, opcode:0x0D01. +- * Used to set algorithm, key_offset and hash key of rss. ++ * Used to set algorithm and hash key of RSS. + */ +-int +-hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) ++static int ++hns3_rss_set_algo_key(struct hns3_hw *hw, uint8_t hash_algo, ++ const uint8_t *key, uint8_t key_len) + { +-#define HNS3_KEY_OFFSET_MAX 3 +-#define HNS3_SET_HASH_KEY_BYTE_FOUR 2 +- + struct hns3_rss_generic_config_cmd *req; + struct hns3_cmd_desc desc; +- uint32_t key_offset, key_size; +- const uint8_t *key_cur; +- uint8_t cur_offset; ++ const uint8_t *cur_key; ++ uint16_t cur_key_size; ++ uint16_t max_bd_num; ++ uint16_t idx; + int ret; + + req = (struct hns3_rss_generic_config_cmd *)desc.data; + +- /* +- * key_offset=0, hash key byte0~15 is set to hardware. +- * key_offset=1, hash key byte16~31 is set to hardware. +- * key_offset=2, hash key byte32~39 is set to hardware. 
+- */ +- for (key_offset = 0; key_offset < HNS3_KEY_OFFSET_MAX; key_offset++) { ++ max_bd_num = DIV_ROUND_UP(key_len, HNS3_RSS_HASH_KEY_NUM); ++ for (idx = 0; idx < max_bd_num; idx++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG, + false); + +- req->hash_config |= +- (hw->rss_info.hash_algo & HNS3_RSS_HASH_ALGO_MASK); +- req->hash_config |= (key_offset << HNS3_RSS_HASH_KEY_OFFSET_B); ++ req->hash_config |= (hash_algo & HNS3_RSS_HASH_ALGO_MASK); ++ req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B); + +- if (key_offset == HNS3_SET_HASH_KEY_BYTE_FOUR) +- key_size = HNS3_RSS_KEY_SIZE - HNS3_RSS_HASH_KEY_NUM * +- HNS3_SET_HASH_KEY_BYTE_FOUR; ++ if (idx == max_bd_num - 1 && ++ (key_len % HNS3_RSS_HASH_KEY_NUM) != 0) ++ cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM; + else +- key_size = HNS3_RSS_HASH_KEY_NUM; ++ cur_key_size = HNS3_RSS_HASH_KEY_NUM; + +- cur_offset = key_offset * HNS3_RSS_HASH_KEY_NUM; +- key_cur = key + cur_offset; +- memcpy(req->hash_key, key_cur, key_size); ++ cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM; ++ memcpy(req->hash_key, cur_key, cur_key_size); + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { +@@ -236,34 +320,50 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) + return ret; + } + } +- /* Update the shadow RSS key with user specified */ +- memcpy(hw->rss_info.key, key, HNS3_RSS_KEY_SIZE); ++ return 0; } -/* - * Used to configure the tuple selection for RSS hash input. - */ --static int + static int -hns3_rss_set_input_tuple(struct hns3_hw *hw) --{ ++hns3_rss_get_algo_key(struct hns3_hw *hw, uint8_t *hash_algo, ++ uint8_t *key, uint8_t key_len) + { - struct hns3_rss_conf *rss_config = &hw->rss_info; - struct hns3_rss_input_tuple_cmd *req; - struct hns3_cmd_desc desc_tuple; -- int ret; -- ++ struct hns3_rss_generic_config_cmd *req; ++ struct hns3_cmd_desc desc; ++ uint16_t cur_key_size; ++ uint16_t max_bd_num; ++ uint8_t *cur_key; ++ uint16_t idx; + int ret; + - hns3_cmd_setup_basic_desc(&desc_tuple, HNS3_OPC_RSS_INPUT_TUPLE, false); -- ++ req = (struct hns3_rss_generic_config_cmd *)desc.data; ++ max_bd_num = DIV_ROUND_UP(key_len, HNS3_RSS_HASH_KEY_NUM); ++ for (idx = 0; idx < max_bd_num; idx++) { ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_GENERIC_CONFIG, ++ true); ++ ++ req->hash_config |= (idx << HNS3_RSS_HASH_KEY_OFFSET_B); ++ ret = hns3_cmd_send(hw, &desc, 1); ++ if (ret) { ++ hns3_err(hw, "fail to obtain RSS algo and key from firmware, ret = %d", ++ ret); ++ return ret; ++ } + - req = (struct hns3_rss_input_tuple_cmd *)desc_tuple.data; -- ++ if (idx == 0) ++ *hash_algo = req->hash_config & HNS3_RSS_HASH_ALGO_MASK; + - req->tuple_field = - rte_cpu_to_le_64(rss_config->rss_tuple_sets.rss_tuple_fields); -- ++ if (idx == max_bd_num - 1 && ++ (key_len % HNS3_RSS_HASH_KEY_NUM) != 0) ++ cur_key_size = key_len % HNS3_RSS_HASH_KEY_NUM; ++ else ++ cur_key_size = HNS3_RSS_HASH_KEY_NUM; + - ret = hns3_cmd_send(hw, &desc_tuple, 1); - if (ret) - hns3_err(hw, "Configure RSS input tuple mode failed %d", ret); -- ++ cur_key = key + idx * HNS3_RSS_HASH_KEY_NUM; ++ memcpy(cur_key, req->hash_key, cur_key_size); ++ } + - return ret; --} -- ++ return 0; + } + /* - * rss_indirection_table command function, opcode:0x0D07. - * Used to configure the indirection table of rss. 
-@@ -339,8 +308,7 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) +@@ -274,6 +374,7 @@ int + hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + { + struct hns3_rss_indirection_table_cmd *req; ++ uint16_t max_bd_num, cfg_tbl_size; + struct hns3_cmd_desc desc; + uint8_t qid_msb_off; + uint8_t qid_msb_val; +@@ -282,14 +383,20 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + int ret; + + req = (struct hns3_rss_indirection_table_cmd *)desc.data; +- +- for (i = 0; i < size / HNS3_RSS_CFG_TBL_SIZE; i++) { ++ max_bd_num = DIV_ROUND_UP(size, HNS3_RSS_CFG_TBL_SIZE); ++ for (i = 0; i < max_bd_num; i++) { + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE, + false); + req->start_table_index = + rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = rte_cpu_to_le_16(HNS3_RSS_SET_BITMAP_MSK); +- for (j = 0; j < HNS3_RSS_CFG_TBL_SIZE; j++) { ++ ++ if (i == max_bd_num - 1 && (size % HNS3_RSS_CFG_TBL_SIZE) != 0) ++ cfg_tbl_size = size % HNS3_RSS_CFG_TBL_SIZE; ++ else ++ cfg_tbl_size = HNS3_RSS_CFG_TBL_SIZE; ++ ++ for (j = 0; j < cfg_tbl_size; j++) { + q_id = indir[i * HNS3_RSS_CFG_TBL_SIZE + j]; + req->rss_result_l[j] = q_id & 0xff; + +@@ -310,9 +417,53 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) + } + } + +- /* Update redirection table of hw */ +- memcpy(hw->rss_info.rss_indirection_tbl, indir, +- sizeof(uint16_t) * size); ++ return 0; ++} ++ ++static int ++hns3_get_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) ++{ ++ struct hns3_rss_indirection_table_cmd *req; ++ uint16_t max_bd_num, cfg_tbl_size; ++ uint8_t qid_msb_off, qid_msb_idx; ++ struct hns3_cmd_desc desc; ++ uint16_t q_id, q_hi, q_lo; ++ uint8_t rss_result_h; ++ uint16_t i, j; ++ int ret; ++ ++ req = (struct hns3_rss_indirection_table_cmd *)desc.data; ++ max_bd_num = DIV_ROUND_UP(size, HNS3_RSS_CFG_TBL_SIZE); ++ for (i = 0; i < max_bd_num; i++) { ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INDIR_TABLE, ++ true); ++ req->start_table_index = ++ rte_cpu_to_le_16(i * HNS3_RSS_CFG_TBL_SIZE); ++ ret = hns3_cmd_send(hw, &desc, 1); ++ if (ret) { ++ hns3_err(hw, "fail to get RSS indirection table from firmware, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ if (i == max_bd_num - 1 && (size % HNS3_RSS_CFG_TBL_SIZE) != 0) ++ cfg_tbl_size = size % HNS3_RSS_CFG_TBL_SIZE; ++ else ++ cfg_tbl_size = HNS3_RSS_CFG_TBL_SIZE; ++ ++ for (j = 0; j < cfg_tbl_size; j++) { ++ qid_msb_idx = ++ j * HNS3_RSS_CFG_TBL_BW_H / HNS3_BITS_PER_BYTE; ++ rss_result_h = req->rss_result_h[qid_msb_idx]; ++ qid_msb_off = ++ j * HNS3_RSS_CFG_TBL_BW_H % HNS3_BITS_PER_BYTE; ++ q_hi = (rss_result_h >> qid_msb_off) & ++ HNS3_RSS_CFG_TBL_BW_H_M; ++ q_lo = req->rss_result_l[j]; ++ q_id = (q_hi << HNS3_RSS_CFG_TBL_BW_L) | q_lo; ++ indir[i * HNS3_RSS_CFG_TBL_SIZE + j] = q_id; ++ } ++ } + + return 0; } +@@ -331,63 +482,134 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) + } - int + ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size); +- if (ret) +- hns3_err(hw, "RSS uninit indir table failed: %d", ret); ++ if (ret != 0) ++ hns3_err(hw, "RSS uninit indir table failed, ret = %d.", ret); ++ else ++ memcpy(hw->rss_info.rss_indirection_tbl, lut, ++ sizeof(uint16_t) * hw->rss_ind_tbl_size); + rte_free(lut); + + return ret; + } + +-int -hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, - struct hns3_rss_tuple_cfg *tuple, uint64_t rss_hf) -+hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf) ++bool ++hns3_check_rss_types_valid(struct 
hns3_hw *hw, uint64_t types) { - struct hns3_rss_input_tuple_cmd *req; - struct hns3_cmd_desc desc; -@@ -385,7 +353,8 @@ hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, - return ret; +- struct hns3_rss_input_tuple_cmd *req; +- struct hns3_cmd_desc desc; +- uint32_t fields_count = 0; /* count times for setting tuple fields */ +- uint32_t i; +- int ret; +- +- hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false); ++ uint64_t ip_mask = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | ++ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | ++ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | ++ RTE_ETH_RSS_NONFRAG_IPV6_OTHER; ++ uint64_t ip_l4_mask = RTE_ETH_RSS_NONFRAG_IPV4_TCP | ++ RTE_ETH_RSS_NONFRAG_IPV4_UDP | ++ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | ++ RTE_ETH_RSS_NONFRAG_IPV6_TCP | ++ RTE_ETH_RSS_NONFRAG_IPV6_UDP | ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP; ++ bool has_l4_src_dst = !!(types & HNS3_RSS_SUPPORT_L4_SRC_DST); ++ bool has_ip_pkt = !!(types & ip_mask); ++ uint64_t final_types; ++ ++ if (types == 0) ++ return true; ++ ++ if ((types & HNS3_ETH_RSS_SUPPORT) == 0) { ++ hns3_err(hw, "specified types(0x%" PRIx64 ") are unsupported.", ++ types); ++ return false; ++ } + +- req = (struct hns3_rss_input_tuple_cmd *)desc.data; ++ if ((types & HNS3_RSS_SUPPORT_L3_SRC_DST) != 0 && ++ (types & HNS3_RSS_SUPPORT_FLOW_TYPE) == 0) { ++ hns3_err(hw, "IP or IP-TCP/UDP/SCTP packet type isn't specified, L3_SRC/DST_ONLY cannot be set."); ++ return false; ++ } + +- for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) { +- if ((rss_hf & hns3_set_tuple_table[i].rss_types) == +- hns3_set_tuple_table[i].rss_types) { +- req->tuple_field |= +- rte_cpu_to_le_64(hns3_set_tuple_table[i].rss_field); +- fields_count++; ++ if (has_l4_src_dst && (types & ip_l4_mask) == 0) { ++ if (!has_ip_pkt) { ++ hns3_err(hw, "IP-TCP/UDP/SCTP packet type isn't specified, L4_SRC/DST_ONLY cannot be set."); ++ return false; + } ++ /* ++ * For the case that the types has L4_SRC/DST_ONLY but hasn't ++ * IP-TCP/UDP/SCTP packet type, this types is considered valid ++ * if it also has IP packet type. ++ */ ++ hns3_warn(hw, "L4_SRC/DST_ONLY is ignored because of no including L4 packet."); + } + +- /* +- * When user does not specify the following types or a combination of +- * the following types, it enables all fields for the supported RSS +- * types. 
the following types as: +- * - RTE_ETH_RSS_L3_SRC_ONLY +- * - RTE_ETH_RSS_L3_DST_ONLY +- * - RTE_ETH_RSS_L4_SRC_ONLY +- * - RTE_ETH_RSS_L4_DST_ONLY +- */ +- if (fields_count == 0) { +- for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) { +- if ((rss_hf & hns3_set_rss_types[i].rss_types) == +- hns3_set_rss_types[i].rss_types) +- req->tuple_field |= rte_cpu_to_le_64( +- hns3_set_rss_types[i].rss_field); ++ if ((types & ~HNS3_ETH_RSS_SUPPORT) != 0) { ++ final_types = types & HNS3_ETH_RSS_SUPPORT; ++ hns3_warn(hw, "set RSS types based on hardware support, requested:0x%" PRIx64 " configured:0x%" PRIx64 "", ++ types, final_types); ++ } ++ ++ return true; ++} ++ ++uint64_t ++hns3_rss_calc_tuple_filed(uint64_t rss_hf) ++{ ++ uint64_t l3_only_mask = RTE_ETH_RSS_L3_SRC_ONLY | ++ RTE_ETH_RSS_L3_DST_ONLY; ++ uint64_t l4_only_mask = RTE_ETH_RSS_L4_SRC_ONLY | ++ RTE_ETH_RSS_L4_DST_ONLY; ++ uint64_t l3_l4_only_mask = l3_only_mask | l4_only_mask; ++ bool has_l3_l4_only = !!(rss_hf & l3_l4_only_mask); ++ bool has_l3_only = !!(rss_hf & l3_only_mask); ++ uint64_t tuple = 0; ++ uint32_t i; ++ ++ for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) { ++ if ((rss_hf & hns3_set_tuple_table[i].rss_types) != ++ hns3_set_tuple_table[i].rss_types) ++ continue; ++ ++ if (hns3_set_tuple_table[i].tuple_type == HNS3_RSS_IP_TUPLE) { ++ if (hns3_set_tuple_table[i].rss_types & l3_only_mask || ++ !has_l3_only) ++ tuple |= hns3_set_tuple_table[i].rss_field; ++ continue; + } ++ ++ /* For IP types with L4, we need check both L3 and L4 */ ++ if (hns3_set_tuple_table[i].rss_types & l3_l4_only_mask || ++ !has_l3_l4_only) ++ tuple |= hns3_set_tuple_table[i].rss_field; } ++ return tuple; ++} ++ ++int ++hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields) ++{ ++ struct hns3_rss_input_tuple_cmd *req; ++ struct hns3_cmd_desc desc; ++ int ret; ++ ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false); ++ req = (struct hns3_rss_input_tuple_cmd *)desc.data; ++ req->tuple_field = rte_cpu_to_le_64(tuple_fields); + ret = hns3_cmd_send(hw, &desc, 1); +- if (ret) { +- hns3_err(hw, "Update RSS flow types tuples failed %d", ret); +- return ret; +- } ++ if (ret != 0) ++ hns3_err(hw, "set RSS hash tuple fields failed ret = %d", ret); + - tuple->rss_tuple_fields = rte_le_to_cpu_64(req->tuple_field); -+ /* Update supported flow types when set tuple success */ -+ hw->rss_info.conf.types = rss_hf; ++ return ret; ++} - return 0; +- return 0; ++int ++hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf) ++{ ++ uint64_t tuple_fields; ++ int ret; ++ ++ tuple_fields = hns3_rss_calc_tuple_filed(rss_hf); ++ ret = hns3_set_rss_tuple_field(hw, tuple_fields); ++ if (ret != 0) ++ hns3_err(hw, "Update RSS flow types tuples failed, ret = %d", ++ ret); ++ ++ return ret; } -@@ -403,55 +372,36 @@ int + + /* +@@ -403,59 +625,137 @@ int hns3_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { @@ -24496,18 +47832,21 @@ index 3a4b699ae2..980fbe74e8 100644 - struct hns3_rss_tuple_cfg *tuple = &hw->rss_info.rss_tuple_sets; - struct hns3_rss_conf *rss_cfg = &hw->rss_info; + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); -+ uint64_t rss_hf_bk = hw->rss_info.conf.types; ++ uint64_t rss_hf_bk = hw->rss_info.rss_hf; uint8_t key_len = rss_conf->rss_key_len; uint64_t rss_hf = rss_conf->rss_hf; uint8_t *key = rss_conf->rss_key; int ret; - if (hw->rss_dis_flag) -+ if (key && key_len != HNS3_RSS_KEY_SIZE) { ++ if (key && key_len != hw->rss_key_size) { + hns3_err(hw, "the hash key len(%u) is 
invalid, must be %u", -+ key_len, HNS3_RSS_KEY_SIZE); - return -EINVAL; ++ key_len, hw->rss_key_size); ++ return -EINVAL; + } ++ ++ if (!hns3_check_rss_types_valid(hw, rss_hf)) + return -EINVAL; rte_spinlock_lock(&hw->lock); - ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_hf); @@ -24539,11 +47878,16 @@ index 3a4b699ae2..980fbe74e8 100644 - ret = -EINVAL; - goto conf_err; - } - ret = hns3_rss_set_algo_key(hw, key); +- ret = hns3_rss_set_algo_key(hw, key); ++ ret = hns3_rss_set_algo_key(hw, hw->rss_info.hash_algo, ++ key, hw->rss_key_size); if (ret) - goto conf_err; + goto set_algo_key_fail; ++ /* Update the shadow RSS key with user specified */ ++ memcpy(hw->rss_info.key, key, hw->rss_key_size); } ++ hw->rss_info.rss_hf = rss_hf; rte_spinlock_unlock(&hw->lock); return 0; @@ -24555,7 +47899,204 @@ index 3a4b699ae2..980fbe74e8 100644 rte_spinlock_unlock(&hw->lock); return ret; } -@@ -582,33 +532,59 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + ++int ++hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields) ++{ ++ struct hns3_rss_input_tuple_cmd *req; ++ struct hns3_cmd_desc desc; ++ int ret; ++ ++ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, true); ++ req = (struct hns3_rss_input_tuple_cmd *)desc.data; ++ ret = hns3_cmd_send(hw, &desc, 1); ++ if (ret != 0) { ++ hns3_err(hw, "fail to get RSS hash tuple fields from firmware, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ *tuple_fields = rte_le_to_cpu_64(req->tuple_field); ++ ++ return 0; ++} ++ ++static uint64_t ++hns3_rss_tuple_fields_to_rss_hf(struct hns3_hw *hw, uint64_t tuple_fields) ++{ ++ uint64_t ipv6_sctp_l4_mask = ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S); ++ uint64_t rss_hf = 0; ++ uint64_t tuple_mask; ++ uint32_t i; ++ ++ for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) { ++ tuple_mask = hns3_set_tuple_table[i].tuple_mask; ++ /* ++ * The RSS hash of the packet type is disabled if its tuples is ++ * zero. ++ */ ++ if ((tuple_fields & tuple_mask) == 0) ++ continue; ++ ++ /* ++ * Some hardware don't support to use src/dst port fields to ++ * hash for IPV6-SCTP packet. ++ */ ++ if ((hns3_set_tuple_table[i].rss_types & ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP) && ++ !hw->rss_info.ipv6_sctp_offload_supported) ++ tuple_mask &= ~ipv6_sctp_l4_mask; ++ ++ /* ++ * The framework (ethdev ops) or driver (rte flow API) ensure ++ * that both L3_SRC/DST_ONLY and L4_SRC/DST_ONLY cannot be set ++ * to driver at the same time. But if user doesn't specify ++ * anything L3/L4_SRC/DST_ONLY, driver enables all tuple fields. ++ * In this case, driver should not report L3/L4_SRC/DST_ONLY. ++ */ ++ if ((tuple_fields & tuple_mask) == tuple_mask) { ++ /* Skip the item enabled part tuples. */ ++ if ((tuple_fields & hns3_set_tuple_table[i].rss_field) != ++ tuple_mask) ++ continue; ++ ++ rss_hf |= hns3_set_tuple_table[i].rss_types; ++ continue; ++ } ++ ++ /* Match the item enabled part tuples.*/ ++ if ((tuple_fields & hns3_set_tuple_table[i].rss_field) == ++ hns3_set_tuple_table[i].rss_field) ++ rss_hf |= hns3_set_tuple_table[i].rss_types; ++ } ++ ++ return rss_hf; ++} ++ ++static int ++hns3_rss_hash_get_rss_hf(struct hns3_hw *hw, uint64_t *rss_hf) ++{ ++ uint64_t tuple_fields; ++ int ret; ++ ++ ret = hns3_get_rss_tuple_field(hw, &tuple_fields); ++ if (ret != 0) ++ return ret; ++ ++ *rss_hf = hns3_rss_tuple_fields_to_rss_hf(hw, tuple_fields); ++ ++ return 0; ++} ++ + /* + * Get rss key and rss_hf types set of RSS hash configuration. 
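 * Both values are read back from the firmware rather than from the
 * driver's shadow copy, so the result reflects the configuration
 * actually in effect.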
+ * @param dev +@@ -471,19 +771,32 @@ hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + { + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_cfg = &hw->rss_info; ++ uint8_t hash_algo = 0; ++ int ret; + + rte_spinlock_lock(&hw->lock); +- rss_conf->rss_hf = rss_cfg->conf.types; ++ ret = hns3_rss_hash_get_rss_hf(hw, &rss_conf->rss_hf); ++ if (ret != 0) { ++ hns3_err(hw, "obtain hash tuples failed, ret = %d", ret); ++ goto out; ++ } + + /* Get the RSS Key required by the user */ +- if (rss_conf->rss_key && rss_conf->rss_key_len >= HNS3_RSS_KEY_SIZE) { +- memcpy(rss_conf->rss_key, rss_cfg->key, HNS3_RSS_KEY_SIZE); +- rss_conf->rss_key_len = HNS3_RSS_KEY_SIZE; ++ if (rss_conf->rss_key && rss_conf->rss_key_len >= hw->rss_key_size) { ++ ret = hns3_rss_get_algo_key(hw, &hash_algo, rss_conf->rss_key, ++ hw->rss_key_size); ++ if (ret != 0) { ++ hns3_err(hw, "obtain hash algo and key failed, ret = %d", ++ ret); ++ goto out; ++ } ++ rss_conf->rss_key_len = hw->rss_key_size; + } ++ ++out: + rte_spinlock_unlock(&hw->lock); + +- return 0; ++ return ret; + } + + /* +@@ -523,12 +836,12 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) { +- rte_spinlock_unlock(&hw->lock); + hns3_err(hw, "queue id(%u) set to redirection table " + "exceeds queue number(%u) allocated to a TC", + reta_conf[idx].reta[shift], + hw->alloc_rss_size); +- return -EINVAL; ++ ret = -EINVAL; ++ goto out; + } + + if (reta_conf[idx].mask & (1ULL << shift)) +@@ -537,7 +850,13 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, + + ret = hns3_set_rss_indir_table(hw, indirection_tbl, + hw->rss_ind_tbl_size); ++ if (ret != 0) ++ goto out; + ++ memcpy(rss_cfg->rss_indirection_tbl, indirection_tbl, ++ sizeof(uint16_t) * hw->rss_ind_tbl_size); ++ ++out: + rte_spinlock_unlock(&hw->lock); + return ret; + } +@@ -559,10 +878,11 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + uint16_t reta_size) + { + struct hns3_adapter *hns = dev->data->dev_private; ++ uint16_t reta_table[HNS3_RSS_IND_TBL_SIZE_MAX]; + struct hns3_hw *hw = &hns->hw; +- struct hns3_rss_conf *rss_cfg = &hw->rss_info; + uint16_t idx, shift; + uint16_t i; ++ int ret; + + if (reta_size != hw->rss_ind_tbl_size) { + hns3_err(hw, "The size of hash lookup table configured (%u)" +@@ -571,44 +891,78 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, + return -EINVAL; + } + rte_spinlock_lock(&hw->lock); ++ ret = hns3_get_rss_indir_table(hw, reta_table, reta_size); ++ if (ret != 0) { ++ rte_spinlock_unlock(&hw->lock); ++ hns3_err(hw, "query RSS redirection table failed, ret = %d.", ++ ret); ++ return ret; ++ } ++ rte_spinlock_unlock(&hw->lock); ++ + for (i = 0; i < reta_size; i++) { + idx = i / RTE_ETH_RETA_GROUP_SIZE; + shift = i % RTE_ETH_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) +- reta_conf[idx].reta[shift] = +- rss_cfg->rss_indirection_tbl[i]; ++ reta_conf[idx].reta[shift] = reta_table[i]; + } +- rte_spinlock_unlock(&hw->lock); ++ return 0; } @@ -24630,7 +48171,75 @@ index 3a4b699ae2..980fbe74e8 100644 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_TC_MODE, false); for (i = 0; i < HNS3_MAX_TC_NUM; i++) { uint16_t mode = 0; -@@ -675,7 +651,8 @@ hns3_config_rss(struct hns3_adapter *hns) +@@ -630,6 +984,52 @@ hns3_set_rss_tc_mode(struct hns3_hw *hw) + return ret; + } + ++/* ++ * Note: the 'hash_algo' is defined by enum rte_eth_hash_function. 
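++ * Either the algorithm or the key (or both) may be updated: the
++ * current pair is first read back from hardware, so whichever half
++ * the caller leaves untouched is preserved when both are written
++ * back.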
++ */ ++int ++hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_func, ++ uint8_t *key, uint8_t key_len) ++{ ++ uint8_t rss_key[HNS3_RSS_KEY_SIZE_MAX] = {0}; ++ bool modify_key, modify_algo; ++ uint8_t hash_algo = 0; ++ int ret; ++ ++ modify_key = (key != NULL && key_len > 0); ++ modify_algo = hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT; ++ if (!modify_key && !modify_algo) ++ return 0; ++ ++ if (modify_algo && hash_func >= RTE_DIM(hns3_hash_func_map)) { ++ hns3_err(hw, "hash func (%u) is unsupported.", hash_func); ++ return -ENOTSUP; ++ } ++ if (modify_key && key_len != hw->rss_key_size) { ++ hns3_err(hw, "hash key length (%u) is invalid.", key_len); ++ return -EINVAL; ++ } ++ ++ ret = hns3_rss_get_algo_key(hw, &hash_algo, rss_key, hw->rss_key_size); ++ if (ret != 0) { ++ hns3_err(hw, "fail to get RSS hash algorithm and key, ret = %d", ++ ret); ++ return ret; ++ } ++ ++ if (modify_algo) ++ hash_algo = hns3_hash_func_map[hash_func]; ++ if (modify_key) ++ memcpy(rss_key, key, key_len); ++ ++ ret = hns3_rss_set_algo_key(hw, hash_algo, rss_key, hw->rss_key_size); ++ if (ret != 0) ++ hns3_err(hw, "fail to set RSS hash algorithm and key, ret = %d", ++ ret); ++ ++ return ret; ++} ++ + static void + hns3_rss_tuple_uninit(struct hns3_hw *hw) + { +@@ -656,10 +1056,11 @@ hns3_rss_set_default_args(struct hns3_hw *hw) + int i; + + /* Default hash algorithm */ +- rss_cfg->conf.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; ++ rss_cfg->hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; + +- /* Default RSS key */ +- memcpy(rss_cfg->key, hns3_hash_key, HNS3_RSS_KEY_SIZE); ++ hw->rss_info.rss_hf = 0; ++ memcpy(rss_cfg->key, hns3_hash_key, ++ RTE_MIN(sizeof(hns3_hash_key), hw->rss_key_size)); + + /* Initialize RSS indirection table */ + for (i = 0; i < hw->rss_ind_tbl_size; i++) +@@ -675,67 +1076,42 @@ hns3_config_rss(struct hns3_adapter *hns) struct hns3_hw *hw = &hns->hw; struct hns3_rss_conf *rss_cfg = &hw->rss_info; uint8_t *hash_key = rss_cfg->key; @@ -24640,16 +48249,26 @@ index 3a4b699ae2..980fbe74e8 100644 enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode; -@@ -691,51 +668,30 @@ hns3_config_rss(struct hns3_adapter *hns) - break; - } - +- switch (hw->rss_info.conf.func) { +- case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: +- hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE; +- break; +- case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: +- hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP; +- break; +- default: +- hw->rss_info.hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ; +- break; +- } +- - /* When RSS is off, redirect the packet queue 0 */ - if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) - hns3_rss_uninit(hns); - - /* Configure RSS hash algorithm and hash key offset */ - ret = hns3_rss_set_algo_key(hw, hash_key); +- /* Configure RSS hash algorithm and hash key offset */ +- ret = hns3_rss_set_algo_key(hw, hash_key); ++ ret = hns3_rss_set_algo_key(hw, rss_cfg->hash_algo, ++ hash_key, hw->rss_key_size); if (ret) return ret; @@ -24676,74 +48295,250 @@ index 3a4b699ae2..980fbe74e8 100644 - goto rss_indir_table_uninit; - - return ret; -- ++ return ret; + -rss_indir_table_uninit: - if (((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) { - ret1 = hns3_rss_reset_indir_table(hw); - if (ret1 != 0) - return ret; -- } -- --rss_tuple_uninit: -- hns3_rss_tuple_uninit(hw); -+ return ret; - -- /* Disable RSS */ -- hw->rss_info.conf.types = 0; + /* -+ * When muli-queue RSS mode flag is not set or unsupported tuples are ++ * When multi-queue RSS mode flag is not set or unsupported tuples are + * set, disable all 
tuples. + */ -+ rss_hf = hw->rss_info.conf.types; ++ rss_hf = hw->rss_info.rss_hf; + if (!((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) || + !(rss_hf & HNS3_ETH_RSS_SUPPORT)) + rss_hf = 0; ++ ++ ret = hns3_set_rss_tuple_by_rss_hf(hw, rss_hf); ++ if (ret != 0) { ++ hns3_err(hw, "set RSS tuples failed, ret = %d.", ret); ++ return ret; + } ++ hw->rss_info.rss_hf = rss_hf; +-rss_tuple_uninit: +- hns3_rss_tuple_uninit(hw); +- +- /* Disable RSS */ +- hw->rss_info.conf.types = 0; +- - return ret; -+ return hns3_set_rss_tuple_by_rss_hf(hw, rss_hf); ++ return 0; } /* +@@ -753,5 +1129,5 @@ hns3_rss_uninit(struct hns3_adapter *hns) + return; + + /* Disable RSS */ +- hw->rss_info.conf.types = 0; ++ hw->rss_info.rss_hf = 0; + } diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h -index 6f153a1b7b..56627cbd4c 100644 +index 6f153a1b7b..5c0f0b75f0 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.h +++ b/dpdk/drivers/net/hns3/hns3_rss.h -@@ -41,9 +41,8 @@ struct hns3_rss_tuple_cfg { +@@ -7,25 +7,107 @@ + #include <rte_ethdev.h> + #include <rte_flow.h> + +-#define HNS3_ETH_RSS_SUPPORT ( \ +- RTE_ETH_RSS_FRAG_IPV4 | \ +- RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ +- RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ +- RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \ +- RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \ +- RTE_ETH_RSS_FRAG_IPV6 | \ +- RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ +- RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ +- RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \ +- RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \ +- RTE_ETH_RSS_L3_SRC_ONLY | \ +- RTE_ETH_RSS_L3_DST_ONLY | \ +- RTE_ETH_RSS_L4_SRC_ONLY | \ +- RTE_ETH_RSS_L4_DST_ONLY) ++#define HNS3_RSS_SUPPORT_L3_SRC_DST (RTE_ETH_RSS_L3_SRC_ONLY | \ ++ RTE_ETH_RSS_L3_DST_ONLY) ++#define HNS3_RSS_SUPPORT_L4_SRC_DST (RTE_ETH_RSS_L4_SRC_ONLY | \ ++ RTE_ETH_RSS_L4_DST_ONLY) ++#define HNS3_RSS_SUPPORT_L3L4 (HNS3_RSS_SUPPORT_L3_SRC_DST | \ ++ HNS3_RSS_SUPPORT_L4_SRC_DST) ++ ++#define HNS3_RSS_SUPPORT_FLOW_TYPE (RTE_ETH_RSS_IPV4 | \ ++ RTE_ETH_RSS_FRAG_IPV4 | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \ ++ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \ ++ RTE_ETH_RSS_IPV6 | \ ++ RTE_ETH_RSS_FRAG_IPV6 | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \ ++ RTE_ETH_RSS_NONFRAG_IPV6_OTHER) ++ ++#define HNS3_ETH_RSS_SUPPORT (HNS3_RSS_SUPPORT_FLOW_TYPE | \ ++ HNS3_RSS_SUPPORT_L3L4) ++ ++enum hns3_tuple_field { ++ /* IPV4_TCP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D = 0, ++ HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S, ++ HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S, ++ ++ /* IPV4_UDP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D = 8, ++ HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S, ++ HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S, ++ ++ /* IPV4_SCTP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D = 16, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, ++ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, ++ ++ /* IPV4 ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, ++ HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S, ++ HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D, ++ HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S, ++ ++ /* IPV6_TCP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D = 32, ++ HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S, ++ HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S, ++ ++ /* IPV6_UDP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D = 40, ++ HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S, ++ HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D, ++ 
HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S, ++ ++ /* IPV6_SCTP ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D = 48, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, ++ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, ++ ++ /* IPV6 ENABLE FIELD */ ++ HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, ++ HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S, ++ HNS3_RSS_FIELD_IPV6_FRAG_IP_D, ++ HNS3_RSS_FIELD_IPV6_FRAG_IP_S ++}; ++ ++#define HNS3_RSS_PCTYPE_IPV4_TCP BIT_ULL(0) ++#define HNS3_RSS_PCTYPE_IPV4_UDP BIT_ULL(8) ++#define HNS3_RSS_PCTYPE_IPV4_SCTP BIT_ULL(16) ++#define HNS3_RSS_PCTYPE_IPV4_NONF BIT_ULL(24) ++#define HNS3_RSS_PCTYPE_IPV4_FLAG BIT_ULL(26) ++#define HNS3_RSS_PCTYPE_IPV6_TCP BIT_ULL(32) ++#define HNS3_RSS_PCTYPE_IPV6_UDP BIT_ULL(40) ++#define HNS3_RSS_PCTYPE_IPV6_SCTP BIT_ULL(48) ++#define HNS3_RSS_PCTYPE_IPV6_NONF BIT_ULL(56) ++#define HNS3_RSS_PCTYPE_IPV6_FLAG BIT_ULL(58) ++ ++#define HNS3_RSS_TUPLE_IPV4_TCP_M GENMASK(3, 0) ++#define HNS3_RSS_TUPLE_IPV4_UDP_M GENMASK(11, 8) ++#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(20, 16) ++#define HNS3_RSS_TUPLE_IPV4_NONF_M GENMASK(25, 24) ++#define HNS3_RSS_TUPLE_IPV4_FLAG_M GENMASK(27, 26) ++#define HNS3_RSS_TUPLE_IPV6_TCP_M GENMASK(35, 32) ++#define HNS3_RSS_TUPLE_IPV6_UDP_M GENMASK(43, 40) ++#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(52, 48) ++#define HNS3_RSS_TUPLE_IPV6_NONF_M GENMASK(57, 56) ++#define HNS3_RSS_TUPLE_IPV6_FLAG_M GENMASK(59, 58) + + #define HNS3_RSS_IND_TBL_SIZE 512 /* The size of hash lookup table */ + #define HNS3_RSS_IND_TBL_SIZE_MAX 2048 + #define HNS3_RSS_KEY_SIZE 40 ++#define HNS3_RSS_KEY_SIZE_MAX 128 + #define HNS3_RSS_SET_BITMAP_MSK 0xffff + + #define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 +@@ -33,20 +115,13 @@ + #define HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP 2 + #define HNS3_RSS_HASH_ALGO_MASK 0xf + +-struct hns3_rss_tuple_cfg { +- uint64_t rss_tuple_fields; +-}; +- +-#define HNS3_RSS_QUEUES_BUFFER_NUM 64 /* Same as the Max rx/tx queue num */ ++/* Same as the Max queue num under TC */ ++#define HNS3_RSS_QUEUES_BUFFER_NUM 512 struct hns3_rss_conf { - /* RSS parameters :algorithm, flow_types, key, queue */ - struct rte_flow_action_rss conf; +- /* RSS parameters :algorithm, flow_types, key, queue */ +- struct rte_flow_action_rss conf; - uint8_t hash_algo; /* hash function type definited by hardware */ -+ uint8_t hash_algo; /* hash function type defined by hardware */ - uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */ +- uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */ - struct hns3_rss_tuple_cfg rss_tuple_sets; ++ uint64_t rss_hf; ++ uint8_t hash_algo; /* hash function type defined by hardware */ ++ uint8_t key[HNS3_RSS_KEY_SIZE_MAX]; /* Hash key */ uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; - uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ - bool valid; /* check if RSS rule is valid */ -@@ -89,6 +88,8 @@ static inline uint32_t roundup_pow_of_two(uint32_t x) +- uint16_t queue[HNS3_RSS_QUEUES_BUFFER_NUM]; /* Queues indices to use */ +- bool valid; /* check if RSS rule is valid */ + /* + * For IPv6 SCTP packets type, check whether the NIC hardware support + * RSS hash using the src/dst port as the input tuple. 
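
For illustration only (not part of the patch): a minimal sketch of how a 64-bit tuple-field bitmap with the layout defined above can be derived from RTE_ETH_RSS_* flags. The EXAMPLE_* names and the reduced flag set are assumptions for the example; the real mapping in hns3_rss.c covers every supported packet type.

#include <stdint.h>
#include <rte_ethdev.h>

#define EXAMPLE_BIT_ULL(n)    (1ULL << (n))
#define EXAMPLE_GENMASK(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t
example_rss_hf_to_tuple_fields(uint64_t rss_hf)
{
	uint64_t fields = 0;

	/* IPv4/TCP: src/dst port and src/dst IP sub-fields, bits 0..3. */
	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
		fields |= EXAMPLE_GENMASK(3, 0);
	/* IPv4/UDP: the equivalent sub-fields start at bit 8. */
	if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
		fields |= EXAMPLE_GENMASK(11, 8);
	/* L3_SRC_ONLY keeps the source IP but drops the destination-IP
	 * sub-field (IP_D) of the flow types enabled above. */
	if (rss_hf & RTE_ETH_RSS_L3_SRC_ONLY)
		fields &= ~(EXAMPLE_BIT_ULL(2) | EXAMPLE_BIT_ULL(10));

	return fields;
}

For example, rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_L3_SRC_ONLY yields 0xB here: TCP_D, TCP_S and IP_S stay set while IP_D (bit 2) is cleared.
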
For Kunpeng920 +@@ -89,6 +164,8 @@ static inline uint32_t roundup_pow_of_two(uint32_t x) return 1UL << fls(x - 1); } -+extern const uint8_t hns3_hash_key[]; ++extern const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE]; + struct hns3_adapter; int hns3_dev_rss_hash_update(struct rte_eth_dev *dev, -@@ -107,9 +108,7 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, +@@ -107,10 +184,12 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, int hns3_rss_reset_indir_table(struct hns3_hw *hw); int hns3_config_rss(struct hns3_adapter *hns); void hns3_rss_uninit(struct hns3_adapter *hns); -int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, - struct hns3_rss_tuple_cfg *tuple, - uint64_t rss_hf); +-int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key); +-int hns3_restore_rss_filter(struct rte_eth_dev *dev); ++bool hns3_check_rss_types_valid(struct hns3_hw *hw, uint64_t types); +int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf); - int hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key); - int hns3_restore_rss_filter(struct rte_eth_dev *dev); ++int hns3_set_rss_tuple_field(struct hns3_hw *hw, uint64_t tuple_fields); ++int hns3_get_rss_tuple_field(struct hns3_hw *hw, uint64_t *tuple_fields); ++uint64_t hns3_rss_calc_tuple_filed(uint64_t rss_hf); ++int hns3_update_rss_algo_key(struct hns3_hw *hw, uint8_t hash_algo, ++ uint8_t *key, uint8_t key_len); + #endif /* _HNS3_RSS_H_ */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c -index f365daadf8..403f811a51 100644 +index f365daadf8..f841e44154 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx.c +++ b/dpdk/drivers/net/hns3/hns3_rxtx.c -@@ -776,7 +776,7 @@ hns3vf_reset_all_tqps(struct hns3_hw *hw) +@@ -50,6 +50,8 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) + rxq->sw_ring[i].mbuf = NULL; + } + } ++ for (i = 0; i < rxq->rx_rearm_nb; i++) ++ rxq->sw_ring[(rxq->rx_rearm_start + i) % rxq->nb_rx_desc].mbuf = NULL; + } + + for (i = 0; i < rxq->bulk_mbuf_num; i++) +@@ -587,7 +589,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) +- hns3_err(hw, "TQP enable fail, ret = %d", ret); ++ hns3_err(hw, "TQP %s fail, ret = %d", enable ? 
"enable" : "disable", ret); + + return ret; + } +@@ -776,7 +778,7 @@ hns3vf_reset_all_tqps(struct hns3_hw *hw) int ret; int i; @@ -24752,7 +48547,16 @@ index f365daadf8..403f811a51 100644 ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, sizeof(msg_data), true, &reset_status, sizeof(reset_status)); -@@ -1763,7 +1763,8 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size, +@@ -1644,7 +1646,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, + + ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); + if (ret) { +- hns3_err(hw, "Fail to configure fake rx queues: %d", ret); ++ hns3_err(hw, "Fail to configure fake tx queues: %d", ret); + goto cfg_fake_tx_q_fail; + } + +@@ -1763,7 +1765,8 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size, return -EINVAL; } @@ -24762,7 +48566,20 @@ index f365daadf8..403f811a51 100644 min_vec_bds = HNS3_DEFAULT_RXQ_REARM_THRESH + HNS3_DEFAULT_RX_BURST; if (nb_desc < min_vec_bds || -@@ -1903,7 +1904,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, +@@ -1793,6 +1796,12 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, + return -EINVAL; + } + ++ if (conf->rx_free_thresh >= nb_desc) { ++ hns3_err(hw, "rx_free_thresh (%u) must be less than %u", ++ conf->rx_free_thresh, nb_desc); ++ return -EINVAL; ++ } ++ + if (conf->rx_drop_en == 0) + hns3_warn(hw, "if no descriptors available, packets are always " + "dropped and rx_drop_en (1) is fixed on"); +@@ -1903,7 +1912,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdevice driver. And the * related PF configuration is delivered through the mailbox and finally @@ -24771,7 +48588,25 @@ index f365daadf8..403f811a51 100644 */ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == -@@ -2388,14 +2389,14 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) +@@ -2000,7 +2009,7 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_SCTP, + RTE_PTYPE_INNER_L4_ICMP, +- RTE_PTYPE_TUNNEL_VXLAN, ++ RTE_PTYPE_TUNNEL_GRENAT, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_UNKNOWN + }; +@@ -2097,7 +2106,7 @@ hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl) + tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT; + + tbl->ol4table[0] = RTE_PTYPE_UNKNOWN; +- tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN; ++ tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_GRENAT; + tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE; + } + +@@ -2388,14 +2397,14 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) return rte_mbuf_raw_alloc(rxq->mb_pool); } @@ -24790,7 +48625,7 @@ index f365daadf8..403f811a51 100644 if (hns3_timestamp_rx_dynflag > 0) { *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = timestamp; -@@ -2469,7 +2470,8 @@ hns3_recv_pkts_simple(void *rx_queue, +@@ -2469,7 +2478,8 @@ hns3_recv_pkts_simple(void *rx_queue, rxe->mbuf = nmb; if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) @@ -24800,7 +48635,7 @@ index f365daadf8..403f811a51 100644 dma_addr = rte_mbuf_data_iova_default(nmb); rxdp->addr = rte_cpu_to_le_64(dma_addr); -@@ -2540,6 +2542,7 @@ hns3_recv_scattered_pkts(void *rx_queue, +@@ -2540,6 +2550,7 @@ hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf *rxm; struct rte_eth_dev *dev; uint32_t 
bd_base_info; @@ -24808,7 +48643,7 @@ index f365daadf8..403f811a51 100644 uint32_t l234_info; uint32_t gro_size; uint32_t ol_info; -@@ -2649,6 +2652,9 @@ hns3_recv_scattered_pkts(void *rx_queue, +@@ -2649,6 +2660,9 @@ hns3_recv_scattered_pkts(void *rx_queue, rxm = rxe->mbuf; rxe->mbuf = nmb; @@ -24818,7 +48653,7 @@ index f365daadf8..403f811a51 100644 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->rx.bd_base_info = 0; rxdp->addr = dma_addr; -@@ -2671,7 +2677,7 @@ hns3_recv_scattered_pkts(void *rx_queue, +@@ -2671,7 +2685,7 @@ hns3_recv_scattered_pkts(void *rx_queue, } if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) @@ -24827,7 +48662,15 @@ index f365daadf8..403f811a51 100644 /* * The last buffer of the received packet. packet len from -@@ -3043,7 +3049,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, +@@ -2791,6 +2805,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + { hns3_recv_scattered_pkts, "Scalar Scattered" }, + { hns3_recv_pkts_vec, "Vector Neon" }, + { hns3_recv_pkts_vec_sve, "Vector Sve" }, ++ { hns3_dummy_rxtx_burst, "Dummy" }, + }; + + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; +@@ -3043,7 +3058,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdev driver. And the * related PF configuration is delivered through the mailbox and finally @@ -24836,7 +48679,95 @@ index f365daadf8..403f811a51 100644 */ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == -@@ -3208,7 +3214,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, +@@ -3077,51 +3092,40 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, + return 0; + } + +-static int ++static void + hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) + { + uint16_t tx_next_clean = txq->next_to_clean; +- uint16_t tx_next_use = txq->next_to_use; +- struct hns3_entry *tx_entry = &txq->sw_ring[tx_next_clean]; ++ uint16_t tx_next_use = txq->next_to_use; ++ uint16_t tx_bd_ready = txq->tx_bd_ready; ++ uint16_t tx_bd_max = txq->nb_tx_desc; ++ struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean]; + struct hns3_desc *desc = &txq->tx_ring[tx_next_clean]; +- int i; +- +- if (tx_next_use >= tx_next_clean && +- tx_next_use < tx_next_clean + txq->tx_rs_thresh) +- return -1; ++ struct rte_mbuf *mbuf; + +- /* +- * All mbufs can be released only when the VLD bits of all +- * descriptors in a batch are cleared. 
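
For illustration only (not part of the patch): the comment above describes the old batch rule; the replacement code instead frees one BD at a time. A minimal sketch of that pattern follows, under the assumption of simplified structure and field names (EXAMPLE_*-style stand-ins, not the hns3 types). Hardware clears the VLD bit of a Tx BD once the buffer has been sent, so software can walk the ring from next_to_clean and free mbufs until it meets a BD still owned by hardware.

#include <stdlib.h>
#include <stdint.h>

struct example_desc {
	uint16_t hw_owned; /* stands in for the VLD bit in tp_fe_sc_vld_ra_ri */
	void *mbuf;
};

struct example_txq {
	struct example_desc *ring;
	uint16_t nb_desc;
	uint16_t next_to_clean;
	uint16_t next_to_use;
	uint16_t bd_ready;
};

static void
example_tx_clean(struct example_txq *q)
{
	uint16_t c = q->next_to_clean;

	/*
	 * Walk from the oldest in-flight BD and stop at the first one
	 * hardware still owns, or when the ring is fully drained (the
	 * clean index catches up with the use index).
	 */
	while (c != q->next_to_use && !q->ring[c].hw_owned) {
		free(q->ring[c].mbuf);    /* stands in for rte_pktmbuf_free_seg() */
		q->ring[c].mbuf = NULL;
		q->bd_ready++;            /* one more BD available for transmit */
		c = (c + 1) % q->nb_desc; /* wrap at the end of the ring */
	}
	q->next_to_clean = c;
}

Cleaning per descriptor rather than per tx_rs_thresh batch is what lets the caller bound the work done in one pass, which the hns3_tx_done_cleanup_full() rewrite further down relies on.
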
+- */ +- for (i = 0; i < txq->tx_rs_thresh; i++) { +- if (desc[i].tx.tp_fe_sc_vld_ra_ri & +- rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B))) +- return -1; +- } ++ while ((!(desc->tx.tp_fe_sc_vld_ra_ri & ++ rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) && ++ tx_next_use != tx_next_clean) { ++ mbuf = tx_bak_pkt->mbuf; ++ if (mbuf) { ++ rte_pktmbuf_free_seg(mbuf); ++ tx_bak_pkt->mbuf = NULL; ++ } + +- for (i = 0; i < txq->tx_rs_thresh; i++) { +- rte_pktmbuf_free_seg(tx_entry[i].mbuf); +- tx_entry[i].mbuf = NULL; ++ desc++; ++ tx_bak_pkt++; ++ tx_next_clean++; ++ tx_bd_ready++; ++ ++ if (tx_next_clean >= tx_bd_max) { ++ tx_next_clean = 0; ++ desc = txq->tx_ring; ++ tx_bak_pkt = txq->sw_ring; ++ } + } + +- /* Update numbers of available descriptor due to buffer freed */ +- txq->tx_bd_ready += txq->tx_rs_thresh; +- txq->next_to_clean += txq->tx_rs_thresh; +- if (txq->next_to_clean >= txq->nb_tx_desc) +- txq->next_to_clean = 0; +- +- return 0; +-} +- +-static inline int +-hns3_tx_free_required_buffer(struct hns3_tx_queue *txq, uint16_t required_bds) +-{ +- while (required_bds > txq->tx_bd_ready) { +- if (hns3_tx_free_useless_buffer(txq) != 0) +- return -1; +- } +- return 0; ++ txq->next_to_clean = tx_next_clean; ++ txq->tx_bd_ready = tx_bd_ready; + } + + int +@@ -3131,6 +3135,9 @@ hns3_config_gro(struct hns3_hw *hw, bool en) + struct hns3_cmd_desc desc; + int ret; + ++ if (!hns3_dev_get_support(hw, GRO)) ++ return 0; ++ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hns3_cfg_gro_status_cmd *)desc.data; + +@@ -3208,7 +3215,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, * in Tx direction based on hns3 network engine. So when the number of * VLANs in the packets represented by rxm plus the number of VLAN * offload by hardware such as PVID etc, exceeds two, the packets will @@ -24845,7 +48776,7 @@ index f365daadf8..403f811a51 100644 * by hardware. When the PF PVID is enabled by calling the API function * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3 * PF kernel ether driver, the outer VLAN tag will always be the PVID. -@@ -3393,7 +3399,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, +@@ -3393,7 +3400,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, /* * The inner l2 length of mbuf is the sum of outer l4 length, * tunneling header length and inner l2 length for a tunnel @@ -24854,7 +48785,7 @@ index f365daadf8..403f811a51 100644 * length is contained in the field of outer L4 length. * Therefore, driver need to calculate the outer L4 length and * inner L2 length. -@@ -3409,7 +3415,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, +@@ -3409,7 +3416,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); /* @@ -24863,7 +48794,7 @@ index f365daadf8..403f811a51 100644 * fill the NVGRE header length to the outer L4 field. */ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, -@@ -3452,7 +3458,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, +@@ -3452,7 +3459,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, * there is a need that switching between them. 
To avoid multiple * calculations, the length of the L2 header include the outer and @@ -24872,7 +48803,7 @@ index f365daadf8..403f811a51 100644 */ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) { /* -@@ -3632,7 +3638,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +@@ -3632,7 +3639,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { struct rte_udp_hdr *udp_hdr; /* @@ -24881,7 +48812,7 @@ index f365daadf8..403f811a51 100644 * header for TSO packets */ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) -@@ -3657,7 +3663,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +@@ -3657,7 +3664,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { struct rte_udp_hdr *udp_hdr; /* @@ -24890,7 +48821,7 @@ index f365daadf8..403f811a51 100644 * header for TSO packets */ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) -@@ -4044,7 +4050,7 @@ static inline void +@@ -4044,7 +4051,7 @@ static inline void hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { #define PER_LOOP_NUM 4 @@ -24899,7 +48830,7 @@ index f365daadf8..403f811a51 100644 uint64_t dma_addr; uint32_t i; -@@ -4055,6 +4061,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +@@ -4055,6 +4062,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; @@ -24908,7 +48839,7 @@ index f365daadf8..403f811a51 100644 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } } -@@ -4062,7 +4070,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +@@ -4062,7 +4071,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) static inline void hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { @@ -24917,7 +48848,7 @@ index f365daadf8..403f811a51 100644 uint64_t dma_addr; dma_addr = rte_mbuf_data_iova(*pkts); -@@ -4071,6 +4079,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) +@@ -4071,6 +4080,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; @@ -24926,7 +48857,103 @@ index f365daadf8..403f811a51 100644 txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } -@@ -4312,10 +4322,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) +@@ -4127,14 +4138,16 @@ hns3_xmit_pkts_simple(void *tx_queue, + } + + txq->tx_bd_ready -= nb_pkts; +- if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) { ++ if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) { + nb_tx = txq->nb_tx_desc - txq->next_to_use; + hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx); + txq->next_to_use = 0; + } + +- hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); +- txq->next_to_use += nb_pkts - nb_tx; ++ if (nb_pkts > nb_tx) { ++ hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); ++ txq->next_to_use += nb_pkts - nb_tx; ++ } + + hns3_write_txq_tail_reg(txq, nb_pkts); + +@@ -4158,8 +4171,7 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + uint16_t nb_tx; + uint16_t i; + +- if (txq->tx_bd_ready < txq->tx_free_thresh) +- (void)hns3_tx_free_useless_buffer(txq); ++ hns3_tx_free_useless_buffer(txq); + + tx_next_use = txq->next_to_use; + tx_bd_max = txq->nb_tx_desc; +@@ -4174,14 +4186,10 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
nb_pkts) + nb_buf = tx_pkt->nb_segs; + + if (nb_buf > txq->tx_bd_ready) { +- /* Try to release the required MBUF, but avoid releasing +- * all MBUFs, otherwise, the MBUFs will be released for +- * a long time and may cause jitter. +- */ +- if (hns3_tx_free_required_buffer(txq, nb_buf) != 0) { +- txq->dfx_stats.queue_full_cnt++; +- goto end_of_tx; +- } ++ txq->dfx_stats.queue_full_cnt++; ++ if (nb_tx == 0) ++ return 0; ++ goto end_of_tx; + } + + /* +@@ -4287,24 +4295,31 @@ int + hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) + { ++ static const struct { ++ eth_tx_burst_t pkt_burst; ++ const char *info; ++ } burst_infos[] = { ++ { hns3_xmit_pkts_simple, "Scalar Simple" }, ++ { hns3_xmit_pkts, "Scalar" }, ++ { hns3_xmit_pkts_vec, "Vector Neon" }, ++ { hns3_xmit_pkts_vec_sve, "Vector Sve" }, ++ { hns3_dummy_rxtx_burst, "Dummy" }, ++ }; ++ + eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; +- const char *info = NULL; +- +- if (pkt_burst == hns3_xmit_pkts_simple) +- info = "Scalar Simple"; +- else if (pkt_burst == hns3_xmit_pkts) +- info = "Scalar"; +- else if (pkt_burst == hns3_xmit_pkts_vec) +- info = "Vector Neon"; +- else if (pkt_burst == hns3_xmit_pkts_vec_sve) +- info = "Vector Sve"; +- +- if (info == NULL) +- return -EINVAL; ++ int ret = -EINVAL; ++ unsigned int i; + +- snprintf(mode->info, sizeof(mode->info), "%s", info); ++ for (i = 0; i < RTE_DIM(burst_infos); i++) { ++ if (pkt_burst == burst_infos[i].pkt_burst) { ++ snprintf(mode->info, sizeof(mode->info), "%s", ++ burst_infos[i].info); ++ ret = 0; ++ break; ++ } ++ } + +- return 0; ++ return ret; + } + + static bool +@@ -4312,21 +4327,12 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; @@ -24937,7 +48964,77 @@ index f365daadf8..403f811a51 100644 return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)); } -@@ -4408,7 +4414,23 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) + static bool + hns3_get_tx_prep_needed(struct rte_eth_dev *dev) + { +-#ifdef RTE_LIBRTE_ETHDEV_DEBUG +- RTE_SET_USED(dev); +- /* always perform tx_prepare when debug */ +- return true; +-#else + #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ +@@ -4340,27 +4346,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) + + uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; ++ + if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK) + return true; + + return false; +-#endif + } + +-eth_tx_burst_t +-hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) ++static eth_tx_prep_t ++hns3_get_tx_prepare(struct rte_eth_dev *dev) ++{ ++ return hns3_get_tx_prep_needed(dev) ? 
hns3_prep_pkts : NULL; ++} ++ ++static eth_tx_burst_t ++hns3_get_tx_function(struct rte_eth_dev *dev) + { + struct hns3_adapter *hns = dev->data->dev_private; + bool vec_allowed, sve_allowed, simple_allowed; +- bool vec_support, tx_prepare_needed; ++ bool vec_support; + + vec_support = hns3_tx_check_vec_support(dev) == 0; + vec_allowed = vec_support && hns3_get_default_vec_support(); + sve_allowed = vec_support && hns3_get_sve_support(); + simple_allowed = hns3_tx_check_simple_support(dev); +- tx_prepare_needed = hns3_get_tx_prep_needed(dev); +- +- *prep = NULL; + + if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) + return hns3_xmit_pkts_vec; +@@ -4368,19 +4377,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) + return hns3_xmit_pkts_vec_sve; + if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) + return hns3_xmit_pkts_simple; +- if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) { +- if (tx_prepare_needed) +- *prep = hns3_prep_pkts; ++ if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) + return hns3_xmit_pkts; +- } + + if (vec_allowed) + return hns3_xmit_pkts_vec; + if (simple_allowed) + return hns3_xmit_pkts_simple; + +- if (tx_prepare_needed) +- *prep = hns3_prep_pkts; + return hns3_xmit_pkts; + } + +@@ -4408,11 +4412,26 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) rx_mode.info, tx_mode.info); } @@ -24962,42 +49059,150 @@ index f365daadf8..403f811a51 100644 { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct hns3_adapter *hns = eth_dev->data->dev_private; -@@ -4429,6 +4451,8 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +- eth_tx_prep_t prep = NULL; + + if (hns->hw.adapter_state == HNS3_NIC_STARTED && + __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { +@@ -4420,15 +4439,17 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) + eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; + eth_dev->tx_pkt_burst = hw->set_link_down ? 
+ hns3_dummy_rxtx_burst : +- hns3_get_tx_function(eth_dev, &prep); +- eth_dev->tx_pkt_prepare = prep; ++ hns3_get_tx_function(eth_dev); ++ eth_dev->tx_pkt_prepare = hns3_get_tx_prepare(eth_dev); + eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status; +- hns3_trace_rxtx_function(eth_dev); + } else { + eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst; eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst; eth_dev->tx_pkt_prepare = NULL; } + ++ hns3_trace_rxtx_function(eth_dev); + hns3_eth_dev_fp_ops_config(eth_dev); } void -@@ -4591,7 +4615,7 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4478,6 +4499,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to start Rx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); + if (ret) { + hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", +@@ -4486,6 +4514,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return ret; + } + ++ if (rxq->sw_ring[0].mbuf != NULL) ++ hns3_rx_queue_release_mbufs(rxq); ++ + ret = hns3_init_rxq(hns, rx_queue_id); + if (ret) { + hns3_err(hw, "fail to init Rx queue %u, ret = %d.", +@@ -4524,6 +4555,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to stop Rx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + hns3_enable_rxq(rxq, false); + + hns3_rx_queue_release_mbufs(rxq); +@@ -4546,6 +4584,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to start Tx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); + if (ret) { + hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", +@@ -4572,6 +4617,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + return -ENOTSUP; + + rte_spinlock_lock(&hw->lock); ++ ++ if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) { ++ hns3_err(hw, "fail to stop Tx queue during resetting."); ++ rte_spinlock_unlock(&hw->lock); ++ return -EIO; ++ } ++ + hns3_enable_txq(txq, false); + hns3_tx_queue_release_mbufs(txq); + /* +@@ -4591,22 +4643,43 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) static int hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) { - uint16_t round_free_cnt; -+ uint16_t round_cnt; ++ uint16_t next_to_clean = txq->next_to_clean; ++ uint16_t next_to_use = txq->next_to_use; ++ uint16_t tx_bd_ready = txq->tx_bd_ready; ++ struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean]; ++ struct hns3_desc *desc = &txq->tx_ring[next_to_clean]; uint32_t idx; if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) -@@ -4600,13 +4624,13 @@ hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) - if (txq->tx_rs_thresh == 0) - return 0; + free_cnt = txq->nb_tx_desc; +- if (txq->tx_rs_thresh == 0) +- return 0; +- - round_free_cnt = roundup(free_cnt, txq->tx_rs_thresh); - for (idx = 0; idx < round_free_cnt; idx += txq->tx_rs_thresh) { -+ 
round_cnt = rounddown(free_cnt, txq->tx_rs_thresh); -+ for (idx = 0; idx < round_cnt; idx += txq->tx_rs_thresh) { - if (hns3_tx_free_useless_buffer(txq) != 0) +- if (hns3_tx_free_useless_buffer(txq) != 0) ++ for (idx = 0; idx < free_cnt; idx++) { ++ if (next_to_clean == next_to_use) break; ++ if (desc->tx.tp_fe_sc_vld_ra_ri & ++ rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B))) ++ break; ++ if (tx_pkt->mbuf != NULL) { ++ rte_pktmbuf_free_seg(tx_pkt->mbuf); ++ tx_pkt->mbuf = NULL; ++ } ++ next_to_clean++; ++ tx_bd_ready++; ++ tx_pkt++; ++ desc++; ++ if (next_to_clean == txq->nb_tx_desc) { ++ tx_pkt = txq->sw_ring; ++ desc = txq->tx_ring; ++ next_to_clean = 0; ++ } ++ } ++ ++ if (idx > 0) { ++ txq->next_to_clean = next_to_clean; ++ txq->tx_bd_ready = tx_bd_ready; } - return RTE_MIN(idx, free_cnt); -+ return idx; ++ return (int)idx; } int -@@ -4729,6 +4753,11 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) +@@ -4729,6 +4802,11 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) { dev->tx_pkt_burst = hns3_dummy_rxtx_burst; dev->tx_pkt_prepare = NULL; @@ -25009,19 +49214,52 @@ index f365daadf8..403f811a51 100644 rte_wmb(); /* Disable tx datapath on secondary process. */ hns3_mp_req_stop_tx(dev); -@@ -4743,5 +4772,10 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) - - dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); - dev->tx_pkt_prepare = prep; +@@ -4739,9 +4817,40 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) + void + hns3_start_tx_datapath(struct rte_eth_dev *dev) + { +- eth_tx_prep_t prep = NULL; ++ dev->tx_pkt_burst = hns3_get_tx_function(dev); ++ dev->tx_pkt_prepare = hns3_get_tx_prepare(dev); + hns3_eth_dev_fp_ops_config(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return; -+ + +- dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); +- dev->tx_pkt_prepare = prep; hns3_mp_req_start_tx(dev); } ++ ++void ++hns3_stop_rxtx_datapath(struct rte_eth_dev *dev) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ ++ hns3_set_rxtx_function(dev); ++ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ return; ++ ++ rte_wmb(); ++ /* Disable datapath on secondary process. */ ++ hns3_mp_req_stop_rxtx(dev); ++ /* Prevent crashes when queues are still in use. */ ++ rte_delay_ms(hw->cfg_max_queues); ++} ++ ++void ++hns3_start_rxtx_datapath(struct rte_eth_dev *dev) ++{ ++ hns3_set_rxtx_function(dev); ++ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) ++ return; ++ ++ hns3_mp_req_start_rxtx(dev); ++} diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.h b/dpdk/drivers/net/hns3/hns3_rxtx.h -index 5423568cd0..0e412d07b3 100644 +index 5423568cd0..a5260e8850 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx.h +++ b/dpdk/drivers/net/hns3/hns3_rxtx.h @@ -349,7 +349,7 @@ struct hns3_rx_queue { @@ -25051,11 +49289,28 @@ index 5423568cd0..0e412d07b3 100644 * Note: we don't need add statistic counter because latest BD which * with FE bit will mark HNS3_RXD_L2E_B bit. 
*/ +@@ -729,8 +729,6 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, + const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev); + void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev); + void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev); +-eth_tx_burst_t hns3_get_tx_function(struct rte_eth_dev *dev, +- eth_tx_prep_t *prep); + uint16_t hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, + struct rte_mbuf **pkts __rte_unused, + uint16_t pkts_n __rte_unused); +@@ -768,5 +766,7 @@ int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); + void hns3_tx_push_init(struct rte_eth_dev *dev); + void hns3_stop_tx_datapath(struct rte_eth_dev *dev); + void hns3_start_tx_datapath(struct rte_eth_dev *dev); ++void hns3_stop_rxtx_datapath(struct rte_eth_dev *dev); ++void hns3_start_rxtx_datapath(struct rte_eth_dev *dev); + + #endif /* _HNS3_RXTX_H_ */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec.c b/dpdk/drivers/net/hns3/hns3_rxtx_vec.c -index 455110361a..73f0ab6bc8 100644 +index 455110361a..5c172b1abf 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx_vec.c +++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec.c -@@ -17,15 +17,17 @@ int +@@ -17,15 +17,20 @@ int hns3_tx_check_vec_support(struct rte_eth_dev *dev) { struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode; @@ -25063,45 +49318,243 @@ index 455110361a..73f0ab6bc8 100644 - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (hns3_dev_get_support(hw, PTP)) - return -ENOTSUP; -+ struct hns3_adapter *hns = dev->data->dev_private; -+ struct hns3_pf *pf = &hns->pf; ++ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; /* Only support RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */ if (txmode->offloads != RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) return -ENOTSUP; -+ /* Vec is not supported when PTP enabled */ -+ if (pf->ptp_enable) ++ /* ++ * PTP function requires the cooperation of Rx and Tx. ++ * Tx vector isn't supported if RTE_ETH_RX_OFFLOAD_TIMESTAMP is set ++ * in Rx offloads. ++ */ ++ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) + return -ENOTSUP; + return 0; } -@@ -232,10 +234,8 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev) +@@ -61,6 +66,11 @@ hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq) + + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { ++ /* ++ * Clear VLD bit for the first descriptor rearmed in case ++ * of going to receive packets later. 
++ */ ++ rxdp[0].rx.bd_base_info = 0; + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + return; + } +@@ -231,11 +241,8 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev) + struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; uint64_t offloads_mask = RTE_ETH_RX_OFFLOAD_TCP_LRO | - RTE_ETH_RX_OFFLOAD_VLAN; +- RTE_ETH_RX_OFFLOAD_VLAN; - - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (hns3_dev_get_support(hw, PTP)) - return -ENOTSUP; -+ struct hns3_adapter *hns = dev->data->dev_private; -+ struct hns3_pf *pf = &hns->pf; ++ RTE_ETH_RX_OFFLOAD_VLAN | ++ RTE_ETH_RX_OFFLOAD_TIMESTAMP; if (dev->data->scattered_rx) return -ENOTSUP; -@@ -249,5 +249,9 @@ hns3_rx_check_vec_support(struct rte_eth_dev *dev) - if (hns3_rxq_iterate(dev, hns3_rxq_vec_check, NULL) != 0) - return -ENOTSUP; +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h +index 0edd4756f1..2d1ecf93e6 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h ++++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h +@@ -142,8 +142,8 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + /* mask to shuffle from desc to mbuf's rx_descriptor_fields1 */ + uint8x16_t shuf_desc_fields_msk = { + 0xff, 0xff, 0xff, 0xff, /* packet type init zero */ +- 22, 23, 0xff, 0xff, /* rx.pkt_len to rte_mbuf.pkt_len */ +- 20, 21, /* size to rte_mbuf.data_len */ ++ 20, 21, 0xff, 0xff, /* rx.pkt_len to rte_mbuf.pkt_len */ ++ 22, 23, /* size to rte_mbuf.data_len */ + 0xff, 0xff, /* rte_mbuf.vlan_tci init zero */ + 8, 9, 10, 11, /* rx.rss_hash to rte_mbuf.hash.rss */ + }; +@@ -180,19 +180,12 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + bd_vld = vset_lane_u16(rxdp[2].rx.bdtype_vld_udp0, bd_vld, 2); + bd_vld = vset_lane_u16(rxdp[3].rx.bdtype_vld_udp0, bd_vld, 3); -+ /* Vec is not supported when PTP enabled */ -+ if (pf->ptp_enable) -+ return -ENOTSUP; -+ - return 0; - } +- /* load 2 mbuf pointer */ +- mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); +- + bd_vld = vshl_n_u16(bd_vld, + HNS3_UINT16_BIT - 1 - HNS3_RXD_VLD_B); + bd_vld = vreinterpret_u16_s16( + vshr_n_s16(vreinterpret_s16_u16(bd_vld), + HNS3_UINT16_BIT - 1)); + stat = ~vget_lane_u64(vreinterpret_u64_u16(bd_vld), 0); +- +- /* load 2 mbuf pointer again */ +- mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); +- + if (likely(stat == 0)) + bd_valid_num = HNS3_DEFAULT_DESCS_PER_LOOP; + else +@@ -200,20 +193,20 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + if (bd_valid_num == 0) + break; + +- /* use offset to control below data load oper ordering */ +- offset = rxq->offset_table[bd_valid_num]; ++ /* load 4 mbuf pointer */ ++ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); ++ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); + +- /* store 2 mbuf pointer into rx_pkts */ ++ /* store 4 mbuf pointer into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1); ++ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); + +- /* read first two descs */ ++ /* use offset to control below data load oper ordering */ ++ offset = rxq->offset_table[bd_valid_num]; ++ ++ /* read 4 descs */ + descs[0] = vld2q_u64((uint64_t *)(rxdp + offset)); + descs[1] = vld2q_u64((uint64_t *)(rxdp + offset + 1)); +- +- /* store 2 mbuf pointer into rx_pkts again */ +- vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); +- +- /* read remains two descs */ + descs[2] = vld2q_u64((uint64_t *)(rxdp + offset + 2)); + descs[3] = vld2q_u64((uint64_t *)(rxdp + offset + 3)); 
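
For illustration only (not part of the patch): a minimal sketch of the rearm-failure guard added above, under assumed names (the example_* types and the allocator are stand-ins, not hns3 API). If the bulk mbuf allocation fails, the descriptors due to be rearmed keep stale contents, so the valid word of the first one is cleared explicitly; the polling loop then sees an empty BD and stops instead of re-delivering an already-freed buffer.

#include <stdint.h>

struct example_rx_desc {
	uint64_t addr;
	uint32_t bd_base_info; /* the VLD bit lives here, as in hns3_desc */
};

struct example_rxq {
	struct example_rx_desc *ring;
	void **bufs;
	uint16_t rearm_start;
	uint64_t alloc_failed;
};

/* Assumed bulk allocator: returns 0 on success, like rte_mempool_get_bulk(). */
int example_alloc_bulk(void **bufs, unsigned int n);

static int
example_rearm(struct example_rxq *q, unsigned int n)
{
	if (example_alloc_bulk(q->bufs, n) != 0) {
		/*
		 * The BDs due to be rearmed still hold stale contents from
		 * the previous cycle. Clearing the valid word of the first
		 * one makes the receive loop see an empty slot and stop.
		 */
		q->ring[q->rearm_start].bd_base_info = 0;
		q->alloc_failed++;
		return -1;
	}
	/* On success: store new buffer addresses and advance rearm_start. */
	return 0;
}
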
+ +@@ -221,56 +214,47 @@ hns3_recv_burst_vec(struct hns3_rx_queue *__restrict rxq, + pkt_mbuf1.val[1] = vreinterpretq_u8_u64(descs[0].val[1]); + pkt_mbuf2.val[0] = vreinterpretq_u8_u64(descs[1].val[0]); + pkt_mbuf2.val[1] = vreinterpretq_u8_u64(descs[1].val[1]); ++ pkt_mbuf3.val[0] = vreinterpretq_u8_u64(descs[2].val[0]); ++ pkt_mbuf3.val[1] = vreinterpretq_u8_u64(descs[2].val[1]); ++ pkt_mbuf4.val[0] = vreinterpretq_u8_u64(descs[3].val[0]); ++ pkt_mbuf4.val[1] = vreinterpretq_u8_u64(descs[3].val[1]); + +- /* pkt 1,2 convert format from desc to pktmbuf */ ++ /* 4 packets convert format from desc to pktmbuf */ + pkt_mb1 = vqtbl2q_u8(pkt_mbuf1, shuf_desc_fields_msk); + pkt_mb2 = vqtbl2q_u8(pkt_mbuf2, shuf_desc_fields_msk); ++ pkt_mb3 = vqtbl2q_u8(pkt_mbuf3, shuf_desc_fields_msk); ++ pkt_mb4 = vqtbl2q_u8(pkt_mbuf4, shuf_desc_fields_msk); + +- /* store the first 8 bytes of pkt 1,2 mbuf's rearm_data */ +- *(uint64_t *)&sw_ring[pos + 0].mbuf->rearm_data = +- rxq->mbuf_initializer; +- *(uint64_t *)&sw_ring[pos + 1].mbuf->rearm_data = +- rxq->mbuf_initializer; +- +- /* pkt 1,2 remove crc */ ++ /* 4 packets remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust); + pkt_mb1 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust); + pkt_mb2 = vreinterpretq_u8_u16(tmp); ++ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); ++ pkt_mb3 = vreinterpretq_u8_u16(tmp); ++ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); ++ pkt_mb4 = vreinterpretq_u8_u16(tmp); + +- pkt_mbuf3.val[0] = vreinterpretq_u8_u64(descs[2].val[0]); +- pkt_mbuf3.val[1] = vreinterpretq_u8_u64(descs[2].val[1]); +- pkt_mbuf4.val[0] = vreinterpretq_u8_u64(descs[3].val[0]); +- pkt_mbuf4.val[1] = vreinterpretq_u8_u64(descs[3].val[1]); +- +- /* pkt 3,4 convert format from desc to pktmbuf */ +- pkt_mb3 = vqtbl2q_u8(pkt_mbuf3, shuf_desc_fields_msk); +- pkt_mb4 = vqtbl2q_u8(pkt_mbuf4, shuf_desc_fields_msk); +- +- /* pkt 1,2 save to rx_pkts mbuf */ ++ /* save packet info to rx_pkts mbuf */ + vst1q_u8((void *)&sw_ring[pos + 0].mbuf->rx_descriptor_fields1, + pkt_mb1); + vst1q_u8((void *)&sw_ring[pos + 1].mbuf->rx_descriptor_fields1, + pkt_mb2); ++ vst1q_u8((void *)&sw_ring[pos + 2].mbuf->rx_descriptor_fields1, ++ pkt_mb3); ++ vst1q_u8((void *)&sw_ring[pos + 3].mbuf->rx_descriptor_fields1, ++ pkt_mb4); + +- /* pkt 3,4 remove crc */ +- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); +- pkt_mb3 = vreinterpretq_u8_u16(tmp); +- tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); +- pkt_mb4 = vreinterpretq_u8_u16(tmp); +- +- /* store the first 8 bytes of pkt 3,4 mbuf's rearm_data */ ++ /* store the first 8 bytes of packets mbuf's rearm_data */ ++ *(uint64_t *)&sw_ring[pos + 0].mbuf->rearm_data = ++ rxq->mbuf_initializer; ++ *(uint64_t *)&sw_ring[pos + 1].mbuf->rearm_data = ++ rxq->mbuf_initializer; + *(uint64_t *)&sw_ring[pos + 2].mbuf->rearm_data = + rxq->mbuf_initializer; + *(uint64_t *)&sw_ring[pos + 3].mbuf->rearm_data = + rxq->mbuf_initializer; + +- /* pkt 3,4 save to rx_pkts mbuf */ +- vst1q_u8((void *)&sw_ring[pos + 2].mbuf->rx_descriptor_fields1, +- pkt_mb3); +- vst1q_u8((void *)&sw_ring[pos + 3].mbuf->rx_descriptor_fields1, +- pkt_mb4); +- + rte_prefetch_non_temporal(rxdp + HNS3_DEFAULT_DESCS_PER_LOOP); + + parse_retcode = hns3_desc_parse_field(rxq, &sw_ring[pos], +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c b/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c +index be1fdbcdf0..abab91c4ab 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c ++++ 
b/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c +@@ -248,6 +248,11 @@ hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq) + + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { ++ /* ++ * Clear VLD bit for the first descriptor rearmed in case ++ * of going to receive packets later. ++ */ ++ rxdp[0].rx.bd_base_info = 0; + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + return; + } +@@ -435,9 +440,8 @@ hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq, + offsets, svdup_n_u64(valid_bit)); + + /* Increment bytes counter */ +- uint32_t idx; +- for (idx = 0; idx < svcntd(); idx++) +- txq->basic_stats.bytes += pkts[idx]->pkt_len; ++ txq->basic_stats.bytes += ++ (svaddv_u64(pg, data_len) >> HNS3_UINT16_BIT); + + /* update index for next loop */ + i += svcntd(); +@@ -465,14 +469,16 @@ hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue, + return 0; + } + +- if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) { ++ if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) { + nb_tx = txq->nb_tx_desc - txq->next_to_use; + hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx); + txq->next_to_use = 0; + } + +- hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); +- txq->next_to_use += nb_pkts - nb_tx; ++ if (nb_pkts > nb_tx) { ++ hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); ++ txq->next_to_use += nb_pkts - nb_tx; ++ } + + txq->tx_bd_ready -= nb_pkts; + hns3_write_txq_tail_reg(txq, nb_pkts); diff --git a/dpdk/drivers/net/hns3/hns3_stats.c b/dpdk/drivers/net/hns3/hns3_stats.c -index 0fe853d626..1b0464f3f7 100644 +index 0fe853d626..79e4063b00 100644 --- a/dpdk/drivers/net/hns3/hns3_stats.c +++ b/dpdk/drivers/net/hns3/hns3_stats.c @@ -307,24 +307,21 @@ static const struct hns3_xstats_name_offset hns3_imissed_stats_strings[] = { @@ -25129,7 +49582,7 @@ index 0fe853d626..1b0464f3f7 100644 uint64_t *desc_data; - uint16_t i, k, n; + uint32_t desc_num; -+ uint16_t i; ++ uint32_t i; int ret; + /* The first desc has a 64-bit header, so need to consider it. */ @@ -25137,7 +49590,7 @@ index 0fe853d626..1b0464f3f7 100644 desc = rte_malloc("hns3_mac_desc", desc_num * sizeof(struct hns3_cmd_desc), 0); if (desc == NULL) { -@@ -340,65 +337,71 @@ hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num) +@@ -340,83 +337,73 @@ hns3_update_mac_stats(struct hns3_hw *hw, const uint32_t desc_num) return ret; } @@ -25230,32 +49683,20 @@ index 0fe853d626..1b0464f3f7 100644 - *desc_num = 1 + ((reg_num - 3) >> 2) + - (uint32_t)(((reg_num - 3) & 0x3) ? 
1 : 0); + *reg_num += HNS3_MAC_STATS_RSV_REG_NUM_ON_HIP08_B; -+ -+ return 0; -+} -+ -+int -+hns3_query_mac_stats_reg_num(struct hns3_hw *hw) -+{ -+ uint32_t mac_stats_reg_num = 0; -+ int ret; -+ -+ ret = hns3_mac_query_reg_num(hw, &mac_stats_reg_num); -+ if (ret) -+ return ret; -+ -+ hw->mac_stats_reg_num = mac_stats_reg_num; -+ if (hw->mac_stats_reg_num > sizeof(hw->mac_stats) / sizeof(uint64_t)) -+ hns3_warn(hw, "MAC stats reg number from firmware is greater than stats iterms in driver."); return 0; } -@@ -408,15 +411,8 @@ hns3_query_update_mac_stats(struct rte_eth_dev *dev) + +-static int +-hns3_query_update_mac_stats(struct rte_eth_dev *dev) ++int ++hns3_query_mac_stats_reg_num(struct hns3_hw *hw) { - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_hw *hw = &hns->hw; +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; - uint32_t desc_num; -- int ret; ++ uint32_t mac_stats_reg_num = 0; + int ret; - ret = hns3_mac_query_reg_num(dev, &desc_num); - if (ret == 0) @@ -25263,11 +49704,19 @@ index 0fe853d626..1b0464f3f7 100644 - else - hns3_err(hw, "Query mac reg num fail : %d", ret); - return ret; -+ return hns3_update_mac_stats(hw); ++ ret = hns3_mac_query_reg_num(hw, &mac_stats_reg_num); ++ if (ret) ++ return ret; ++ ++ hw->mac_stats_reg_num = mac_stats_reg_num; ++ if (hw->mac_stats_reg_num > sizeof(hw->mac_stats) / sizeof(uint64_t)) ++ hns3_warn(hw, "MAC stats reg number from firmware is greater than stats iterms in driver."); ++ ++ return 0; } static int -@@ -544,7 +540,7 @@ hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw) +@@ -544,7 +531,7 @@ hns3_update_port_tx_ssu_drop_stats(struct hns3_hw *hw) return 0; } @@ -25276,7 +49725,7 @@ index 0fe853d626..1b0464f3f7 100644 hns3_update_imissed_stats(struct hns3_hw *hw, bool is_clear) { struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); -@@ -588,6 +584,28 @@ hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear) +@@ -588,6 +575,28 @@ hns3_update_oerror_stats(struct hns3_hw *hw, bool is_clear) return 0; } @@ -25305,7 +49754,7 @@ index 0fe853d626..1b0464f3f7 100644 /* * Query tqp tx queue statistics ,opcode id: 0x0B03. * Query tqp rx queue statistics ,opcode id: 0x0B13. 
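
For illustration only (not part of the patch): the descriptor-count arithmetic used when reading MAC statistics from firmware, as a minimal sketch. Each command descriptor carries a fixed number of 64-bit words and, per the comment above, the first descriptor loses one word to a header; the constant below is an assumption for the example, not the hardware value.

#include <stdint.h>

/* Assumed words-per-descriptor; the real value comes from the command layer. */
#define EXAMPLE_STATS_WORDS_PER_DESC 3U

static uint32_t
example_mac_stats_desc_num(uint32_t reg_num)
{
	/*
	 * With one word of the first descriptor consumed by the header,
	 * reg_num words of statistics fit into ceil((reg_num + 1) / W)
	 * descriptors, which this integer expression computes.
	 */
	return (reg_num + EXAMPLE_STATS_WORDS_PER_DESC) /
	       EXAMPLE_STATS_WORDS_PER_DESC;
}

Worked through with W = 3: reg_num = 7 gives (7 + 3) / 3 = 3 descriptors, whose 3 * 3 - 1 = 8 payload words cover the 7 statistics; reg_num = 9 gives 4.
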
-@@ -608,16 +626,15 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) +@@ -608,16 +617,15 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) struct hns3_tqp_stats *stats = &hw->tqp_stats; struct hns3_rx_queue *rxq; struct hns3_tx_queue *txq; @@ -25325,7 +49774,7 @@ index 0fe853d626..1b0464f3f7 100644 } rte_stats->imissed = imissed_stats->rpu_rx_drop_cnt + imissed_stats->ssu_rx_drop_cnt; -@@ -628,15 +645,9 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) +@@ -628,15 +636,9 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) if (rxq == NULL) continue; @@ -25342,7 +49791,7 @@ index 0fe853d626..1b0464f3f7 100644 rte_stats->ibytes += rxq->basic_stats.bytes; } -@@ -646,17 +657,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) +@@ -646,17 +648,14 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) if (txq == NULL) continue; @@ -25363,7 +49812,7 @@ index 0fe853d626..1b0464f3f7 100644 } rte_stats->oerrors = hw->oerror_stats; -@@ -672,7 +680,10 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) +@@ -672,7 +671,10 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats) rte_stats->oerrors; rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed; @@ -25375,7 +49824,7 @@ index 0fe853d626..1b0464f3f7 100644 } int -@@ -685,6 +696,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) +@@ -685,6 +687,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) uint16_t i; int ret; @@ -25383,7 +49832,7 @@ index 0fe853d626..1b0464f3f7 100644 /* * Note: Reading hardware statistics of imissed registers will * clear them. -@@ -692,7 +704,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) +@@ -692,7 +695,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) ret = hns3_update_imissed_stats(hw, true); if (ret) { hns3_err(hw, "clear imissed stats failed, ret = %d", ret); @@ -25392,7 +49841,7 @@ index 0fe853d626..1b0464f3f7 100644 } /* -@@ -701,9 +713,8 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) +@@ -701,9 +704,8 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) */ ret = hns3_update_oerror_stats(hw, true); if (ret) { @@ -25404,7 +49853,7 @@ index 0fe853d626..1b0464f3f7 100644 } for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { -@@ -745,7 +756,10 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) +@@ -745,18 +747,20 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev) hns3_tqp_stats_clear(hw); @@ -25416,7 +49865,30 @@ index 0fe853d626..1b0464f3f7 100644 } static int -@@ -912,7 +926,6 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +-hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev) ++hns3_mac_stats_reset(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = dev->data->dev_private; +- struct hns3_hw *hw = &hns->hw; + struct hns3_mac_stats *mac_stats = &hw->mac_stats; + int ret; + +- ret = hns3_query_update_mac_stats(dev); ++ /* Clear hardware MAC statistics by reading it. 
*/ ++ ret = hns3_update_mac_stats(hw); + if (ret) { + hns3_err(hw, "Clear Mac stats fail : %d", ret); + return ret; +@@ -767,7 +771,7 @@ hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev) + return 0; + } + +-static int ++static uint16_t + hns3_get_imissed_stats_num(struct hns3_adapter *hns) + { + #define NO_IMISSED_STATS_NUM 0 +@@ -912,7 +916,6 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, struct hns3_rx_basic_stats *rxq_stats; struct hns3_rx_queue *rxq; uint16_t i, j; @@ -25424,7 +49896,7 @@ index 0fe853d626..1b0464f3f7 100644 char *val; for (i = 0; i < dev->data->nb_rx_queues; i++) { -@@ -920,16 +933,10 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -920,16 +923,10 @@ hns3_rxq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, if (rxq == NULL) continue; @@ -25442,7 +49914,7 @@ index 0fe853d626..1b0464f3f7 100644 /* * If HW statistics are reset by stats_reset, but a lot of -@@ -959,7 +966,6 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -959,7 +956,6 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, struct hns3_tx_basic_stats *txq_stats; struct hns3_tx_queue *txq; uint16_t i, j; @@ -25450,7 +49922,7 @@ index 0fe853d626..1b0464f3f7 100644 char *val; for (i = 0; i < dev->data->nb_tx_queues; i++) { -@@ -967,9 +973,7 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -967,9 +963,7 @@ hns3_txq_basic_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, if (txq == NULL) continue; @@ -25461,7 +49933,16 @@ index 0fe853d626..1b0464f3f7 100644 txq_stats = &txq->basic_stats; txq_stats->packets = stats->rcb_tx_ring_pktnum[i]; -@@ -1024,9 +1028,13 @@ hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -999,7 +993,7 @@ hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + struct hns3_adapter *hns = dev->data->dev_private; + struct hns3_hw *hw = &hns->hw; + struct hns3_rx_missed_stats *imissed_stats = &hw->imissed_stats; +- int imissed_stats_num; ++ uint16_t imissed_stats_num; + int cnt = *count; + char *addr; + uint16_t i; +@@ -1024,9 +1018,13 @@ hns3_imissed_stats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, * @praram xstats * A pointer to a table of structure of type *rte_eth_xstat* * to be filled with device statistics ids and values. @@ -25476,7 +49957,7 @@ index 0fe853d626..1b0464f3f7 100644 * @return * 0 on fail, count(The size of the statistics elements) on success. 
*/ -@@ -1045,15 +1053,13 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -1045,22 +1043,20 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, int count; int ret; @@ -25493,15 +49974,16 @@ index 0fe853d626..1b0464f3f7 100644 hns3_tqp_basic_stats_get(dev, xstats, &count); if (!hns->is_vf) { -@@ -1061,6 +1067,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, - ret = hns3_query_update_mac_stats(dev); +- /* Update Mac stats */ +- ret = hns3_query_update_mac_stats(dev); ++ ret = hns3_update_mac_stats(hw); if (ret < 0) { hns3_err(hw, "Update Mac stats fail : %d", ret); + rte_spinlock_unlock(&hw->stats_lock); return ret; } -@@ -1075,8 +1082,8 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -1075,8 +1071,8 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, ret = hns3_update_imissed_stats(hw, false); if (ret) { @@ -25512,7 +49994,7 @@ index 0fe853d626..1b0464f3f7 100644 return ret; } -@@ -1107,6 +1114,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -1107,6 +1103,7 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, hns3_tqp_dfx_stats_get(dev, xstats, &count); hns3_queue_stats_get(dev, xstats, &count); @@ -25520,7 +50002,16 @@ index 0fe853d626..1b0464f3f7 100644 return count; } -@@ -1289,7 +1297,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev, +@@ -1173,7 +1170,7 @@ hns3_imissed_stats_name_get(struct rte_eth_dev *dev, + { + struct hns3_adapter *hns = dev->data->dev_private; + uint32_t cnt = *count; +- int imissed_stats_num; ++ uint16_t imissed_stats_num; + uint16_t i; + + imissed_stats_num = hns3_get_imissed_stats_num(hns); +@@ -1289,7 +1286,7 @@ hns3_dev_xstats_get_names(struct rte_eth_dev *dev, * A pointer to an ids array passed by application. This tells which * statistics values function should retrieve. This parameter * can be set to NULL if size is 0. In this case function will retrieve @@ -25529,7 +50020,7 @@ index 0fe853d626..1b0464f3f7 100644 * @param values * A pointer to a table to be filled with device statistics values. 
* @param size -@@ -1457,6 +1465,7 @@ int +@@ -1457,6 +1454,7 @@ int hns3_dev_xstats_reset(struct rte_eth_dev *dev) { struct hns3_adapter *hns = dev->data->dev_private; @@ -25537,7 +50028,7 @@ index 0fe853d626..1b0464f3f7 100644 int ret; /* Clear tqp stats */ -@@ -1464,23 +1473,25 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev) +@@ -1464,23 +1462,24 @@ hns3_dev_xstats_reset(struct rte_eth_dev *dev) if (ret) return ret; @@ -25551,10 +50042,11 @@ index 0fe853d626..1b0464f3f7 100644 - return 0; + goto out; - /* HW registers are cleared on read */ - ret = hns3_mac_stats_reset(dev); +- /* HW registers are cleared on read */ +- ret = hns3_mac_stats_reset(dev); - if (ret) - return ret; ++ ret = hns3_mac_stats_reset(hw); - return 0; +out: @@ -25568,7 +50060,7 @@ index 0fe853d626..1b0464f3f7 100644 hns3_tqp_stats_init(struct hns3_hw *hw) { struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats; -@@ -1504,7 +1515,7 @@ hns3_tqp_stats_init(struct hns3_hw *hw) +@@ -1504,7 +1503,7 @@ hns3_tqp_stats_init(struct hns3_hw *hw) return 0; } @@ -25577,7 +50069,7 @@ index 0fe853d626..1b0464f3f7 100644 hns3_tqp_stats_uninit(struct hns3_hw *hw) { struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats; -@@ -1525,3 +1536,64 @@ hns3_tqp_stats_clear(struct hns3_hw *hw) +@@ -1525,3 +1524,73 @@ hns3_tqp_stats_clear(struct hns3_hw *hw) memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num); memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num); } @@ -25585,6 +50077,7 @@ index 0fe853d626..1b0464f3f7 100644 +int +hns3_stats_init(struct hns3_hw *hw) +{ ++ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + int ret; + + rte_spinlock_init(&hw->stats_lock); @@ -25595,6 +50088,14 @@ index 0fe853d626..1b0464f3f7 100644 + return ret; + } + ++ if (!hns->is_vf) { ++ ret = hns3_mac_stats_reset(hw); ++ if (ret) { ++ hns3_err(hw, "reset mac stats failed, ret = %d", ret); ++ return ret; ++ } ++ } ++ + return hns3_tqp_stats_init(hw); +} + @@ -25697,11 +50198,252 @@ index d1230f94cb..b5cd6188b4 100644 +void hns3_update_hw_stats(struct hns3_hw *hw); #endif /* _HNS3_STATS_H_ */ +diff --git a/dpdk/drivers/net/hns3/hns3_tm.c b/dpdk/drivers/net/hns3/hns3_tm.c +index e1089b6bd0..d969164014 100644 +--- a/dpdk/drivers/net/hns3/hns3_tm.c ++++ b/dpdk/drivers/net/hns3/hns3_tm.c +@@ -739,7 +739,7 @@ hns3_tm_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, + } + + static void +-hns3_tm_nonleaf_level_capsbilities_get(struct rte_eth_dev *dev, ++hns3_tm_nonleaf_level_capabilities_get(struct rte_eth_dev *dev, + uint32_t level_id, + struct rte_tm_level_capabilities *cap) + { +@@ -818,7 +818,7 @@ hns3_tm_level_capabilities_get(struct rte_eth_dev *dev, + memset(cap, 0, sizeof(struct rte_tm_level_capabilities)); + + if (level_id != HNS3_TM_NODE_LEVEL_QUEUE) +- hns3_tm_nonleaf_level_capsbilities_get(dev, level_id, cap); ++ hns3_tm_nonleaf_level_capabilities_get(dev, level_id, cap); + else + hns3_tm_leaf_level_capabilities_get(dev, cap); + +@@ -1081,21 +1081,6 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev, + return -EINVAL; + } + +-static int +-hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev, +- int clear_on_fail, +- struct rte_tm_error *error) +-{ +- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- int ret; +- +- rte_spinlock_lock(&hw->lock); +- ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error); +- rte_spinlock_unlock(&hw->lock); +- +- return ret; +-} +- + static int + hns3_tm_node_shaper_do_update(struct hns3_hw *hw, + uint32_t node_id, +@@ -1195,6 +1180,148 @@ 
hns3_tm_node_shaper_update(struct rte_eth_dev *dev, + return 0; + } + ++static int ++hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev, ++ struct rte_tm_capabilities *cap, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_capabilities_get(dev, cap, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev, ++ uint32_t shaper_profile_id, ++ struct rte_tm_shaper_params *profile, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev, ++ uint32_t shaper_profile_id, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id, ++ uint32_t parent_node_id, uint32_t priority, ++ uint32_t weight, uint32_t level_id, ++ struct rte_tm_node_params *params, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority, ++ weight, level_id, params, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_delete_wrap(struct rte_eth_dev *dev, ++ uint32_t node_id, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_delete(dev, node_id, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev, ++ uint32_t node_id, ++ int *is_leaf, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev, ++ uint32_t level_id, ++ struct rte_tm_level_capabilities *cap, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_level_capabilities_get(dev, level_id, cap, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev, ++ uint32_t node_id, ++ struct rte_tm_node_capabilities *cap, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ ++static int ++hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev, ++ int clear_on_fail, ++ struct rte_tm_error *error) ++{ ++ struct hns3_hw *hw = 
HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int ret; ++ ++ rte_spinlock_lock(&hw->lock); ++ ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error); ++ rte_spinlock_unlock(&hw->lock); ++ ++ return ret; ++} ++ + static int + hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev, + uint32_t node_id, +@@ -1213,14 +1340,14 @@ hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev, + } + + static const struct rte_tm_ops hns3_tm_ops = { +- .capabilities_get = hns3_tm_capabilities_get, +- .shaper_profile_add = hns3_tm_shaper_profile_add, +- .shaper_profile_delete = hns3_tm_shaper_profile_del, +- .node_add = hns3_tm_node_add, +- .node_delete = hns3_tm_node_delete, +- .node_type_get = hns3_tm_node_type_get, +- .level_capabilities_get = hns3_tm_level_capabilities_get, +- .node_capabilities_get = hns3_tm_node_capabilities_get, ++ .capabilities_get = hns3_tm_capabilities_get_wrap, ++ .shaper_profile_add = hns3_tm_shaper_profile_add_wrap, ++ .shaper_profile_delete = hns3_tm_shaper_profile_del_wrap, ++ .node_add = hns3_tm_node_add_wrap, ++ .node_delete = hns3_tm_node_delete_wrap, ++ .node_type_get = hns3_tm_node_type_get_wrap, ++ .level_capabilities_get = hns3_tm_level_capabilities_get_wrap, ++ .node_capabilities_get = hns3_tm_node_capabilities_get_wrap, + .hierarchy_commit = hns3_tm_hierarchy_commit_wrap, + .node_shaper_update = hns3_tm_node_shaper_update_wrap, + }; diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c -index c0bfff43ee..1d417dbf8a 100644 +index c0bfff43ee..d829467f41 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.c +++ b/dpdk/drivers/net/i40e/i40e_ethdev.c -@@ -2483,7 +2483,7 @@ i40e_dev_start(struct rte_eth_dev *dev) +@@ -2447,10 +2447,21 @@ i40e_dev_start(struct rte_eth_dev *dev) + } + } + ++ /* Disable mac loopback mode */ ++ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE) { ++ ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MODE_NONE, NULL); ++ if (ret != I40E_SUCCESS) { ++ PMD_DRV_LOG(ERR, "fail to set loopback link"); ++ goto tx_err; ++ } ++ } ++ + /* Enable mac loopback mode */ +- if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE || +- dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) { +- ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL); ++ if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_EN) { ++ if (hw->mac.type == I40E_MAC_X722) ++ ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC_LOCAL_X722, NULL); ++ else ++ ret = i40e_aq_set_lb_modes(hw, I40E_AQ_LB_MAC, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "fail to set loopback link"); + goto tx_err; +@@ -2483,7 +2494,7 @@ i40e_dev_start(struct rte_eth_dev *dev) if (ret != I40E_SUCCESS) PMD_DRV_LOG(WARNING, "Fail to set phy mask"); @@ -25710,7 +50452,7 @@ index c0bfff43ee..1d417dbf8a 100644 i40e_dev_link_update(dev, 0); } -@@ -3555,7 +3555,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, +@@ -3555,7 +3566,7 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, count++; } @@ -25719,7 +50461,7 @@ index c0bfff43ee..1d417dbf8a 100644 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { strlcpy(xstats_names[count].name, rte_i40e_hw_port_strings[i].name, -@@ -3613,7 +3613,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -3613,7 +3624,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, count++; } @@ -25728,7 +50470,7 @@ index c0bfff43ee..1d417dbf8a 100644 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { xstats[count].value = *(uint64_t 
*)(((char *)hw_stats) + rte_i40e_hw_port_strings[i].offset); -@@ -5544,7 +5544,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) +@@ -5544,7 +5555,7 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi) &ets_sla_config, NULL); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, @@ -25737,7 +50479,31 @@ index c0bfff43ee..1d417dbf8a 100644 hw->aq.asq_last_status); return ret; } -@@ -6822,7 +6822,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) +@@ -5975,14 +5986,16 @@ i40e_vsi_setup(struct i40e_pf *pf, + } + } + +- /* MAC/VLAN configuration */ +- rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); +- filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; ++ if (vsi->type != I40E_VSI_FDIR) { ++ /* MAC/VLAN configuration for non-FDIR VSI*/ ++ rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); ++ filter.filter_type = I40E_MACVLAN_PERFECT_MATCH; + +- ret = i40e_vsi_add_mac(vsi, &filter); +- if (ret != I40E_SUCCESS) { +- PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); +- goto fail_msix_alloc; ++ ret = i40e_vsi_add_mac(vsi, &filter); ++ if (ret != I40E_SUCCESS) { ++ PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter"); ++ goto fail_msix_alloc; ++ } + } + + /* Get VSI BW information */ +@@ -6822,7 +6835,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) * @param handle * Pointer to interrupt handle. * @param param @@ -25746,7 +50512,7 @@ index c0bfff43ee..1d417dbf8a 100644 * * @return * void -@@ -9719,7 +9719,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, +@@ -9719,7 +9732,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input, return 0; } @@ -25756,10 +50522,20 @@ index c0bfff43ee..1d417dbf8a 100644 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule, const struct i40e_ethertype_filter_input *input) diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.h b/dpdk/drivers/net/i40e/i40e_ethdev.h -index 2d182f8000..a1ebdc093c 100644 +index 2d182f8000..c4abbae6c3 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.h +++ b/dpdk/drivers/net/i40e/i40e_ethdev.h -@@ -897,7 +897,7 @@ struct i40e_tunnel_filter { +@@ -48,6 +48,9 @@ + #define I40E_MAX_VF 128 + /*flag of no loopback*/ + #define I40E_AQ_LB_MODE_NONE 0x0 ++#define I40E_AQ_LB_MODE_EN 0x01 ++#define I40E_AQ_LB_MAC 0x01 ++#define I40E_AQ_LB_MAC_LOCAL_X722 0x04 + /* + * vlan_id is a 12 bit number. + * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. +@@ -897,7 +900,7 @@ struct i40e_tunnel_filter { TAILQ_ENTRY(i40e_tunnel_filter) rules; struct i40e_tunnel_filter_input input; uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */ @@ -25768,7 +50544,7 @@ index 2d182f8000..a1ebdc093c 100644 uint16_t queue; /* Queue assigned to when match */ }; -@@ -966,7 +966,7 @@ struct i40e_tunnel_filter_conf { +@@ -966,7 +969,7 @@ struct i40e_tunnel_filter_conf { uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */ uint16_t queue_id; /**< Queue assigned to if match. 
*/ uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */ @@ -25777,7 +50553,7 @@ index 2d182f8000..a1ebdc093c 100644 }; TAILQ_HEAD(i40e_flow_list, rte_flow); -@@ -1100,7 +1100,7 @@ struct i40e_vf_msg_cfg { +@@ -1100,7 +1103,7 @@ struct i40e_vf_msg_cfg { /* * If message statistics from a VF exceed the maximal limitation, * the PF will ignore any new message from that VF for @@ -25786,7 +50562,7 @@ index 2d182f8000..a1ebdc093c 100644 */ uint32_t ignore_second; }; -@@ -1257,7 +1257,7 @@ struct i40e_adapter { +@@ -1257,7 +1260,7 @@ struct i40e_adapter { }; /** @@ -25795,7 +50571,7 @@ index 2d182f8000..a1ebdc093c 100644 */ struct i40e_vf_representor { uint16_t switch_domain_id; -@@ -1265,7 +1265,7 @@ struct i40e_vf_representor { +@@ -1265,7 +1268,7 @@ struct i40e_vf_representor { uint16_t vf_id; /**< Virtual Function ID */ struct i40e_adapter *adapter; @@ -25804,6 +50580,15 @@ index 2d182f8000..a1ebdc093c 100644 struct i40e_eth_stats stats_offset; /**< Zero-point of VF statistics*/ }; +@@ -1494,7 +1497,7 @@ i40e_calc_itr_interval(bool is_pf, bool is_multi_drv) + uint16_t interval = 0; + + if (is_multi_drv) { +- interval = I40E_QUEUE_ITR_INTERVAL_MAX; ++ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + } else { + if (is_pf) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; diff --git a/dpdk/drivers/net/i40e/i40e_fdir.c b/dpdk/drivers/net/i40e/i40e_fdir.c index df2a5aaecc..8caedea14e 100644 --- a/dpdk/drivers/net/i40e/i40e_fdir.c @@ -25854,10 +50639,25 @@ index df2a5aaecc..8caedea14e 100644 } diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c -index c9676caab5..4f3808cb5f 100644 +index c9676caab5..b4cdefafa5 100644 --- a/dpdk/drivers/net/i40e/i40e_flow.c +++ b/dpdk/drivers/net/i40e/i40e_flow.c -@@ -3043,7 +3043,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, +@@ -1991,6 +1991,14 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, + return -rte_errno; + } + ++ /* Not supported */ ++ if (attr->transfer) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, ++ attr, "Not support transfer."); ++ return -rte_errno; ++ } ++ + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, +@@ -3043,7 +3051,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -25866,7 +50666,7 @@ index c9676caab5..4f3808cb5f 100644 return -rte_errno; } -@@ -3142,8 +3142,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, +@@ -3142,8 +3150,11 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, /* Check if the input set is valid */ if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR, input_set) != 0) { @@ -25880,6 +50680,34 @@ index c9676caab5..4f3808cb5f 100644 } filter->input.flow_ext.input_set = input_set; +diff --git a/dpdk/drivers/net/i40e/i40e_hash.c b/dpdk/drivers/net/i40e/i40e_hash.c +index 8962e9d97a..0c84818977 100644 +--- a/dpdk/drivers/net/i40e/i40e_hash.c ++++ b/dpdk/drivers/net/i40e/i40e_hash.c +@@ -384,8 +384,10 @@ i40e_hash_get_pattern_type(const struct rte_flow_item pattern[], + } + + prev_item_type = last_item_type; +- assert(last_item_type < (enum rte_flow_item_type) +- RTE_DIM(pattern_item_header)); ++ if (last_item_type >= (enum rte_flow_item_type) ++ RTE_DIM(pattern_item_header)) ++ goto not_sup; ++ + item_hdr = pattern_item_header[last_item_type]; + assert(item_hdr); + +@@ -657,10 +659,6 @@ i40e_hash_config_pctype_symmetric(struct i40e_hw *hw, + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; + uint32_t reg; + +- /* For 
X722, get translated pctype in fd pctype register */ +- if (hw->mac.type == I40E_MAC_X722) +- pctype = i40e_read_rx_ctl(hw, I40E_GLQF_FD_PCTYPES(pctype)); +- + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); + if (symmetric) { + if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) diff --git a/dpdk/drivers/net/i40e/i40e_pf.c b/dpdk/drivers/net/i40e/i40e_pf.c index ccb3924a5f..15d9ff868f 100644 --- a/dpdk/drivers/net/i40e/i40e_pf.c @@ -25920,10 +50748,22 @@ index ccb3924a5f..15d9ff868f 100644 goto send_msg; } diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c -index e4cb33dc3c..9a00a9b71e 100644 +index e4cb33dc3c..8a277dfe31 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx.c -@@ -609,7 +609,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) +@@ -304,10 +304,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, + union i40e_tx_offload tx_offload) + { + /* Set MACLEN */ +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) +- *td_offset |= (tx_offload.outer_l2_len >> 1) +- << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; +- else ++ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) + *td_offset |= (tx_offload.l2_len >> 1) + << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + +@@ -609,7 +606,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq) rxdp[i].read.pkt_addr = dma_addr; } @@ -25932,7 +50772,7 @@ index e4cb33dc3c..9a00a9b71e 100644 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); rxq->rx_free_trigger = -@@ -995,7 +995,7 @@ i40e_recv_scattered_pkts(void *rx_queue, +@@ -995,7 +992,7 @@ i40e_recv_scattered_pkts(void *rx_queue, * threshold of the queue, advance the Receive Descriptor Tail (RDT) * register. Update the RDT with the value of the last processed RX * descriptor minus 1, to guarantee that the RDT register is never @@ -25941,6 +50781,20 @@ index e4cb33dc3c..9a00a9b71e 100644 * from the hardware point of view. 
*/ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); +@@ -1171,9 +1168,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + /* Fill in tunneling parameters if necessary */ + cd_tunneling_params = 0; +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ++ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { ++ td_offset |= (tx_offload.outer_l2_len >> 1) ++ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + i40e_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); ++ } + /* Enable checksum offloading */ + if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) + i40e_txd_enable_checksum(ol_flags, &td_cmd, @@ -1467,7 +1467,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq, i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); @@ -25977,7 +50831,20 @@ index e4cb33dc3c..9a00a9b71e 100644 tx_queue_id); /* -@@ -1930,7 +1930,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, +@@ -1917,6 +1917,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, + if (use_def_burst_func) + ad->rx_bulk_alloc_allowed = false; + i40e_set_rx_function(dev); ++ ++ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) { ++ PMD_DRV_LOG(ERR, "Failed vector rx setup."); ++ return -EINVAL; ++ } ++ + return 0; + } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) { + PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor" +@@ -1930,7 +1936,7 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "Can't use default burst."); return -EINVAL; } @@ -25986,7 +50853,7 @@ index e4cb33dc3c..9a00a9b71e 100644 if (!dev->data->scattered_rx && use_scattered_rx) { PMD_DRV_LOG(ERR, "Scattered rx is required."); return -EINVAL; -@@ -2014,7 +2014,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, +@@ -2014,7 +2020,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->rx_deferred_start = rx_conf->rx_deferred_start; rxq->offloads = offloads; @@ -25995,7 +50862,7 @@ index e4cb33dc3c..9a00a9b71e 100644 len = I40E_MAX_RING_DESC; /** -@@ -2322,7 +2322,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -2322,7 +2328,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, */ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); @@ -26004,7 +50871,16 @@ index e4cb33dc3c..9a00a9b71e 100644 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; if (tx_conf->tx_rs_thresh > 0) -@@ -2991,7 +2991,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq) +@@ -2904,6 +2910,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ++ I40E_RX_MAX_DATA_BUF_SIZE); + rxq->hs_mode = i40e_header_split_none; + break; + } +@@ -2991,7 +2999,7 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq) if (rxq->max_pkt_len > buf_size) dev_data->scattered_rx = 1; @@ -26013,8 +50889,31 @@ index e4cb33dc3c..9a00a9b71e 100644 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); return 0; +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.h b/dpdk/drivers/net/i40e/i40e_rxtx.h +index 5e6eecc501..a8686224e5 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx.h ++++ b/dpdk/drivers/net/i40e/i40e_rxtx.h +@@ -21,6 +21,9 @@ + /* In none-PXE mode QLEN must be whole number of 32 descriptors. 
*/ + #define I40E_ALIGN_RING_DESC 32 + ++/* Max data buffer size must be 16K - 128 bytes */ ++#define I40E_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ + #define I40E_MIN_RING_DESC 64 + #define I40E_MAX_RING_DESC 4096 + +@@ -166,7 +169,7 @@ struct i40e_tx_queue { + bool q_set; /**< indicate if tx queue has been configured */ + bool tx_deferred_start; /**< don't start this queue in dev start */ + uint8_t dcb_tc; /**< Traffic class of tx queue */ +- uint64_t offloads; /**< Tx offload flags of RTE_ETH_RX_OFFLOAD_* */ ++ uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */ + const struct rte_memzone *mz; + }; + diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c -index d0bf86dfba..00a015013e 100644 +index d0bf86dfba..3d2cbe03fb 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_altivec.c @@ -27,10 +27,10 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq) @@ -26445,7 +51344,16 @@ index d0bf86dfba..00a015013e 100644 nb_pkts_recd += var; if (likely(var != RTE_I40E_DESCS_PER_LOOP)) break; -@@ -533,9 +533,9 @@ vtx1(volatile struct i40e_tx_desc *txdp, +@@ -448,8 +448,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + + /* Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST +- * numbers of DD bits + */ + uint16_t + i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -533,9 +531,9 @@ vtx1(volatile struct i40e_tx_desc *txdp, ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); @@ -26457,8 +51365,53 @@ index d0bf86dfba..00a015013e 100644 } static inline void +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +index 2e8a3f0df6..2ad9a920a1 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +@@ -906,16 +906,13 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + struct rte_mempool_cache *cache = rte_mempool_default_cache(mp, + rte_lcore_id()); + +- if (!cache || cache->len == 0) +- goto normal; +- +- cache_objs = &cache->objs[cache->len]; +- +- if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) { +- rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n); ++ if (!cache || n > RTE_MEMPOOL_CACHE_MAX_SIZE) { ++ rte_mempool_generic_put(mp, (void *)txep, n, cache); + goto done; + } + ++ cache_objs = &cache->objs[cache->len]; ++ + /* The cache follows the following algorithm + * 1. Add the objects to the cache + * 2. 
Anything greater than the cache min value (if it +@@ -947,7 +944,6 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + goto done; + } + +-normal: + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m)) { + free[0] = m; +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h b/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h +index f9a7f46550..489ea0b1f6 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_common.h +@@ -201,6 +201,7 @@ i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq) + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; ++ rxq->rx_using_sse = 1; + return 0; + } + diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c -index b951ea2dc3..507468531f 100644 +index b951ea2dc3..f274af2c6a 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -151,7 +151,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4], @@ -26479,8 +51432,17 @@ index b951ea2dc3..507468531f 100644 if (unlikely(stat == 0)) { nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP; } else { +@@ -436,8 +436,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *__rte_restrict rxq, + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST +- * numbers of DD bits + */ + uint16_t + i40e_recv_pkts_vec(void *__rte_restrict rx_queue, diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c -index 497b2404c6..3782e8052f 100644 +index 497b2404c6..63806a723d 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -282,7 +282,7 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp, @@ -26510,6 +51472,37 @@ index 497b2404c6..3782e8052f 100644 var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != RTE_I40E_DESCS_PER_LOOP)) +@@ -595,8 +595,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + /* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet +- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST +- * numbers of DD bits + */ + uint16_t + i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +diff --git a/dpdk/drivers/net/i40e/i40e_vf_representor.c b/dpdk/drivers/net/i40e/i40e_vf_representor.c +index 7f8e81858e..bcd445bcdd 100644 +--- a/dpdk/drivers/net/i40e/i40e_vf_representor.c ++++ b/dpdk/drivers/net/i40e/i40e_vf_representor.c +@@ -29,8 +29,6 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + struct rte_eth_dev_info *dev_info) + { + struct i40e_vf_representor *representor = ethdev->data->dev_private; +- struct rte_eth_dev_data *pf_dev_data = +- representor->adapter->pf.dev_data; + + /* get dev info for the vdev */ + dev_info->device = ethdev->device; +@@ -104,7 +102,7 @@ i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev, + }; + + dev_info->switch_info.name = +- rte_eth_devices[pf_dev_data->port_id].device->name; ++ rte_eth_devices[ethdev->data->port_id].device->name; + dev_info->switch_info.domain_id = representor->switch_domain_id; + dev_info->switch_info.port_id = representor->vf_id; + diff --git a/dpdk/drivers/net/i40e/rte_pmd_i40e.c b/dpdk/drivers/net/i40e/rte_pmd_i40e.c index a492959b75..35829a1eea 100644 --- a/dpdk/drivers/net/i40e/rte_pmd_i40e.c @@ -26524,7 
+51517,7 @@ index a492959b75..35829a1eea 100644 ets_data.tc_bw_share_credits[i] = veb->bw_info.bw_ets_share_credits[i]; diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h -index 0bb5698583..29692e3994 100644 +index 0bb5698583..58c3afe567 100644 --- a/dpdk/drivers/net/iavf/iavf.h +++ b/dpdk/drivers/net/iavf/iavf.h @@ -18,7 +18,7 @@ @@ -26536,7 +51529,15 @@ index 0bb5698583..29692e3994 100644 #define IAVF_BUF_SIZE_MIN 1024 #define IAVF_FRAME_SIZE_MAX 9728 #define IAVF_QUEUE_BASE_ADDR_UNIT 128 -@@ -296,6 +296,7 @@ struct iavf_adapter { +@@ -242,6 +242,7 @@ struct iavf_info { + struct iavf_qv_map *qv_map; /* queue vector mapping */ + struct iavf_flow_list flow_list; + rte_spinlock_t flow_ops_lock; ++ rte_spinlock_t aq_lock; + struct iavf_parser_list rss_parser_list; + struct iavf_parser_list dist_parser_list; + struct iavf_parser_list ipsec_crypto_parser_list; +@@ -296,6 +297,7 @@ struct iavf_adapter { bool tx_vec_allowed; uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned; bool stopped; @@ -26545,10 +51546,19 @@ index 0bb5698583..29692e3994 100644 struct iavf_devargs devargs; }; diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c -index 377d7bc7a6..f835457e4f 100644 +index 377d7bc7a6..8f4c6de0ac 100644 --- a/dpdk/drivers/net/iavf/iavf_ethdev.c +++ b/dpdk/drivers/net/iavf/iavf_ethdev.c -@@ -229,9 +229,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = { +@@ -125,6 +125,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id); + static int iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id); ++static void iavf_dev_interrupt_handler(void *param); ++static void iavf_disable_irq0(struct iavf_hw *hw); + static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops); + static int iavf_set_mc_addr_list(struct rte_eth_dev *dev, +@@ -229,9 +231,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = { }; static int @@ -26565,7 +51575,7 @@ index 377d7bc7a6..f835457e4f 100644 if (!arg) return -EINVAL; -@@ -342,6 +348,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev, +@@ -342,6 +350,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev, return -EINVAL; } @@ -26575,7 +51585,7 @@ index 377d7bc7a6..f835457e4f 100644 /* flush previous addresses */ err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); -@@ -516,7 +525,7 @@ iavf_init_rss(struct iavf_adapter *adapter) +@@ -516,7 +527,7 @@ iavf_init_rss(struct iavf_adapter *adapter) j = 0; vf->rss_lut[i] = j; } @@ -26584,7 +51594,7 @@ index 377d7bc7a6..f835457e4f 100644 ret = iavf_configure_rss_lut(adapter); if (ret) return ret; -@@ -613,6 +622,9 @@ iavf_dev_configure(struct rte_eth_dev *dev) +@@ -613,6 +624,9 @@ iavf_dev_configure(struct rte_eth_dev *dev) dev->data->nb_tx_queues); int ret; @@ -26594,7 +51604,7 @@ index 377d7bc7a6..f835457e4f 100644 ad->rx_bulk_alloc_allowed = true; /* Initialize to TRUE. If any of Rx queues doesn't meet the * vector Rx/Tx preconditions, it will be reset. 
-@@ -831,7 +843,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, +@@ -831,7 +845,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev, "vector %u are mapping to all Rx queues", vf->msix_base); } else { @@ -26603,7 +51613,7 @@ index 377d7bc7a6..f835457e4f 100644 * multi interrupts, then the vec is from 1 */ vf->nb_msix = -@@ -896,28 +908,38 @@ iavf_start_queues(struct rte_eth_dev *dev) +@@ -896,28 +910,38 @@ iavf_start_queues(struct rte_eth_dev *dev) struct iavf_rx_queue *rxq; struct iavf_tx_queue *txq; int i; @@ -26652,7 +51662,7 @@ index 377d7bc7a6..f835457e4f 100644 } static int -@@ -932,6 +954,9 @@ iavf_dev_start(struct rte_eth_dev *dev) +@@ -932,6 +956,9 @@ iavf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -26662,27 +51672,39 @@ index 377d7bc7a6..f835457e4f 100644 adapter->stopped = 0; vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD; -@@ -1009,6 +1034,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1009,6 +1036,12 @@ iavf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); ++ if (vf->vf_reset) ++ return 0; ++ + if (adapter->closed) + return -1; + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) && dev->data->dev_conf.intr_conf.rxq != 0) rte_intr_disable(intr_handle); -@@ -1030,9 +1058,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1016,8 +1049,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) + if (adapter->stopped == 1) + return 0; + +- iavf_stop_queues(dev); +- + /* Disable the interrupt for Rx */ + rte_intr_efd_disable(intr_handle); + /* Rx interrupt vector mapping free */ +@@ -1030,8 +1061,7 @@ iavf_dev_stop(struct rte_eth_dev *dev) iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); - /* free iAVF security device context all related resources */ - iavf_security_ctx_destroy(adapter); -- ++ iavf_stop_queues(dev); + adapter->stopped = 1; dev->data->dev_started = 0; - -@@ -1046,6 +1071,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1046,6 +1076,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = &adapter->vf; @@ -26692,8 +51714,28 @@ index 377d7bc7a6..f835457e4f 100644 dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV; dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV; dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN; -@@ -1286,6 +1314,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1066,6 +1099,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_SCATTER | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | ++ RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | + RTE_ETH_RX_OFFLOAD_RSS_HASH; + + dev_info->tx_offload_capa = +@@ -1114,6 +1148,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + .nb_max = IAVF_MAX_RING_DESC, + .nb_min = IAVF_MIN_RING_DESC, + .nb_align = IAVF_ALIGN_RING_DESC, ++ .nb_mtu_seg_max = IAVF_TX_MAX_MTU_SEG, ++ .nb_seg_max = IAVF_MAX_RING_DESC, + }; + + return 0; +@@ -1284,8 +1320,12 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); ++ struct rte_eth_conf *dev_conf = &dev->data->dev_conf; int err; + if (adapter->closed) @@ -26702,7 +51744,31 @@ index 377d7bc7a6..f835457e4f 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) 
{ err = iavf_add_del_vlan_v2(adapter, vlan_id, on); if (err) -@@ -1362,6 +1393,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) +@@ -1299,6 +1339,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + err = iavf_add_del_vlan(adapter, vlan_id, on); + if (err) + return -EIO; ++ ++ /* For i40e kernel driver which only supports vlan(v1) VIRTCHNL OP, ++ * it will set strip on when setting filter on but dpdk side will not ++ * change strip flag. To be consistent with dpdk side, disable strip ++ * again. ++ * ++ * For i40e kernel driver which supports vlan v2, dpdk will invoke vlan v2 ++ * related function, so it won't go through here. ++ */ ++ if (adapter->hw.mac.type == IAVF_MAC_XL710 || ++ adapter->hw.mac.type == IAVF_MAC_X722_VF) { ++ if (on && !(dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) { ++ err = iavf_disable_vlan_strip(adapter); ++ if (err) ++ return -EIO; ++ } ++ } + return 0; + } + +@@ -1362,6 +1419,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) struct rte_eth_conf *dev_conf = &dev->data->dev_conf; int err; @@ -26712,7 +51778,7 @@ index 377d7bc7a6..f835457e4f 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) return iavf_dev_vlan_offload_set_v2(dev, mask); -@@ -1394,6 +1428,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -1394,6 +1454,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, uint16_t i, idx, shift; int ret; @@ -26722,7 +51788,7 @@ index 377d7bc7a6..f835457e4f 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1420,7 +1457,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -1420,7 +1483,7 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev, } rte_memcpy(vf->rss_lut, lut, reta_size); @@ -26731,7 +51797,7 @@ index 377d7bc7a6..f835457e4f 100644 ret = iavf_configure_rss_lut(adapter); if (ret) /* revert back */ rte_memcpy(vf->rss_lut, lut, reta_size); -@@ -1439,6 +1476,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev, +@@ -1439,6 +1502,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev, struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); uint16_t i, idx, shift; @@ -26741,7 +51807,7 @@ index 377d7bc7a6..f835457e4f 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1492,6 +1532,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev, +@@ -1492,6 +1558,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev, adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf; @@ -26751,7 +51817,7 @@ index 377d7bc7a6..f835457e4f 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1545,6 +1588,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, +@@ -1545,6 +1614,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); @@ -26761,7 +51827,7 @@ index 377d7bc7a6..f835457e4f 100644 if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) return -ENOTSUP; -@@ -1792,6 +1838,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -1792,6 +1864,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); uint16_t msix_intr; @@ -26771,7 +51837,7 @@ index 377d7bc7a6..f835457e4f 100644 msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle, queue_id); if (msix_intr == IAVF_MISC_VEC_ID) { -@@ -1833,7 +1882,7 @@ 
iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -1833,7 +1908,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) IAVF_WRITE_REG(hw, IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START), @@ -26780,7 +51846,7 @@ index 377d7bc7a6..f835457e4f 100644 IAVF_WRITE_FLUSH(hw); return 0; -@@ -2412,8 +2461,11 @@ static int +@@ -2412,8 +2487,11 @@ static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops) { @@ -26794,7 +51860,25 @@ index 377d7bc7a6..f835457e4f 100644 *ops = &iavf_flow_ops; return 0; -@@ -2554,7 +2606,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2539,26 +2617,43 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) + ret = iavf_security_ctx_create(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); +- return ret; ++ goto flow_init_err; + } + + ret = iavf_security_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources"); +- return ret; ++ goto security_init_err; + } + } + + iavf_default_rss_disable(adapter); + ++ iavf_dev_stats_reset(eth_dev); /* Start device watchdog */ iavf_dev_watchdog_enable(adapter); @@ -26803,7 +51887,27 @@ index 377d7bc7a6..f835457e4f 100644 return 0; -@@ -2582,7 +2634,16 @@ iavf_dev_close(struct rte_eth_dev *dev) ++security_init_err: ++ iavf_security_ctx_destroy(adapter); ++ + flow_init_err: ++ iavf_disable_irq0(hw); ++ ++ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { ++ /* disable uio intr before callback unregiser */ ++ rte_intr_disable(pci_dev->intr_handle); ++ ++ /* unregister callback func from eal lib */ ++ rte_intr_callback_unregister(pci_dev->intr_handle, ++ iavf_dev_interrupt_handler, eth_dev); ++ } else { ++ rte_eal_alarm_cancel(iavf_dev_alarm_handler, eth_dev); ++ } ++ + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + +@@ -2582,7 +2677,16 @@ iavf_dev_close(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -26820,7 +51924,26 @@ index 377d7bc7a6..f835457e4f 100644 iavf_flow_flush(dev, NULL); iavf_flow_uninit(adapter); -@@ -2636,6 +2697,7 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2595,6 +2699,18 @@ iavf_dev_close(struct rte_eth_dev *dev) + if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) + iavf_config_promisc(adapter, false, false); + ++ /* ++ * Release redundant queue resource when close the dev ++ * so that other vfs can re-use the queues. ++ */ ++ if (vf->lv_enabled) { ++ ret = iavf_request_queues(dev, IAVF_MAX_NUM_QUEUES_DFLT); ++ if (ret) ++ PMD_DRV_LOG(ERR, "Reset the num of queues failed"); ++ ++ vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; ++ } ++ + iavf_shutdown_adminq(hw); + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* disable uio intr before callback unregister */ +@@ -2636,6 +2752,7 @@ iavf_dev_close(struct rte_eth_dev *dev) * the bus master bit will not be disabled, and this call will have no * effect. 
*/ @@ -26829,10 +51952,70 @@ index 377d7bc7a6..f835457e4f 100644 vf->vf_reset = false; diff --git a/dpdk/drivers/net/iavf/iavf_fdir.c b/dpdk/drivers/net/iavf/iavf_fdir.c -index b63aaca91d..6b847894d8 100644 +index b63aaca91d..c30853dd94 100644 --- a/dpdk/drivers/net/iavf/iavf_fdir.c +++ b/dpdk/drivers/net/iavf/iavf_fdir.c -@@ -1185,8 +1185,22 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, +@@ -817,6 +817,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + return -rte_errno; + } + ++ /* Mask for IPv4 src/dst addrs not supported */ ++ if (ipv4_mask->hdr.src_addr && ++ ipv4_mask->hdr.src_addr != UINT32_MAX) ++ return -rte_errno; ++ if (ipv4_mask->hdr.dst_addr && ++ ipv4_mask->hdr.dst_addr != UINT32_MAX) ++ return -rte_errno; ++ + if (ipv4_mask->hdr.type_of_service == + UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_TOS; +@@ -1007,6 +1015,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + return -rte_errno; + } + ++ /* Mask for UDP src/dst ports not supported */ ++ if (udp_mask->hdr.src_port && ++ udp_mask->hdr.src_port != UINT16_MAX) ++ return -rte_errno; ++ if (udp_mask->hdr.dst_port && ++ udp_mask->hdr.dst_port != UINT16_MAX) ++ return -rte_errno; ++ + if (udp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_UDP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); +@@ -1056,6 +1072,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + return -rte_errno; + } + ++ /* Mask for TCP src/dst ports not supported */ ++ if (tcp_mask->hdr.src_port && ++ tcp_mask->hdr.src_port != UINT16_MAX) ++ return -rte_errno; ++ if (tcp_mask->hdr.dst_port && ++ tcp_mask->hdr.dst_port != UINT16_MAX) ++ return -rte_errno; ++ + if (tcp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_TCP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); +@@ -1099,6 +1123,14 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + return -rte_errno; + } + ++ /* Mask for SCTP src/dst ports not supported */ ++ if (sctp_mask->hdr.src_port && ++ sctp_mask->hdr.src_port != UINT16_MAX) ++ return -rte_errno; ++ if (sctp_mask->hdr.dst_port && ++ sctp_mask->hdr.dst_port != UINT16_MAX) ++ return -rte_errno; ++ + if (sctp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_SCTP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); +@@ -1185,8 +1217,22 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, GTPU_DWN, QFI); } @@ -26857,8 +52040,26 @@ index b63aaca91d..6b847894d8 100644 } hdrs->count = ++layer; +diff --git a/dpdk/drivers/net/iavf/iavf_generic_flow.c b/dpdk/drivers/net/iavf/iavf_generic_flow.c +index 2befa125ac..01e7b8724d 100644 +--- a/dpdk/drivers/net/iavf/iavf_generic_flow.c ++++ b/dpdk/drivers/net/iavf/iavf_generic_flow.c +@@ -2220,11 +2220,12 @@ iavf_flow_create(struct rte_eth_dev *dev, + } + + flow->engine = engine; ++ rte_spinlock_lock(&vf->flow_ops_lock); + TAILQ_INSERT_TAIL(&vf->flow_list, flow, node); ++ rte_spinlock_unlock(&vf->flow_ops_lock); + PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type); + + free_flow: +- rte_spinlock_unlock(&vf->flow_ops_lock); + return flow; + } + diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c -index 884169e061..75f05ee558 100644 +index 884169e061..9ab09778e2 100644 --- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c @@ -69,7 +69,7 @@ struct iavf_security_session { @@ -26888,7 +52089,28 @@ index 884169e061..75f05ee558 100644 
sess->icv_sz = conf->crypto_xform->auth.digest_length; } else { sess->block_sz = get_cipher_blocksize(iavf_sctx, -@@ -726,7 +726,7 @@ iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev, +@@ -708,25 +708,17 @@ iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev, + if (unlikely(sess == NULL || sess->adapter != adapter)) + return false; + +- /* SPI value must be non-zero */ +- if (spi == 0) ++ /* SPI value must be non-zero and must match flow SPI*/ ++ if (spi == 0 || (htonl(sess->sa.spi) != spi)) + return false; +- /* Session SPI must patch flow SPI*/ +- else if (sess->sa.spi == spi) { +- return true; +- /** +- * TODO: We should add a way of tracking valid hw SA indices to +- * make validation less brittle +- */ +- } + +- return true; ++ return true; + } + /** * Send virtual channel security policy add request to IES driver. * @@ -26897,7 +52119,7 @@ index 884169e061..75f05ee558 100644 * order, but DPDK APIs are network order, therefore we need to do a htonl * conversion of these parameters. */ -@@ -736,7 +736,9 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter, +@@ -736,7 +728,9 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter, uint8_t is_v4, rte_be32_t v4_dst_addr, uint8_t *v6_dst_addr, @@ -26908,7 +52130,7 @@ index 884169e061..75f05ee558 100644 { struct inline_ipsec_msg *request = NULL, *response = NULL; size_t request_len, response_len; -@@ -781,6 +783,8 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter, +@@ -781,6 +775,8 @@ iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter, /** Traffic Class/Congestion Domain currently not support */ request->ipsec_data.sp_cfg->set_tc = 0; request->ipsec_data.sp_cfg->cgd = 0; @@ -26917,7 +52139,30 @@ index 884169e061..75f05ee558 100644 response_len = sizeof(struct inline_ipsec_msg) + sizeof(struct virtchnl_ipsec_sp_cfg_resp); -@@ -994,7 +998,7 @@ iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter, +@@ -843,6 +839,7 @@ iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter, + /* set request params */ + request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx; + request->ipsec_data.sa_update->esn_hi = sess->esn.hi; ++ request->ipsec_data.sa_update->esn_low = sess->esn.low; + + /* send virtual channel request to add SA to hardware database */ + rc = iavf_ipsec_crypto_request(adapter, +@@ -889,11 +886,12 @@ iavf_ipsec_crypto_session_update(void *device, + * iavf_security_session for outbound SA for use + * in *iavf_ipsec_crypto_pkt_metadata_set* function. + */ ++ iavf_sess->esn.hi = conf->ipsec.esn.hi; ++ iavf_sess->esn.low = conf->ipsec.esn.low; + if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) + rc = iavf_ipsec_crypto_sa_update_esn(adapter, + iavf_sess); +- else +- iavf_sess->esn.hi = conf->ipsec.esn.hi; ++ + } + + return rc; +@@ -994,7 +992,7 @@ iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter, request->req_id = (uint16_t)0xDEADBEEF; /** @@ -26926,7 +52171,7 @@ index 884169e061..75f05ee558 100644 * field is zero, all SA's associated with VF will be deleted. 
*/ if (sess) { -@@ -1114,11 +1118,14 @@ iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m, +@@ -1114,11 +1112,14 @@ iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m, * ipv4/6 hdr + ext hdrs */ @@ -26945,7 +52190,7 @@ index 884169e061..75f05ee558 100644 return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len + esp_hlen + l3_len + l4_len + esp_tlen); -@@ -1147,7 +1154,7 @@ iavf_ipsec_crypto_pkt_metadata_set(void *device, +@@ -1147,7 +1148,7 @@ iavf_ipsec_crypto_pkt_metadata_set(void *device, md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset, struct iavf_ipsec_crypto_pkt_metadata *); @@ -26954,7 +52199,7 @@ index 884169e061..75f05ee558 100644 memcpy(md, &iavf_sess->pkt_metadata_template, sizeof(struct iavf_ipsec_crypto_pkt_metadata)); -@@ -1352,10 +1359,12 @@ iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx +@@ -1352,10 +1353,12 @@ iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx capabilities = rte_zmalloc("crypto_cap", sizeof(struct rte_cryptodev_capabilities) * (number_of_capabilities + 1), 0); @@ -26968,7 +52213,7 @@ index 884169e061..75f05ee558 100644 * algorithm. */ for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) { -@@ -1454,7 +1463,7 @@ iavf_ipsec_crypto_capabilities_get(void *device) +@@ -1454,7 +1457,7 @@ iavf_ipsec_crypto_capabilities_get(void *device) /** * Update the security capabilities struct with the runtime discovered * crypto capabilities, except for last element of the array which is @@ -26977,7 +52222,7 @@ index 884169e061..75f05ee558 100644 */ for (i = 0; i < ((sizeof(iavf_security_capabilities) / sizeof(iavf_security_capabilities[0])) - 1); i++) { -@@ -1545,29 +1554,90 @@ iavf_security_ctx_destroy(struct iavf_adapter *adapter) +@@ -1545,29 +1548,90 @@ iavf_security_ctx_destroy(struct iavf_adapter *adapter) if (iavf_sctx == NULL) return -ENODEV; @@ -27075,7 +52320,7 @@ index 884169e061..75f05ee558 100644 } #define IAVF_IPSEC_INSET_ESP (\ -@@ -1623,6 +1693,7 @@ struct iavf_ipsec_flow_item { +@@ -1623,6 +1687,7 @@ struct iavf_ipsec_flow_item { struct rte_ipv6_hdr ipv6_hdr; }; struct rte_udp_hdr udp_hdr; @@ -27083,7 +52328,7 @@ index 884169e061..75f05ee558 100644 }; static void -@@ -1735,6 +1806,7 @@ iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev, +@@ -1735,6 +1800,7 @@ iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev, parse_udp_item((const struct rte_flow_item_udp *) pattern[2].spec, &ipsec_flow->udp_hdr); @@ -27091,7 +52336,22 @@ index 884169e061..75f05ee558 100644 ipsec_flow->spi = ((const struct rte_flow_item_esp *) pattern[3].spec)->hdr.spi; -@@ -1804,7 +1876,9 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, +@@ -1790,6 +1856,7 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, + struct rte_flow_error *error) + { + struct iavf_ipsec_flow_item *ipsec_flow = meta; ++ int flow_id = -1; + if (!ipsec_flow) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, +@@ -1798,30 +1865,33 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, + } + + if (ipsec_flow->is_ipv4) { +- ipsec_flow->id = +- iavf_ipsec_crypto_inbound_security_policy_add(ad, ++ flow_id = iavf_ipsec_crypto_inbound_security_policy_add(ad, + ipsec_flow->spi, 1, ipsec_flow->ipv4_hdr.dst_addr, NULL, @@ -27100,9 +52360,10 @@ index 884169e061..75f05ee558 100644 + ipsec_flow->is_udp, + ipsec_flow->udp_hdr.dst_port); } else { - ipsec_flow->id = - iavf_ipsec_crypto_inbound_security_policy_add(ad, -@@ -1812,7 +1886,9 @@ iavf_ipsec_flow_create(struct iavf_adapter *ad, +- ipsec_flow->id = +- 
iavf_ipsec_crypto_inbound_security_policy_add(ad, ++ flow_id = iavf_ipsec_crypto_inbound_security_policy_add(ad, + ipsec_flow->spi, 0, 0, ipsec_flow->ipv6_hdr.dst_addr, @@ -27112,7 +52373,18 @@ index 884169e061..75f05ee558 100644 + ipsec_flow->udp_hdr.dst_port); } - if (ipsec_flow->id < 1) { +- if (ipsec_flow->id < 1) { ++ if (flow_id < 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to add SA."); + return -rte_errno; + } + ++ ipsec_flow->id = flow_id; + flow->rule = ipsec_flow; + + return 0; diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h index 4e4c8798ec..8ea0f9540e 100644 --- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.h @@ -27138,7 +52410,7 @@ index 4e4c8798ec..8ea0f9540e 100644 /** * Delete inbound security policy rule from hardware diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c -index 154472c50f..3a0dfca2a7 100644 +index 154472c50f..c932b7859e 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx.c @@ -363,12 +363,24 @@ release_txq_mbufs(struct iavf_tx_queue *txq) @@ -27253,7 +52525,15 @@ index 154472c50f..3a0dfca2a7 100644 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; if (nb_desc % IAVF_ALIGN_RING_DESC != 0 || -@@ -648,8 +665,8 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -634,6 +651,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + + len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; + rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, IAVF_RX_MAX_DATA_BUF_SIZE); + + /* Allocate the software ring. */ + len = nb_desc + IAVF_RX_MAX_BURST; +@@ -648,8 +666,8 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return -ENOMEM; } @@ -27264,7 +52544,7 @@ index 154472c50f..3a0dfca2a7 100644 */ len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST; ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc), -@@ -673,7 +690,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -673,7 +691,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->q_set = true; dev->data->rx_queues[queue_idx] = rxq; rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id); @@ -27273,7 +52553,7 @@ index 154472c50f..3a0dfca2a7 100644 if (check_rx_bulk_allow(rxq) == true) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " -@@ -714,6 +731,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -714,6 +732,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); @@ -27283,7 +52563,7 @@ index 154472c50f..3a0dfca2a7 100644 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; if (nb_desc % IAVF_ALIGN_RING_DESC != 0 || -@@ -810,7 +830,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -810,7 +831,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->q_set = true; dev->data->tx_queues[queue_idx] = txq; txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx); @@ -27292,7 +52572,28 @@ index 154472c50f..3a0dfca2a7 100644 if (check_tx_vec_allow(txq) == false) { struct iavf_adapter *ad = -@@ -942,7 +962,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -926,6 +947,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + { + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); ++ struct iavf_info *vf = 
IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_rx_queue *rxq; + int err; + +@@ -934,7 +956,11 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) + if (rx_queue_id >= dev->data->nb_rx_queues) + return -EINVAL; + +- err = iavf_switch_queue(adapter, rx_queue_id, true, false); ++ if (!vf->lv_enabled) ++ err = iavf_switch_queue(adapter, rx_queue_id, true, false); ++ else ++ err = iavf_switch_queue_lv(adapter, rx_queue_id, true, false); ++ + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); +@@ -942,7 +968,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) } rxq = dev->data->rx_queues[rx_queue_id]; @@ -27301,7 +52602,28 @@ index 154472c50f..3a0dfca2a7 100644 reset_rx_queue(rxq); dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; -@@ -970,7 +990,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -954,6 +980,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + { + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); ++ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct iavf_tx_queue *txq; + int err; + +@@ -962,7 +989,11 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + if (tx_queue_id >= dev->data->nb_tx_queues) + return -EINVAL; + +- err = iavf_switch_queue(adapter, tx_queue_id, false, false); ++ if (!vf->lv_enabled) ++ err = iavf_switch_queue(adapter, tx_queue_id, false, false); ++ else ++ err = iavf_switch_queue_lv(adapter, tx_queue_id, false, false); ++ + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", + tx_queue_id); +@@ -970,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) } txq = dev->data->tx_queues[tx_queue_id]; @@ -27310,7 +52632,7 @@ index 154472c50f..3a0dfca2a7 100644 reset_tx_queue(txq); dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; -@@ -985,7 +1005,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) +@@ -985,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) if (!q) return; @@ -27319,7 +52641,7 @@ index 154472c50f..3a0dfca2a7 100644 rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); -@@ -999,7 +1019,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) +@@ -999,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) if (!q) return; @@ -27328,7 +52650,7 @@ index 154472c50f..3a0dfca2a7 100644 rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); -@@ -1033,7 +1053,7 @@ iavf_stop_queues(struct rte_eth_dev *dev) +@@ -1033,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev) txq = dev->data->tx_queues[i]; if (!txq) continue; @@ -27337,7 +52659,7 @@ index 154472c50f..3a0dfca2a7 100644 reset_tx_queue(txq); dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } -@@ -1041,7 +1061,7 @@ iavf_stop_queues(struct rte_eth_dev *dev) +@@ -1041,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; if (!rxq) continue; @@ -27346,7 +52668,30 @@ index 154472c50f..3a0dfca2a7 100644 reset_rx_queue(rxq); dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } -@@ -1484,7 +1504,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, +@@ -1245,7 +1276,9 @@ iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0) + return 0; + + if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) { +- flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD); ++ flags |= 
(RTE_MBUF_F_RX_IP_CKSUM_GOOD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD); + return flags; + } + +@@ -1262,6 +1295,11 @@ iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0) + if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) + flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; + ++ if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) ++ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD; ++ else ++ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; ++ + return flags; + } + +@@ -1484,7 +1522,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, iavf_flex_rxd_to_vlan_tci(rxm, &rxd); iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd, &rxq->stats.ipsec_crypto); @@ -27355,7 +52700,7 @@ index 154472c50f..3a0dfca2a7 100644 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); rxm->ol_flags |= pkt_flags; -@@ -1628,7 +1648,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -1628,7 +1666,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, iavf_flex_rxd_to_vlan_tci(first_seg, &rxd); iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd, &rxq->stats.ipsec_crypto); @@ -27364,7 +52709,7 @@ index 154472c50f..3a0dfca2a7 100644 pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); first_seg->ol_flags |= pkt_flags; -@@ -1819,7 +1839,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) +@@ -1819,7 +1857,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) struct rte_mbuf *mb; uint16_t stat_err0; uint16_t pkt_len; @@ -27373,7 +52718,7 @@ index 154472c50f..3a0dfca2a7 100644 int32_t i, j, nb_rx = 0; uint64_t pkt_flags; const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; -@@ -1844,9 +1864,27 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) +@@ -1844,9 +1882,27 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) rte_smp_rmb(); @@ -27404,7 +52749,7 @@ index 154472c50f..3a0dfca2a7 100644 nb_rx += nb_dd; -@@ -1868,7 +1906,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) +@@ -1868,7 +1924,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]); iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j], &rxq->stats.ipsec_crypto); @@ -27413,7 +52758,7 @@ index 154472c50f..3a0dfca2a7 100644 stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0); -@@ -1898,7 +1936,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq) +@@ -1898,7 +1954,7 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq) uint16_t pkt_len; uint64_t qword1; uint32_t rx_status; @@ -27422,7 +52767,7 @@ index 154472c50f..3a0dfca2a7 100644 int32_t i, j, nb_rx = 0; uint64_t pkt_flags; const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; -@@ -1929,9 +1967,27 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq) +@@ -1929,9 +1985,27 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq) rte_smp_rmb(); @@ -27453,7 +52798,71 @@ index 154472c50f..3a0dfca2a7 100644 nb_rx += nb_dd; -@@ -2439,6 +2495,14 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc, +@@ -2252,7 +2326,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field, + total_length -= m->outer_l3_len; + } + +-#ifdef RTE_LIBRTE_IAVF_DEBUG_TX ++#ifdef RTE_ETHDEV_DEBUG_TX + if (!m->l4_len || !m->tso_segsz) + PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d", + m->l4_len, m->tso_segsz); +@@ -2366,13 +2440,19 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + l2tag1 |= m->vlan_tci; + } + ++ if ((m->ol_flags & ++ 
(IAVF_TX_CKSUM_OFFLOAD_MASK | RTE_MBUF_F_TX_SEC_OFFLOAD)) == 0) ++ goto skip_cksum; ++ + /* Set MACLEN */ + offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L3 checksum offloading inner */ +- if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) { +- command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM; +- offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; ++ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { ++ if (m->ol_flags & RTE_MBUF_F_TX_IPV4) { ++ command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM; ++ offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; ++ } + } else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) { + command |= IAVF_TX_DESC_CMD_IIPT_IPV4; + offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; +@@ -2381,10 +2461,21 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT; + } + +- if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { +- command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; ++ if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) { ++ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ++ command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; ++ else ++ command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (m->l4_len >> 2) << + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; ++ ++ *qw1 = rte_cpu_to_le_64((((uint64_t)command << ++ IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) | ++ (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) & ++ IAVF_TXD_DATA_QW1_OFFSET_MASK) | ++ ((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)); ++ ++ return; + } + + /* Enable L4 checksum offloads */ +@@ -2406,6 +2497,7 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1, + break; + } + ++skip_cksum: + *qw1 = rte_cpu_to_le_64((((uint64_t)command << + IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) | + (((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) & +@@ -2439,6 +2531,14 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc, if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) hdrlen += ipseclen; bufsz = hdrlen + tlen; @@ -27468,7 +52877,7 @@ index 154472c50f..3a0dfca2a7 100644 } else { bufsz = m->data_len; } -@@ -2484,12 +2548,6 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -2484,12 +2584,6 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) desc_idx = txq->tx_tail; txe = &txe_ring[desc_idx]; @@ -27481,7 +52890,25 @@ index 154472c50f..3a0dfca2a7 100644 for (idx = 0; idx < nb_pkts; idx++) { volatile struct iavf_tx_desc *ddesc; struct iavf_ipsec_crypto_pkt_metadata *ipsec_md; -@@ -2694,6 +2752,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -2579,7 +2673,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + txe->last_id = desc_idx_last; + desc_idx = txe->next_id; + txe = txn; +- } ++ } + + if (nb_desc_ipsec) { + volatile struct iavf_tx_ipsec_desc *ipsec_desc = +@@ -2592,7 +2686,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; +- } ++ } + + iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen); + +@@ -2694,6 +2788,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, struct iavf_tx_queue *txq = tx_queue; struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); @@ -27492,7 +52919,29 @@ index 154472c50f..3a0dfca2a7 100644 for (i = 0; i < nb_pkts; 
i++) { m = tx_pkts[i]; -@@ -2750,14 +2812,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev) +@@ -2706,7 +2804,8 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) || +- (m->tso_segsz > IAVF_MAX_TSO_MSS)) { ++ (m->tso_segsz > IAVF_MAX_TSO_MSS) || ++ (m->nb_segs > txq->nb_tx_desc)) { + /* MSS outside the range are considered malicious */ + rte_errno = EINVAL; + return i; +@@ -2717,6 +2816,11 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + ++ if (m->pkt_len < IAVF_TX_MIN_PKT_LEN) { ++ rte_errno = EINVAL; ++ return i; ++ } ++ + #ifdef RTE_ETHDEV_DEBUG_TX + ret = rte_validate_tx_offload(m); + if (ret != 0) { +@@ -2750,14 +2854,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); @@ -27523,7 +52972,7 @@ index 154472c50f..3a0dfca2a7 100644 check_ret = iavf_rx_vec_dev_check(dev); if (check_ret >= 0 && -@@ -2774,10 +2849,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev) +@@ -2774,10 +2891,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev) use_avx512 = true; #endif @@ -27534,7 +52983,7 @@ index 154472c50f..3a0dfca2a7 100644 for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; (void)iavf_rxq_vec_setup(rxq); -@@ -2881,7 +2952,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev) +@@ -2881,7 +2994,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev) if (dev->data->scattered_rx) { PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).", dev->data->port_id); @@ -27543,7 +52992,7 @@ index 154472c50f..3a0dfca2a7 100644 dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd; else dev->rx_pkt_burst = iavf_recv_scattered_pkts; -@@ -2892,7 +2963,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev) +@@ -2892,7 +3005,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev) } else { PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).", dev->data->port_id); @@ -27552,11 +53001,97 @@ index 154472c50f..3a0dfca2a7 100644 dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd; else dev->rx_pkt_burst = iavf_recv_pkts; +@@ -2987,14 +3100,14 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq, + uint32_t free_cnt) + { + struct iavf_tx_entry *swr_ring = txq->sw_ring; +- uint16_t i, tx_last, tx_id; ++ uint16_t tx_last, tx_id; + uint16_t nb_tx_free_last; + uint16_t nb_tx_to_clean; +- uint32_t pkt_cnt; ++ uint32_t pkt_cnt = 0; + +- /* Start free mbuf from the next of tx_tail */ +- tx_last = txq->tx_tail; +- tx_id = swr_ring[tx_last].next_id; ++ /* Start free mbuf from tx_tail */ ++ tx_id = txq->tx_tail; ++ tx_last = tx_id; + + if (txq->nb_free == 0 && iavf_xmit_cleanup(txq)) + return 0; +@@ -3007,10 +3120,8 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq, + /* Loop through swr_ring to count the amount of + * freeable mubfs and packets. 
+ */ +- for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { +- for (i = 0; i < nb_tx_to_clean && +- pkt_cnt < free_cnt && +- tx_id != tx_last; i++) { ++ while (pkt_cnt < free_cnt) { ++ do { + if (swr_ring[tx_id].mbuf != NULL) { + rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); + swr_ring[tx_id].mbuf = NULL; +@@ -3023,7 +3134,7 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq, + } + + tx_id = swr_ring[tx_id].next_id; +- } ++ } while (--nb_tx_to_clean && pkt_cnt < free_cnt && tx_id != tx_last); + + if (txq->rs_thresh > txq->nb_tx_desc - + txq->nb_free || tx_id == tx_last) diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.h b/dpdk/drivers/net/iavf/iavf_rxtx.h -index b610176b30..48cc0da6f5 100644 +index b610176b30..c428082080 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx.h +++ b/dpdk/drivers/net/iavf/iavf_rxtx.h -@@ -187,6 +187,7 @@ struct iavf_rx_queue { +@@ -16,6 +16,9 @@ + /* used for Rx Bulk Allocate */ + #define IAVF_RX_MAX_BURST 32 + ++/* Max data buffer size must be 16K - 128 bytes */ ++#define IAVF_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ + /* used for Vector PMD */ + #define IAVF_VPMD_RX_MAX_BURST 32 + #define IAVF_VPMD_TX_MAX_BURST 32 +@@ -24,13 +27,13 @@ + #define IAVF_VPMD_TX_MAX_FREE_BUF 64 + + #define IAVF_TX_NO_VECTOR_FLAGS ( \ ++ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ ++ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \ + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ + RTE_ETH_TX_OFFLOAD_TCP_TSO | \ + RTE_ETH_TX_OFFLOAD_SECURITY) + + #define IAVF_TX_VECTOR_OFFLOAD ( \ +- RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \ +- RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \ + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ +@@ -53,6 +56,8 @@ + #define IAVF_TSO_MAX_SEG UINT8_MAX + #define IAVF_TX_MAX_MTU_SEG 8 + ++#define IAVF_TX_MIN_PKT_LEN 17 ++ + #define IAVF_TX_CKSUM_OFFLOAD_MASK ( \ + RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_L4_MASK | \ +@@ -67,7 +72,7 @@ + RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG | \ +- RTE_ETH_TX_OFFLOAD_SECURITY) ++ RTE_MBUF_F_TX_SEC_OFFLOAD) + + #define IAVF_TX_OFFLOAD_NOTSUP_MASK \ + (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK) +@@ -187,6 +192,7 @@ struct iavf_rx_queue { struct rte_mbuf *pkt_last_seg; /* last segment of current packet */ struct rte_mbuf fake_mbuf; /* dummy mbuf */ uint8_t rxdid; @@ -27564,7 +53099,7 @@ index b610176b30..48cc0da6f5 100644 /* used for VPMD */ uint16_t rxrearm_nb; /* number of remaining to be re-armed */ -@@ -217,8 +218,6 @@ struct iavf_rx_queue { +@@ -217,8 +223,6 @@ struct iavf_rx_queue { uint8_t proto_xtr; /* protocol extraction type */ uint64_t xtr_ol_flag; /* flexible descriptor metadata extraction offload flag */ @@ -27573,7 +53108,7 @@ index b610176b30..48cc0da6f5 100644 struct iavf_rx_queue_stats stats; uint64_t offloads; }; -@@ -248,6 +247,7 @@ struct iavf_tx_queue { +@@ -248,6 +252,7 @@ struct iavf_tx_queue { uint16_t last_desc_cleaned; /* last desc have been cleaned*/ uint16_t free_thresh; uint16_t rs_thresh; @@ -27581,7 +53116,7 @@ index b610176b30..48cc0da6f5 100644 uint16_t port_id; uint16_t queue_id; -@@ -391,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec { +@@ -391,6 +396,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec { __le32 ipsec_said; }; @@ -27594,7 +53129,7 @@ index b610176b30..48cc0da6f5 100644 /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed -@@ -694,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq); +@@ -694,6 +705,9 @@ 
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq); uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type); void iavf_set_default_ptype_table(struct rte_eth_dev *dev); @@ -27604,11 +53139,330 @@ index b610176b30..48cc0da6f5 100644 static inline void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq, +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +index b6ef1aea77..2479c18210 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +@@ -622,43 +622,88 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = +- _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); ++ _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. + */ +- const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- /* second 128-bits */ +- 0, 0, 0, 0, 0, 0, 0, 0, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1); ++ const __m256i l3_l4_flags_shuf = ++ _mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | 
RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * second 128-bits ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = +- _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD | +- RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- 
RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD); ++ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK | ++ RTE_MBUF_F_RX_L4_CKSUM_MASK | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, +@@ -836,6 +881,15 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); ++ __m256i l4_outer_mask = _mm256_set1_epi32(0x6); ++ __m256i l4_outer_flags = ++ _mm256_and_si256(l3_l4_flags, l4_outer_mask); ++ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); ++ ++ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); ++ ++ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); ++ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + + /* set rss and vlan flags */ +@@ -1020,7 +1074,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, + _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, +- 0, 0, 0, 0, ++ 0, 0, ++ RTE_MBUF_F_RX_VLAN | ++ RTE_MBUF_F_RX_VLAN_STRIPPED, ++ 0, + /* end up 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c -index 6ff38ac368..c975a5e7d7 100644 +index 6ff38ac368..9876c715e5 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c -@@ -1994,7 +1994,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -969,45 +969,105 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq, + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = +- _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); ++ _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); + #endif + #ifdef IAVF_RX_CSUM_OFFLOAD + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ +- const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- /* second 128-bits */ +- 0, 0, 0, 0, 0, 0, 0, 0, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1); ++ const __m256i l3_l4_flags_shuf = ++ _mm256_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 
20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * second 128-bits ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = +- _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD | +- RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD); ++ _mm256_set1_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK | ++ RTE_MBUF_F_RX_L4_CKSUM_MASK | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK); + #endif + #if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD) + /** +@@ -1057,6 +1117,15 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq, + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); ++ __m256i l4_outer_mask = _mm256_set1_epi32(0x6); ++ __m256i l4_outer_flags = ++ _mm256_and_si256(l3_l4_flags, l4_outer_mask); ++ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); ++ ++ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); ++ ++ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); ++ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); + l3_l4_flags = 
_mm256_and_si256(l3_l4_flags, cksum_mask); + #endif + #if defined(IAVF_RX_VLAN_OFFLOAD) || defined(IAVF_RX_RSS_OFFLOAD) +@@ -1269,7 +1338,10 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq, + (0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, +- 0, 0, 0, 0, ++ 0, 0, ++ RTE_MBUF_F_RX_VLAN | ++ RTE_MBUF_F_RX_VLAN_STRIPPED, ++ 0, + /* end up 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, +@@ -1994,7 +2066,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false); } @@ -27617,7 +53471,7 @@ index 6ff38ac368..c975a5e7d7 100644 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq) { unsigned int i; -@@ -2014,14 +2014,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq) +@@ -2014,14 +2086,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq) } } @@ -27634,7 +53488,7 @@ index 6ff38ac368..c975a5e7d7 100644 } diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c -index 1bac59bf0e..4b23ca8d82 100644 +index 1bac59bf0e..3f1d5e2ce8 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_sse.c @@ -159,7 +159,7 @@ desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], @@ -27646,7 +53500,178 @@ index 1bac59bf0e..4b23ca8d82 100644 l3_l4e = _mm_and_si128(l3_l4e, cksum_mask); vlan0 = _mm_or_si128(vlan0, rss); -@@ -613,7 +613,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, +@@ -208,9 +208,15 @@ flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3) + return fdir_flags; + } + ++#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC ++static inline void ++flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], __m128i descs_bh[4], ++ struct rte_mbuf **rx_pkts) ++#else + static inline void + flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + struct rte_mbuf **rx_pkts) ++#endif + { + const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer); + __m128i rearm0, rearm1, rearm2, rearm3; +@@ -222,39 +228,69 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + * bit12 for RSS indication. + * bit13 for VLAN indication. 
+ */ +- const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070, +- 0x3070, 0x3070); ++ const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0, ++ 0x30f0, 0x30f0); + + const __m128i cksum_mask = _mm_set_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK | + RTE_MBUF_F_RX_L4_CKSUM_MASK | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK | + RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, + RTE_MBUF_F_RX_IP_CKSUM_MASK | + RTE_MBUF_F_RX_L4_CKSUM_MASK | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK | + RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, + RTE_MBUF_F_RX_IP_CKSUM_MASK | + RTE_MBUF_F_RX_L4_CKSUM_MASK | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK | + RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD, + RTE_MBUF_F_RX_IP_CKSUM_MASK | + RTE_MBUF_F_RX_L4_CKSUM_MASK | ++ RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK | + RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD); + + /* map the checksum, rss and vlan fields to the checksum, rss + * and vlan flag + */ +- const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, +- /* shift right 1 bit to make sure it not exceed 255 */ +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD | +- RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, +- (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1); ++ const __m128i cksum_flags = ++ _mm_set_epi8((RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | ++ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ /** ++ * shift right 20 bits to use the low two bits to indicate ++ * outer checksum status ++ * shift right 1 bit to make sure it not exceed 255 ++ */ ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | ++ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | 
RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_BAD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1, ++ (RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD >> 20 | RTE_MBUF_F_RX_L4_CKSUM_GOOD | ++ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1); ++ + + const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, +@@ -274,6 +310,13 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + flags = _mm_shuffle_epi8(cksum_flags, tmp_desc); + /* then we shift left 1 bit */ + flags = _mm_slli_epi32(flags, 1); ++ __m128i l4_outer_mask = _mm_set_epi32(0x6, 0x6, 0x6, 0x6); ++ __m128i l4_outer_flags = _mm_and_si128(flags, l4_outer_mask); ++ l4_outer_flags = _mm_slli_epi32(l4_outer_flags, 20); ++ ++ __m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6); ++ __m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask); ++ flags = _mm_or_si128(l3_l4_flags, l4_outer_flags); + /* we need to mask out the redundant bits introduced by RSS or + * VLAN fields. + */ +@@ -286,6 +329,39 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + /* merge the flags */ + flags = _mm_or_si128(flags, rss_vlan); + ++#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC ++ if (rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { ++ const __m128i l2tag2_mask = ++ _mm_set1_epi32(1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S); ++ ++ const __m128i vlan_tci0_1 = ++ _mm_unpacklo_epi32(descs_bh[0], descs_bh[1]); ++ const __m128i vlan_tci2_3 = ++ _mm_unpacklo_epi32(descs_bh[2], descs_bh[3]); ++ const __m128i vlan_tci0_3 = ++ _mm_unpacklo_epi64(vlan_tci0_1, vlan_tci2_3); ++ ++ __m128i vlan_bits = _mm_and_si128(vlan_tci0_3, l2tag2_mask); ++ ++ vlan_bits = _mm_srli_epi32(vlan_bits, ++ IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S); ++ ++ const __m128i vlan_flags_shuf = ++ _mm_set_epi8(0, 0, 0, 0, ++ 0, 0, 0, 0, ++ 0, 0, 0, 0, ++ 0, 0, ++ RTE_MBUF_F_RX_VLAN | ++ RTE_MBUF_F_RX_VLAN_STRIPPED, ++ 0); ++ ++ const __m128i vlan_flags = _mm_shuffle_epi8(vlan_flags_shuf, vlan_bits); ++ ++ /* merge with vlan_flags */ ++ flags = _mm_or_si128(flags, vlan_flags); ++ } ++#endif ++ + if (rxq->fdir_enabled) { + const __m128i fdir_id0_1 = + _mm_unpackhi_epi32(descs[0], descs[1]); +@@ -325,10 +401,10 @@ flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4], + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. 
+ */ +- rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10); +- rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10); +- rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10); +- rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10); ++ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x30); ++ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x30); ++ rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x30); ++ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x30); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != +@@ -613,7 +689,7 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1, pkt_mb1); desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl); @@ -27655,7 +53680,106 @@ index 1bac59bf0e..4b23ca8d82 100644 var = __builtin_popcountll(_mm_cvtsi128_si64(staterr)); nb_pkts_recd += var; if (likely(var != IAVF_VPMD_DESCS_PER_LOOP)) -@@ -1200,37 +1200,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -748,6 +824,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, + pos += IAVF_VPMD_DESCS_PER_LOOP, + rxdp += IAVF_VPMD_DESCS_PER_LOOP) { + __m128i descs[IAVF_VPMD_DESCS_PER_LOOP]; ++#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC ++ __m128i descs_bh[IAVF_VPMD_DESCS_PER_LOOP]; ++#endif + __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __m128i staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */ +@@ -806,8 +885,6 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); + +- flex_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); +- + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust); +@@ -821,36 +898,35 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, + * needs to load 2nd 16B of each desc for RSS hash parsing, + * will cause performance drop to get into this context. 
+ */ +- if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) { ++ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH || ++ rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { + /* load bottom half of every 32B desc */ +- const __m128i raw_desc_bh3 = +- _mm_load_si128 ++ descs_bh[3] = _mm_load_si128 + ((void *)(&rxdp[3].wb.status_error1)); + rte_compiler_barrier(); +- const __m128i raw_desc_bh2 = +- _mm_load_si128 ++ descs_bh[2] = _mm_load_si128 + ((void *)(&rxdp[2].wb.status_error1)); + rte_compiler_barrier(); +- const __m128i raw_desc_bh1 = +- _mm_load_si128 ++ descs_bh[1] = _mm_load_si128 + ((void *)(&rxdp[1].wb.status_error1)); + rte_compiler_barrier(); +- const __m128i raw_desc_bh0 = +- _mm_load_si128 ++ descs_bh[0] = _mm_load_si128 + ((void *)(&rxdp[0].wb.status_error1)); ++ } + ++ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) { + /** + * to shift the 32b RSS hash value to the + * highest 32b of each 128b before mask + */ + __m128i rss_hash3 = +- _mm_slli_epi64(raw_desc_bh3, 32); ++ _mm_slli_epi64(descs_bh[3], 32); + __m128i rss_hash2 = +- _mm_slli_epi64(raw_desc_bh2, 32); ++ _mm_slli_epi64(descs_bh[2], 32); + __m128i rss_hash1 = +- _mm_slli_epi64(raw_desc_bh1, 32); ++ _mm_slli_epi64(descs_bh[1], 32); + __m128i rss_hash0 = +- _mm_slli_epi64(raw_desc_bh0, 32); ++ _mm_slli_epi64(descs_bh[0], 32); + + __m128i rss_hash_msk = + _mm_set_epi32(0xFFFFFFFF, 0, 0, 0); +@@ -869,6 +945,30 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, + pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1); + pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0); + } /* if() on RSS hash parsing */ ++ ++ if (rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { ++ /* L2TAG2_2 */ ++ __m128i vlan_tci3 = _mm_slli_si128(descs_bh[3], 4); ++ __m128i vlan_tci2 = _mm_slli_si128(descs_bh[2], 4); ++ __m128i vlan_tci1 = _mm_slli_si128(descs_bh[1], 4); ++ __m128i vlan_tci0 = _mm_slli_si128(descs_bh[0], 4); ++ ++ const __m128i vlan_tci_msk = _mm_set_epi32(0, 0xFFFF0000, 0, 0); ++ ++ vlan_tci3 = _mm_and_si128(vlan_tci3, vlan_tci_msk); ++ vlan_tci2 = _mm_and_si128(vlan_tci2, vlan_tci_msk); ++ vlan_tci1 = _mm_and_si128(vlan_tci1, vlan_tci_msk); ++ vlan_tci0 = _mm_and_si128(vlan_tci0, vlan_tci_msk); ++ ++ pkt_mb3 = _mm_or_si128(pkt_mb3, vlan_tci3); ++ pkt_mb2 = _mm_or_si128(pkt_mb2, vlan_tci2); ++ pkt_mb1 = _mm_or_si128(pkt_mb1, vlan_tci1); ++ pkt_mb0 = _mm_or_si128(pkt_mb0, vlan_tci0); ++ } ++ ++ flex_desc_to_olflags_v(rxq, descs, descs_bh, &rx_pkts[pos]); ++#else ++ flex_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]); + #endif + + /* C.2 get 4 pkts staterr value */ +@@ -1200,37 +1300,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } @@ -27698,10 +53822,31 @@ index 1bac59bf0e..4b23ca8d82 100644 } diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c -index 145b059837..1bd3559ec2 100644 +index 145b059837..930a67f517 100644 --- a/dpdk/drivers/net/iavf/iavf_vchnl.c +++ b/dpdk/drivers/net/iavf/iavf_vchnl.c -@@ -265,6 +265,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, +@@ -255,6 +255,20 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, + return err; + } + ++static int ++iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter, ++ struct iavf_cmd_info *args, int async) ++{ ++ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); ++ int ret; ++ ++ rte_spinlock_lock(&vf->aq_lock); ++ ret = iavf_execute_vf_cmd(adapter, args, async); ++ rte_spinlock_unlock(&vf->aq_lock); ++ ++ return ret; ++} ++ + static void + iavf_handle_pf_event_msg(struct 
rte_eth_dev *dev, uint8_t *msg, + uint16_t msglen) +@@ -265,6 +279,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg; @@ -27713,7 +53858,34 @@ index 145b059837..1bd3559ec2 100644 if (msglen < sizeof(struct virtchnl_pf_event)) { PMD_DRV_LOG(DEBUG, "Error event"); return; -@@ -461,7 +466,7 @@ iavf_check_api_version(struct iavf_adapter *adapter) +@@ -398,7 +417,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter) + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " OP_ENABLE_VLAN_STRIPPING"); +@@ -419,7 +438,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter) + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " OP_DISABLE_VLAN_STRIPPING"); +@@ -448,7 +467,7 @@ iavf_check_api_version(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION"); + return err; +@@ -461,7 +480,7 @@ iavf_check_api_version(struct iavf_adapter *adapter) (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START && vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) { PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower" @@ -27722,7 +53894,7 @@ index 145b059837..1bd3559ec2 100644 VIRTCHNL_VERSION_MAJOR_START, VIRTCHNL_VERSION_MAJOR_START); return -1; -@@ -502,7 +507,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) +@@ -502,12 +521,12 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_VLAN_V2 | VIRTCHNL_VF_LARGE_NUM_QPAIRS | VIRTCHNL_VF_OFFLOAD_QOS | @@ -27731,7 +53903,76 @@ index 145b059837..1bd3559ec2 100644 args.in_args = (uint8_t *)∩︀ args.in_args_size = sizeof(caps); -@@ -777,6 +782,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, +@@ -552,7 +571,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_GET_SUPPORTED_RXDIDS"); +@@ -596,7 +615,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable) + args.in_args_size = sizeof(vlan_strip); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "fail to execute command %s", + enable ? 
"VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" : +@@ -636,7 +655,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable) + args.in_args_size = sizeof(vlan_insert); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) + PMD_DRV_LOG(ERR, "fail to execute command %s", + enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" : +@@ -679,7 +698,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add) + args.in_args_size = sizeof(vlan_filter); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2"); +@@ -700,7 +719,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- ret = iavf_execute_vf_cmd(adapter, &args, 0); ++ ret = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"); +@@ -731,7 +750,7 @@ iavf_enable_queues(struct iavf_adapter *adapter) + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES"); +@@ -759,7 +778,7 @@ iavf_disable_queues(struct iavf_adapter *adapter) + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES"); +@@ -777,6 +796,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, struct iavf_cmd_info args; int err; @@ -27741,7 +53982,97 @@ index 145b059837..1bd3559ec2 100644 memset(&queue_select, 0, sizeof(queue_select)); queue_select.vsi_id = vf->vsi_res->vsi_id; if (rx) -@@ -1241,6 +1249,9 @@ iavf_query_stats(struct iavf_adapter *adapter, +@@ -792,7 +814,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, + args.in_args_size = sizeof(queue_select); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? 
"OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES"); +@@ -834,7 +856,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_ENABLE_QUEUES_V2"); +@@ -878,7 +900,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_DISABLE_QUEUES_V2"); +@@ -924,7 +946,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of %s", + on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); +@@ -956,7 +978,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_CONFIG_RSS_LUT"); +@@ -988,7 +1010,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_CONFIG_RSS_KEY"); +@@ -1080,7 +1102,7 @@ iavf_configure_queues(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); +@@ -1121,7 +1143,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP"); + +@@ -1162,7 +1184,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num, + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); + +@@ -1222,7 +1244,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) + args.in_args_size = len; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_ETHER_ADDRESS" : +@@ -1241,6 +1263,9 @@ iavf_query_stats(struct iavf_adapter *adapter, struct iavf_cmd_info args; int err; @@ -27751,7 +54082,16 @@ index 145b059837..1bd3559ec2 100644 memset(&q_stats, 0, sizeof(q_stats)); q_stats.vsi_id = vf->vsi_res->vsi_id; args.ops = VIRTCHNL_OP_GET_STATS; -@@ -1269,6 +1280,9 @@ iavf_config_promisc(struct iavf_adapter *adapter, +@@ -1249,7 +1274,7 @@ iavf_query_stats(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); + *pstats = NULL; +@@ -1269,6 +1294,9 @@ iavf_config_promisc(struct iavf_adapter *adapter, struct iavf_cmd_info args; int err; @@ -27761,7 +54101,16 @@ index 145b059837..1bd3559ec2 100644 promisc.flags = 0; promisc.vsi_id = vf->vsi_res->vsi_id; -@@ -1312,6 +1326,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, +@@ -1284,7 +1312,7 @@ iavf_config_promisc(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, +@@ -1312,6 +1340,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, struct iavf_cmd_info args; int err; @@ -27771,15 +54120,250 @@ index 145b059837..1bd3559ec2 100644 list = (struct virtchnl_ether_addr_list *)cmd_buffer; list->vsi_id = vf->vsi_res->vsi_id; list->num_elements = 1; -@@ -1487,7 +1504,7 @@ iavf_fdir_check(struct iavf_adapter *adapter, +@@ -1324,7 +1355,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); +@@ -1351,7 +1382,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add) + args.in_args_size = sizeof(cmd_buffer); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "fail to execute command %s", + add ? 
"OP_ADD_VLAN" : "OP_DEL_VLAN"); +@@ -1378,7 +1409,7 @@ iavf_fdir_add(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER"); + return err; +@@ -1438,7 +1469,7 @@ iavf_fdir_del(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER"); + return err; +@@ -1485,9 +1516,9 @@ iavf_fdir_check(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; - err = iavf_execute_vf_cmd(adapter, &args, 0); +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); if (err) { - PMD_DRV_LOG(ERR, "fail to check flow direcotor rule"); + PMD_DRV_LOG(ERR, "fail to check flow director rule"); return err; } +@@ -1526,7 +1557,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of %s", +@@ -1549,7 +1580,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_GET_RSS_HENA_CAPS"); +@@ -1575,7 +1606,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, + "Failed to execute command of OP_SET_RSS_HENA"); +@@ -1596,7 +1627,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter) + args.in_args_size = 0; + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, +@@ -1629,7 +1660,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) + PMD_DRV_LOG(ERR, "Failed to execute command of" + " VIRTCHNL_OP_CONFIG_TC_MAP"); +@@ -1674,7 +1705,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, + i * sizeof(struct virtchnl_ether_addr); + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", +@@ -1718,13 +1749,17 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* disable interrupt to avoid the admin queue message to be read + * before iavf_read_msg_from_pf. ++ * ++ * don't disable interrupt handler until ready to execute vf cmd. 
+ */ ++ rte_spinlock_lock(&vf->aq_lock); + rte_intr_disable(pci_dev->intr_handle); + err = iavf_execute_vf_cmd(adapter, &args, 0); + rte_intr_enable(pci_dev->intr_handle); ++ rte_spinlock_unlock(&vf->aq_lock); + } else { + rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev); +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + rte_eal_alarm_set(IAVF_ALARM_INTERVAL, + iavf_dev_alarm_handler, dev); + } +@@ -1763,7 +1798,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter) + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 0); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 0); + if (err) { + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION"); + return err; +@@ -1794,7 +1829,7 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter, + args.out_buffer = vf->aq_resp; + args.out_size = IAVF_AQ_BUF_SZ; + +- err = iavf_execute_vf_cmd(adapter, &args, 1); ++ err = iavf_execute_vf_cmd_safe(adapter, &args, 1); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command %s", + "OP_INLINE_IPSEC_CRYPTO"); +diff --git a/dpdk/drivers/net/ice/base/ice_bst_tcam.c b/dpdk/drivers/net/ice/base/ice_bst_tcam.c +index 306f62db2a..74a2de869e 100644 +--- a/dpdk/drivers/net/ice/base/ice_bst_tcam.c ++++ b/dpdk/drivers/net/ice/base/ice_bst_tcam.c +@@ -53,7 +53,7 @@ static void _bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index) + + /** + * ice_bst_tcam_dump - dump a boost tcam info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: boost tcam to dump + */ + void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item) +@@ -205,7 +205,7 @@ static void _bst_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_bst_tcam_table_get - create a boost tcam table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw) + { +@@ -228,7 +228,7 @@ static void _parse_lbl_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_bst_lbl_table_get - create a boost label table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw) + { +diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c +index ae55bebaa2..c6fc32fbc6 100644 +--- a/dpdk/drivers/net/ice/base/ice_common.c ++++ b/dpdk/drivers/net/ice/base/ice_common.c +@@ -475,7 +475,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: +- case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: +@@ -532,6 +531,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: ++ case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: +@@ -3022,12 +3022,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, + bool ice_is_100m_speed_supported(struct ice_hw *hw) + { + switch (hw->device_id) { +- case ICE_DEV_ID_E822C_10G_BASE_T: + case 
ICE_DEV_ID_E822C_SGMII: +- case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_SGMII: +- case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: ++ case ICE_DEV_ID_E823C_SGMII: + return true; + default: + return false; +@@ -3934,7 +3932,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw, + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm); + +- desc.datalen = data_size; ++ desc.datalen = CPU_TO_LE16(data_size); + ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), + ICE_NONDMA_TO_NONDMA); + cmd->start_address = CPU_TO_LE32(start_address); +@@ -5588,7 +5586,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); + cmd = &desc.params.read_write_gpio; +- cmd->gpio_ctrl_handle = gpio_ctrl_handle; ++ cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle); + cmd->gpio_num = pin_idx; + cmd->gpio_val = value ? 1 : 0; + +@@ -5616,7 +5614,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); + cmd = &desc.params.read_write_gpio; +- cmd->gpio_ctrl_handle = gpio_ctrl_handle; ++ cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle); + cmd->gpio_num = pin_idx; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +diff --git a/dpdk/drivers/net/ice/base/ice_dcb.c b/dpdk/drivers/net/ice/base/ice_dcb.c +index cb6c5ba182..3d630757f8 100644 +--- a/dpdk/drivers/net/ice/base/ice_dcb.c ++++ b/dpdk/drivers/net/ice/base/ice_dcb.c +@@ -1376,7 +1376,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) + tlv->ouisubtype = HTONL(ouisubtype); + + buf[0] = dcbcfg->pfc.pfccap & 0xF; +- buf[1] = dcbcfg->pfc.pfcena & 0xF; ++ buf[1] = dcbcfg->pfc.pfcena; + } + + /** diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c index 395787806b..3918169001 100644 --- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c @@ -27852,6 +54436,261 @@ index 59eeca0a30..09a02fe9ac 100644 ICE_PROF_NON_TUN = 0x1, ICE_PROF_TUN_UDP = 0x2, ICE_PROF_TUN_GRE = 0x4, +diff --git a/dpdk/drivers/net/ice/base/ice_flg_rd.c b/dpdk/drivers/net/ice/base/ice_flg_rd.c +index 833986cac3..80d3b51ad6 100644 +--- a/dpdk/drivers/net/ice/base/ice_flg_rd.c ++++ b/dpdk/drivers/net/ice/base/ice_flg_rd.c +@@ -9,7 +9,7 @@ + + /** + * ice_flg_rd_dump - dump a flag redirect item info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: flag redirect item to dump + */ + void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item) +@@ -40,7 +40,7 @@ static void _flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_flg_rd_table_get - create a flag redirect table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw) + { +diff --git a/dpdk/drivers/net/ice/base/ice_flow.c b/dpdk/drivers/net/ice/base/ice_flow.c +index bcbb9b12c4..da78a368ca 100644 +--- a/dpdk/drivers/net/ice/base/ice_flow.c ++++ b/dpdk/drivers/net/ice/base/ice_flow.c +@@ -1392,7 +1392,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, + case ICE_FLOW_FIELD_IDX_IPV4_TTL: + case ICE_FLOW_FIELD_IDX_IPV4_PROT: + prot_id = seg == 0 ? 
ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; +- ++ if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE && ++ params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU && ++ seg == 1) ++ prot_id = ICE_PROT_IPV4_IL_IL; + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. +@@ -1411,7 +1414,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, + case ICE_FLOW_FIELD_IDX_IPV6_TTL: + case ICE_FLOW_FIELD_IDX_IPV6_PROT: + prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; +- ++ if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE && ++ params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU && ++ seg == 1) ++ prot_id = ICE_PROT_IPV6_IL_IL; + /* TTL and PROT share the same extraction seq. entry. + * Each is considered a sibling to the other in terms of sharing + * the same extraction sequence entry. +@@ -2546,7 +2552,7 @@ ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle, + u16 fdir_vsi_handle, struct ice_parser_profile *prof, + enum ice_block blk) + { +- int id = ice_find_first_bit(prof->ptypes, UINT16_MAX); ++ int id = ice_find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + struct ice_flow_prof_params *params; + u8 fv_words = hw->blk[blk].es.fvw; + enum ice_status status; +@@ -4092,6 +4098,8 @@ ice_rss_cfg_raw_symm(struct ice_hw *hw, + + switch (proto_id) { + case ICE_PROT_IPV4_OF_OR_S: ++ case ICE_PROT_IPV4_IL: ++ case ICE_PROT_IPV4_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV4_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + if (prof->fv[i].offset == +@@ -4107,6 +4115,8 @@ ice_rss_cfg_raw_symm(struct ice_hw *hw, + i++; + continue; + case ICE_PROT_IPV6_OF_OR_S: ++ case ICE_PROT_IPV6_IL: ++ case ICE_PROT_IPV6_IL_IL: + len = ICE_FLOW_FLD_SZ_IPV6_ADDR / + ICE_FLOW_FV_EXTRACT_SZ; + if (prof->fv[i].offset == +diff --git a/dpdk/drivers/net/ice/base/ice_imem.c b/dpdk/drivers/net/ice/base/ice_imem.c +index 2136e0393b..9a76d21ce5 100644 +--- a/dpdk/drivers/net/ice/base/ice_imem.c ++++ b/dpdk/drivers/net/ice/base/ice_imem.c +@@ -69,7 +69,7 @@ static void _imem_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index) + + /** + * ice_imem_dump - dump an imem item info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: imem item to dump + */ + void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item) +@@ -231,7 +231,7 @@ static void _imem_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_imem_table_get - create an imem table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw) + { +diff --git a/dpdk/drivers/net/ice/base/ice_metainit.c b/dpdk/drivers/net/ice/base/ice_metainit.c +index 3f9e5d6833..a899125b37 100644 +--- a/dpdk/drivers/net/ice/base/ice_metainit.c ++++ b/dpdk/drivers/net/ice/base/ice_metainit.c +@@ -9,7 +9,7 @@ + + /** + * ice_metainit_dump - dump an metainit item info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: metainit item to dump + */ + void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item) +@@ -130,7 +130,7 @@ static void _metainit_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_metainit_table_get - create a metainit table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw) + { 
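The `ice_find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX)` change in the ice_flow.c hunk above swaps an arbitrary UINT16_MAX bound for the bitmap's actual length in bits. A minimal sketch of the semantics that fix assumes, with hypothetical names (this is not the driver's implementation):

    #include <limits.h>
    #include <stddef.h>

    typedef unsigned long bitmap_word;
    #define WORD_BITS (sizeof(bitmap_word) * CHAR_BIT)

    /* Return the index of the first set bit, scanning at most nbits
     * bits. Passing an oversized bound such as UINT16_MAX would let
     * the scan read past the end of the backing array, which is what
     * the ICE_FLOW_PTYPE_MAX bound prevents. */
    static size_t find_first_bit(const bitmap_word *map, size_t nbits)
    {
        for (size_t i = 0; i < nbits; i++)
            if (map[i / WORD_BITS] & (1UL << (i % WORD_BITS)))
                return i;
        return nbits; /* no bit set within the bound */
    }
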
+diff --git a/dpdk/drivers/net/ice/base/ice_mk_grp.c b/dpdk/drivers/net/ice/base/ice_mk_grp.c +index 4e9ab5c13a..814001c49e 100644 +--- a/dpdk/drivers/net/ice/base/ice_mk_grp.c ++++ b/dpdk/drivers/net/ice/base/ice_mk_grp.c +@@ -10,7 +10,7 @@ + + /** + * ice_mk_grp_dump - dump an marker group item info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: marker group item to dump + */ + void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item) +@@ -42,7 +42,7 @@ static void _mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_mk_grp_table_get - create a marker group table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw) + { +diff --git a/dpdk/drivers/net/ice/base/ice_parser.c b/dpdk/drivers/net/ice/base/ice_parser.c +index 9b106baff0..4d490dda7b 100644 +--- a/dpdk/drivers/net/ice/base/ice_parser.c ++++ b/dpdk/drivers/net/ice/base/ice_parser.c +@@ -106,7 +106,7 @@ void *ice_parser_sect_item_get(u32 sect_type, void *section, + * @item_size: item size in byte + * @length: number of items in the table to create + * @item_get: the function will be parsed to ice_pkg_enum_entry +- * @parser_item: the function to parse the item ++ * @parse_item: the function to parse the item + * @no_offset: ignore header offset, calculate index from 0 + */ + void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type, +@@ -359,6 +359,7 @@ static void _bst_vm_set(struct ice_parser *psr, const char *prefix, bool on) + /** + * ice_parser_dvm_set - configure double vlan mode for parser + * @psr: pointer to a parser instance ++ * @on: true to turn on; false to turn off + */ + void ice_parser_dvm_set(struct ice_parser *psr, bool on) + { +@@ -478,8 +479,8 @@ static bool _nearest_proto_id(struct ice_parser_result *rslt, u16 offset, + * ice_parser_profile_init - initialize a FXP profile base on parser result + * @rslt: a instance of a parser result + * @pkt_buf: packet data buffer +- * @pkt_msk: packet mask buffer +- * @pkt_len: packet length ++ * @msk_buf: packet mask buffer ++ * @buf_len: packet length + * @blk: FXP pipeline stage + * @prefix_match: match protocol stack exactly or only prefix + * @prof: input/output parameter to save the profile +diff --git a/dpdk/drivers/net/ice/base/ice_pg_cam.c b/dpdk/drivers/net/ice/base/ice_pg_cam.c +index fe461ad849..73f7c34ffd 100644 +--- a/dpdk/drivers/net/ice/base/ice_pg_cam.c ++++ b/dpdk/drivers/net/ice/base/ice_pg_cam.c +@@ -50,7 +50,7 @@ static void _pg_cam_action_dump(struct ice_hw *hw, + + /** + * ice_pg_cam_dump - dump an parse graph cam info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: parse graph cam to dump + */ + void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item) +@@ -62,7 +62,7 @@ void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item) + + /** + * ice_pg_nm_cam_dump - dump an parse graph no match cam info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: parse graph no match cam to dump + */ + void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item) +@@ -243,7 +243,7 @@ static void _pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_pg_cam_table_get - create a parse graph cam table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + 
struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw) + { +@@ -257,7 +257,7 @@ struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw) + + /** + * ice_pg_sp_cam_table_get - create a parse graph spill cam table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw) + { +@@ -271,7 +271,7 @@ struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw) + + /** + * ice_pg_nm_cam_table_get - create a parse graph no match cam table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw) + { +@@ -285,7 +285,7 @@ struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw) + + /** + * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw) + { +diff --git a/dpdk/drivers/net/ice/base/ice_proto_grp.c b/dpdk/drivers/net/ice/base/ice_proto_grp.c +index 69d5d9a18a..b1d149b66a 100644 +--- a/dpdk/drivers/net/ice/base/ice_proto_grp.c ++++ b/dpdk/drivers/net/ice/base/ice_proto_grp.c +@@ -18,7 +18,7 @@ static void _proto_off_dump(struct ice_hw *hw, struct ice_proto_off *po, + + /** + * ice_proto_grp_dump - dump a proto group item info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: proto group item to dump + */ + void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item) +@@ -95,7 +95,7 @@ static void _proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_proto_grp_table_get - create a proto group table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw) + { diff --git a/dpdk/drivers/net/ice/base/ice_protocol_type.h b/dpdk/drivers/net/ice/base/ice_protocol_type.h index cef8354f77..d27ef46713 100644 --- a/dpdk/drivers/net/ice/base/ice_protocol_type.h @@ -27894,11 +54733,294 @@ index cef8354f77..d27ef46713 100644 u16 fv_mask[ICE_NUM_WORDS_RECIPE]; struct ice_pref_recipe_group r_group; }; +diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.c b/dpdk/drivers/net/ice/base/ice_ptp_hw.c +index 7e797c9511..3a47f8cebe 100644 +--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.c ++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.c +@@ -1634,7 +1634,7 @@ static enum ice_status ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) + #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */ + + /* Program the 10Gb/40Gb conversion ratio */ +- uix = DIV_64BIT(tu_per_sec * LINE_UI_10G_40G, 390625000); ++ uix = DIV_U64(tu_per_sec * LINE_UI_10G_40G, 390625000); + + status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L, + uix); +@@ -1645,7 +1645,7 @@ static enum ice_status ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port) + } + + /* Program the 25Gb/100Gb conversion ratio */ +- uix = DIV_64BIT(tu_per_sec * LINE_UI_25G_100G, 390625000); ++ uix = DIV_U64(tu_per_sec * LINE_UI_25G_100G, 390625000); + + status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L, + uix); +@@ -1727,8 +1727,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_PAR_TX_TUS */ + if (e822_vernier[link_spd].tx_par_clk) +- phy_tus = 
DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].tx_par_clk); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].tx_par_clk); + else + phy_tus = 0; + +@@ -1739,8 +1739,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_PAR_RX_TUS */ + if (e822_vernier[link_spd].rx_par_clk) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].rx_par_clk); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].rx_par_clk); + else + phy_tus = 0; + +@@ -1751,8 +1751,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_PCS_TX_TUS */ + if (e822_vernier[link_spd].tx_pcs_clk) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].tx_pcs_clk); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].tx_pcs_clk); + else + phy_tus = 0; + +@@ -1763,8 +1763,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_PCS_RX_TUS */ + if (e822_vernier[link_spd].rx_pcs_clk) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].rx_pcs_clk); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].rx_pcs_clk); + else + phy_tus = 0; + +@@ -1775,8 +1775,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_DESK_PAR_TX_TUS */ + if (e822_vernier[link_spd].tx_desk_rsgb_par) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].tx_desk_rsgb_par); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].tx_desk_rsgb_par); + else + phy_tus = 0; + +@@ -1787,8 +1787,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_DESK_PAR_RX_TUS */ + if (e822_vernier[link_spd].rx_desk_rsgb_par) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].rx_desk_rsgb_par); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].rx_desk_rsgb_par); + else + phy_tus = 0; + +@@ -1799,8 +1799,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_DESK_PCS_TX_TUS */ + if (e822_vernier[link_spd].tx_desk_rsgb_pcs) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].tx_desk_rsgb_pcs); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].tx_desk_rsgb_pcs); + else + phy_tus = 0; + +@@ -1811,8 +1811,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port) + + /* P_REG_DESK_PCS_RX_TUS */ + if (e822_vernier[link_spd].rx_desk_rsgb_pcs) +- phy_tus = DIV_64BIT(tu_per_sec, +- e822_vernier[link_spd].rx_desk_rsgb_pcs); ++ phy_tus = DIV_U64(tu_per_sec, ++ e822_vernier[link_spd].rx_desk_rsgb_pcs); + else + phy_tus = 0; + +@@ -1844,9 +1844,9 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) + * overflows 64 bit integer arithmetic, so break it up into two + * divisions by 1e4 first then by 1e7. + */ +- fixed_offset = DIV_64BIT(tu_per_sec, 10000); ++ fixed_offset = DIV_U64(tu_per_sec, 10000); + fixed_offset *= e822_vernier[link_spd].tx_fixed_delay; +- fixed_offset = DIV_64BIT(fixed_offset, 10000000); ++ fixed_offset = DIV_U64(fixed_offset, 10000000); + + return fixed_offset; + } +@@ -2074,9 +2074,9 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, + * divide by 125, and then handle remaining divisor based on the link + * speed pmd_adj_divisor value. + */ +- adj = DIV_64BIT(tu_per_sec, 125); ++ adj = DIV_U64(tu_per_sec, 125); + adj *= mult; +- adj = DIV_64BIT(adj, pmd_adj_divisor); ++ adj = DIV_U64(adj, pmd_adj_divisor); + + /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx + * cycle count is necessary. 
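The DIV_64BIT to DIV_U64 conversions in these ice_ptp_hw.c hunks sit next to a staging trick the surrounding comments call out explicitly: tu_per_sec is large enough that multiplying it by a fixed delay before a single divide would overflow 64-bit arithmetic, so the divide is broken up into two stages, by 1e4 before the multiply and by 1e7 after. A standalone sketch of that pattern (function name illustrative, magnitudes taken from the patch comments):

    #include <stdint.h>

    /* Computes approximately (tu_per_sec * delay) / 1e11. Dividing by
     * 1e4 first shrinks the multiplicand so the product stays within
     * 64 bits for the delay magnitudes the driver expects; a single
     * combined divide would overflow the intermediate product. */
    static uint64_t staged_fixed_offset(uint64_t tu_per_sec, uint64_t delay)
    {
        uint64_t off = tu_per_sec / 10000;   /* first stage: / 1e4 */
        off *= delay;                        /* product now fits in u64 */
        off /= 10000000;                     /* second stage: / 1e7 */
        return off;
    }
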
+@@ -2097,9 +2097,9 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, + if (rx_cycle) { + mult = (4 - rx_cycle) * 40; + +- cycle_adj = DIV_64BIT(tu_per_sec, 125); ++ cycle_adj = DIV_U64(tu_per_sec, 125); + cycle_adj *= mult; +- cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor); ++ cycle_adj = DIV_U64(cycle_adj, pmd_adj_divisor); + + adj += cycle_adj; + } +@@ -2119,9 +2119,9 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port, + if (rx_cycle) { + mult = rx_cycle * 40; + +- cycle_adj = DIV_64BIT(tu_per_sec, 125); ++ cycle_adj = DIV_U64(tu_per_sec, 125); + cycle_adj *= mult; +- cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor); ++ cycle_adj = DIV_U64(cycle_adj, pmd_adj_divisor); + + adj += cycle_adj; + } +@@ -2157,9 +2157,9 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd) + * overflows 64 bit integer arithmetic, so break it up into two + * divisions by 1e4 first then by 1e7. + */ +- fixed_offset = DIV_64BIT(tu_per_sec, 10000); ++ fixed_offset = DIV_U64(tu_per_sec, 10000); + fixed_offset *= e822_vernier[link_spd].rx_fixed_delay; +- fixed_offset = DIV_64BIT(fixed_offset, 10000000); ++ fixed_offset = DIV_U64(fixed_offset, 10000000); + + return fixed_offset; + } +diff --git a/dpdk/drivers/net/ice/base/ice_ptype_mk.c b/dpdk/drivers/net/ice/base/ice_ptype_mk.c +index 97c41cb586..9807e688b1 100644 +--- a/dpdk/drivers/net/ice/base/ice_ptype_mk.c ++++ b/dpdk/drivers/net/ice/base/ice_ptype_mk.c +@@ -9,7 +9,7 @@ + + /** + * ice_ptype_mk_tcam_dump - dump an ptype marker tcam info_ +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @item: ptype marker tcam to dump + */ + void ice_ptype_mk_tcam_dump(struct ice_hw *hw, +@@ -41,7 +41,7 @@ static void _parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx, void *item, + + /** + * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw) + { diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c -index 2620892c9e..e697c579be 100644 +index 2620892c9e..f3655a820f 100644 --- a/dpdk/drivers/net/ice/base/ice_sched.c +++ b/dpdk/drivers/net/ice/base/ice_sched.c -@@ -4774,12 +4774,12 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, +@@ -1394,11 +1394,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) + clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> + GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; + +-#define PSM_CLK_SRC_367_MHZ 0x0 +-#define PSM_CLK_SRC_416_MHZ 0x1 +-#define PSM_CLK_SRC_446_MHZ 0x2 +-#define PSM_CLK_SRC_390_MHZ 0x3 +- + switch (clk_src) { + case PSM_CLK_SRC_367_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; +@@ -1412,11 +1407,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) + case PSM_CLK_SRC_390_MHZ: + hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; + break; +- default: +- ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", +- clk_src); +- /* fall back to a safe default */ +- hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; ++ ++ /* default condition is not required as clk_src is restricted ++ * to a 2-bit value from GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask. ++ * The above switch statements cover the possible values of ++ * this variable. 
++ */ + } + } + +@@ -3830,8 +3826,8 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) + u16 wakeup = 0; + + /* Get the wakeup integer value */ +- bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE); +- wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec); ++ bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE); ++ wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec); + if (wakeup_int > 63) { + wakeup = (u16)((1 << 15) | wakeup_int); + } else { +@@ -3839,18 +3835,18 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) + * Convert Integer value to a constant multiplier + */ + wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; +- wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER * +- hw->psm_clk_freq, bytes_per_sec); ++ wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER * ++ hw->psm_clk_freq, bytes_per_sec); + + /* Get Fraction value */ + wakeup_f = wakeup_a - wakeup_b; + + /* Round up the Fractional value via Ceil(Fractional value) */ +- if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2)) ++ if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2)) + wakeup_f += 1; + +- wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION, +- ICE_RL_PROF_MULTIPLIER); ++ wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION, ++ ICE_RL_PROF_MULTIPLIER); + wakeup |= (u16)(wakeup_int << 9); + wakeup |= (u16)(0x1ff & wakeup_f_int); + } +@@ -3882,20 +3878,20 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, + return status; + + /* Bytes per second from Kbps */ +- bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE); ++ bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE); + + /* encode is 6 bits but really useful are 5 bits */ + for (i = 0; i < 64; i++) { + u64 pow_result = BIT_ULL(i); + +- ts_rate = DIV_64BIT((s64)hw->psm_clk_freq, +- pow_result * ICE_RL_PROF_TS_MULTIPLIER); ++ ts_rate = DIV_S64((s64)hw->psm_clk_freq, ++ pow_result * ICE_RL_PROF_TS_MULTIPLIER); + if (ts_rate <= 0) + continue; + + /* Multiplier value */ +- mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, +- ts_rate); ++ mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, ++ ts_rate); + + /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ + mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); +@@ -4774,12 +4770,12 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, case ICE_AGG_TYPE_Q: /* The current implementation allows single queue to modify */ @@ -27913,8 +55035,24 @@ index 2620892c9e..e697c579be 100644 if (!child_node) break; node = child_node->parent; +diff --git a/dpdk/drivers/net/ice/base/ice_sched.h b/dpdk/drivers/net/ice/base/ice_sched.h +index 1441b5f191..22ed09d2b1 100644 +--- a/dpdk/drivers/net/ice/base/ice_sched.h ++++ b/dpdk/drivers/net/ice/base/ice_sched.h +@@ -35,6 +35,11 @@ + #define ICE_PSM_CLK_446MHZ_IN_HZ 446428571 + #define ICE_PSM_CLK_390MHZ_IN_HZ 390625000 + ++#define PSM_CLK_SRC_367_MHZ 0x0 ++#define PSM_CLK_SRC_416_MHZ 0x1 ++#define PSM_CLK_SRC_446_MHZ 0x2 ++#define PSM_CLK_SRC_390_MHZ 0x3 ++ + struct rl_profile_params { + u32 bw; /* in Kbps */ + u16 rl_multiplier; diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c -index 1fee790c25..c0df3a1815 100644 +index 1fee790c25..7c6a258255 100644 --- a/dpdk/drivers/net/ice/base/ice_switch.c +++ b/dpdk/drivers/net/ice/base/ice_switch.c @@ -2303,7 +2303,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, @@ -27926,7 +55064,40 @@ index 1fee790c25..c0df3a1815 100644 vlan = true; fv_word_idx++; } -@@ -6770,6 +6770,7 @@ static struct 
ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { +@@ -4855,7 +4855,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list, + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); +- m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; ++ if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI) ++ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + /* update the src in case it is VSI num */ + if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) + return ICE_ERR_PARAM; +@@ -6214,6 +6215,13 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + + LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry, + list_entry) { ++ /* Avoid enabling or disabling vlan zero twice when in double ++ * vlan mode ++ */ ++ if (ice_is_dvm_ena(hw) && ++ list_itr->fltr_info.l_data.vlan.tpid == 0) ++ continue; ++ + vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; + if (rm_vlan_promisc) + status = _ice_clear_vsi_promisc(hw, vsi_handle, +@@ -6223,7 +6231,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, + status = _ice_set_vsi_promisc(hw, vsi_handle, + promisc_mask, vlan_id, + lport, sw); +- if (status) ++ if (status && status != ICE_ERR_ALREADY_EXISTS) + break; + } + +@@ -6770,6 +6778,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, { ICE_VLAN_EX, ICE_VLAN_OF_HW }, { ICE_VLAN_IN, ICE_VLAN_OL_HW }, @@ -27934,7 +55105,15 @@ index 1fee790c25..c0df3a1815 100644 }; /** -@@ -7488,9 +7489,10 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, +@@ -7320,7 +7329,6 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, + last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; + LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, + l_entry) { +- last_chain_entry->fv_idx[i] = entry->chain_idx; + buf[recps].content.lkup_indx[i] = entry->chain_idx; + buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF); + ice_set_bit(entry->rid, rm->r_bitmap); +@@ -7488,9 +7496,10 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, /** * ice_tun_type_match_word - determine if tun type needs a match mask * @tun_type: tunnel type @@ -27946,7 +55125,7 @@ index 1fee790c25..c0df3a1815 100644 { switch (tun_type) { case ICE_SW_TUN_VXLAN_GPE: -@@ -7506,15 +7508,23 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) +@@ -7506,15 +7515,23 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask) case ICE_SW_TUN_PPPOE_IPV4_QINQ: case ICE_SW_TUN_PPPOE_IPV6_QINQ: *mask = ICE_TUN_FLAG_MASK; @@ -27970,7 +55149,7 @@ index 1fee790c25..c0df3a1815 100644 return false; } } -@@ -7529,16 +7539,18 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, +@@ -7529,16 +7546,18 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo, struct ice_prot_lkup_ext *lkup_exts) { u16 mask; @@ -27992,7 +55171,7 @@ index 1fee790c25..c0df3a1815 100644 lkup_exts->field_mask[word] = mask; } else { return ICE_ERR_MAX_LIMIT; -@@ -7779,6 +7791,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, +@@ -7779,6 +7798,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, bool ice_is_prof_rule(enum ice_sw_tunnel_type type) { switch (type) { @@ -28000,7 +55179,7 @@ index 1fee790c25..c0df3a1815 100644 case ICE_SW_TUN_PROFID_IPV6_ESP: case ICE_SW_TUN_PROFID_IPV6_AH: case 
ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3: -@@ -7863,6 +7876,15 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, +@@ -7863,6 +7883,15 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, */ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap); @@ -28016,6 +55195,15 @@ index 1fee790c25..c0df3a1815 100644 status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list); if (status) goto err_unroll; +@@ -8776,7 +8805,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, + + /* A rule already exists with the new VSI being added */ + if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) +- return ICE_SUCCESS; ++ return ICE_ERR_ALREADY_EXISTS; + + /* Update the previously created VSI list set with + * the new VSI ID passed in diff --git a/dpdk/drivers/net/ice/base/ice_switch.h b/dpdk/drivers/net/ice/base/ice_switch.h index a2b3c80107..c67cd09d21 100644 --- a/dpdk/drivers/net/ice/base/ice_switch.h @@ -28029,11 +55217,144 @@ index a2b3c80107..c67cd09d21 100644 bool ignore_valid; u16 mask; bool mask_valid; +diff --git a/dpdk/drivers/net/ice/base/ice_type.h b/dpdk/drivers/net/ice/base/ice_type.h +index d81984633a..ad0d72ac15 100644 +--- a/dpdk/drivers/net/ice/base/ice_type.h ++++ b/dpdk/drivers/net/ice/base/ice_type.h +@@ -87,11 +87,37 @@ static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc) + return ice_is_bit_set(&bitmap, tc); + } + +-#define DIV_64BIT(n, d) ((n) / (d)) ++/** ++ * DIV_S64 - Divide signed 64-bit value with signed 64-bit divisor ++ * @dividend: value to divide ++ * @divisor: value to divide by ++ * ++ * Use DIV_S64 for any 64-bit divide which operates on signed 64-bit dividends. ++ * Do not use this for unsigned 64-bit dividends as it will not produce ++ * correct results if the dividend is larger than S64_MAX. ++ */ ++static inline s64 DIV_S64(s64 dividend, s64 divisor) ++{ ++ return dividend / divisor; ++} ++ ++/** ++ * DIV_U64 - Divide unsigned 64-bit value by unsigned 64-bit divisor ++ * @dividend: value to divide ++ * @divisor: value to divide by ++ * ++ * Use DIV_U64 for any 64-bit divide which operates on unsigned 64-bit ++ * dividends. Do not use this for signed 64-bit dividends as it will not ++ * handle negative values correctly. 
++ */ ++static inline u64 DIV_U64(u64 dividend, u64 divisor) ++{ ++ return dividend / divisor; ++} + + static inline u64 round_up_64bit(u64 a, u32 b) + { +- return DIV_64BIT(((a) + (b) / 2), (b)); ++ return DIV_U64(((a) + (b) / 2), (b)); + } + + static inline u32 ice_round_to_num(u32 N, u32 R) +diff --git a/dpdk/drivers/net/ice/base/ice_xlt_kb.c b/dpdk/drivers/net/ice/base/ice_xlt_kb.c +index 4c1ab747cf..5efe209cad 100644 +--- a/dpdk/drivers/net/ice/base/ice_xlt_kb.c ++++ b/dpdk/drivers/net/ice/base/ice_xlt_kb.c +@@ -25,7 +25,7 @@ static void _xlt_kb_entry_dump(struct ice_hw *hw, + + /** + * ice_imem_dump - dump a xlt key build info +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + * @kb: key build to dump + */ + void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb) +@@ -154,7 +154,7 @@ static struct ice_xlt_kb *_xlt_kb_get(struct ice_hw *hw, u32 sect_type) + + /** + * ice_xlt_kb_get_sw - create switch xlt key build +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw) + { +@@ -163,7 +163,7 @@ struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw) + + /** + * ice_xlt_kb_get_acl - create acl xlt key build +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw) + { +@@ -172,7 +172,7 @@ struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw) + + /** + * ice_xlt_kb_get_fd - create fdir xlt key build +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw) + { +@@ -181,7 +181,7 @@ struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw) + + /** + * ice_xlt_kb_get_fd - create rss xlt key build +- * @ice_hw: pointer to the hardware structure ++ * @hw: pointer to the hardware structure + */ + struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw) + { diff --git a/dpdk/drivers/net/ice/ice_dcf.c b/dpdk/drivers/net/ice/ice_dcf.c -index cca1d7bf46..7f0c074b01 100644 +index cca1d7bf46..ad4bfa4b11 100644 --- a/dpdk/drivers/net/ice/ice_dcf.c +++ b/dpdk/drivers/net/ice/ice_dcf.c -@@ -864,7 +864,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw) +@@ -32,6 +32,8 @@ + #define ICE_DCF_ARQ_MAX_RETRIES 200 + #define ICE_DCF_ARQ_CHECK_TIME 2 /* msecs */ + ++#define ICE_DCF_CHECK_INTERVAL 100 /* 100ms */ ++ + #define ICE_DCF_VF_RES_BUF_SZ \ + (sizeof(struct virtchnl_vf_resource) + \ + IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)) +@@ -616,6 +618,8 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) + rte_spinlock_init(&hw->vc_cmd_queue_lock); + TAILQ_INIT(&hw->vc_cmd_queue); + ++ __atomic_store_n(&hw->vsi_update_thread_num, 0, __ATOMIC_RELAXED); ++ + hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0); + if (hw->arq_buf == NULL) { + PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory"); +@@ -733,6 +737,11 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw) + rte_intr_callback_unregister(intr_handle, + ice_dcf_dev_interrupt_handler, hw); + ++ /* Wait for all `ice-thread` threads to exit. 
*/ ++ while (__atomic_load_n(&hw->vsi_update_thread_num, ++ __ATOMIC_ACQUIRE) != 0) ++ rte_delay_ms(ICE_DCF_CHECK_INTERVAL); ++ + ice_dcf_mode_disable(hw); + iavf_shutdown_adminq(&hw->avf); + +@@ -827,7 +836,8 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw) + { + struct rte_eth_dev *dev = hw->eth_dev; + struct rte_eth_rss_conf *rss_conf; +- uint8_t i, j, nb_q; ++ uint8_t j, nb_q; ++ size_t i; + int ret; + + rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf; +@@ -864,7 +874,7 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw) j = 0; hw->rss_lut[i] = j; } @@ -28042,11 +55363,34 @@ index cca1d7bf46..7f0c074b01 100644 ret = ice_dcf_configure_rss_lut(hw); if (ret) return ret; +diff --git a/dpdk/drivers/net/ice/ice_dcf.h b/dpdk/drivers/net/ice/ice_dcf.h +index 6ec766ebda..84b8232b2a 100644 +--- a/dpdk/drivers/net/ice/ice_dcf.h ++++ b/dpdk/drivers/net/ice/ice_dcf.h +@@ -83,6 +83,8 @@ struct ice_dcf_hw { + void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw, + uint8_t *msg, uint16_t msglen); + ++ int vsi_update_thread_num; ++ + uint8_t *arq_buf; + + uint16_t num_vfs; diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/dpdk/drivers/net/ice/ice_dcf_ethdev.c -index 28f7f7fb72..6e9e80c1df 100644 +index 28f7f7fb72..d2ea5aff43 100644 --- a/dpdk/drivers/net/ice/ice_dcf_ethdev.c +++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.c -@@ -203,7 +203,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev, +@@ -65,7 +65,8 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq) + + buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; + rxq->rx_hdr_len = 0; +- rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); + max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, + dev->data->mtu + ICE_ETH_OVERHEAD); + +@@ -203,7 +204,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev, "vector %u are mapping to all Rx queues", hw->msix_base); } else { @@ -28055,7 +55399,23 @@ index 28f7f7fb72..6e9e80c1df 100644 * multi interrupts, then the vec is from 1 */ hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors, -@@ -664,6 +664,8 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, +@@ -610,7 +611,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) + struct ice_dcf_adapter *dcf_ad = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; + struct ice_adapter *ad = &dcf_ad->parent; +- struct ice_dcf_hw *hw = &dcf_ad->real_hw; + + if (ad->pf.adapter_stopped == 1) { + PMD_DRV_LOG(DEBUG, "Port is already stopped"); +@@ -628,7 +628,6 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev) + ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false); + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + ad->pf.adapter_stopped = 1; +- hw->tm_conf.committed = false; + + return 0; + } +@@ -664,6 +663,8 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, dev_info->reta_size = hw->vf_res->rss_lut_size; dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL; dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; @@ -28064,7 +55424,7 @@ index 28f7f7fb72..6e9e80c1df 100644 dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP | -@@ -681,6 +683,7 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, +@@ -681,6 +682,7 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | @@ -28072,7 +55432,47 @@ index 28f7f7fb72..6e9e80c1df 100644 RTE_ETH_TX_OFFLOAD_TCP_TSO | 
RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | -@@ -956,6 +959,13 @@ ice_dcf_link_update(struct rte_eth_dev *dev, +@@ -867,6 +869,26 @@ ice_dcf_free_repr_info(struct ice_dcf_adapter *dcf_adapter) + } + } + ++int ++ice_dcf_handle_vf_repr_close(struct ice_dcf_adapter *dcf_adapter, ++ uint16_t vf_id) ++{ ++ struct ice_dcf_repr_info *vf_rep_info; ++ ++ if (dcf_adapter->num_reprs >= vf_id) { ++ PMD_DRV_LOG(ERR, "Invalid VF id: %d", vf_id); ++ return -1; ++ } ++ ++ if (!dcf_adapter->repr_infos) ++ return 0; ++ ++ vf_rep_info = &dcf_adapter->repr_infos[vf_id]; ++ vf_rep_info->vf_rep_eth_dev = NULL; ++ ++ return 0; ++} ++ + static int + ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter) + { +@@ -890,11 +912,10 @@ ice_dcf_dev_close(struct rte_eth_dev *dev) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + ++ ice_dcf_vf_repr_notify_all(adapter, false); + (void)ice_dcf_dev_stop(dev); + + ice_free_queues(dev); +- +- ice_dcf_free_repr_info(adapter); + ice_dcf_uninit_parent_adapter(dev); + ice_dcf_uninit_hw(dev, &adapter->real_hw); + +@@ -956,6 +977,13 @@ ice_dcf_link_update(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &new_link); } @@ -28086,7 +55486,16 @@ index 28f7f7fb72..6e9e80c1df 100644 /* Add UDP tunneling port */ static int ice_dcf_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, -@@ -1105,6 +1115,7 @@ static int +@@ -1064,7 +1092,7 @@ ice_dcf_dev_reset(struct rte_eth_dev *dev) + ice_dcf_reset_hw(dev, hw); + } + +- ret = ice_dcf_dev_uninit(dev); ++ ret = ice_dcf_dev_close(dev); + if (ret) + return ret; + +@@ -1105,6 +1133,7 @@ static int ice_dcf_dev_init(struct rte_eth_dev *eth_dev) { struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; @@ -28094,7 +55503,7 @@ index 28f7f7fb72..6e9e80c1df 100644 eth_dev->dev_ops = &ice_dcf_eth_dev_ops; eth_dev->rx_pkt_burst = ice_dcf_recv_pkts; -@@ -1116,9 +1127,13 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev) +@@ -1116,21 +1145,32 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev) adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg; if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) { PMD_INIT_LOG(ERR, "Failed to init DCF hardware"); @@ -28108,22 +55517,51 @@ index 28f7f7fb72..6e9e80c1df 100644 if (ice_dcf_init_parent_adapter(eth_dev) != 0) { PMD_INIT_LOG(ERR, "Failed to init DCF parent adapter"); ice_dcf_uninit_hw(eth_dev, &adapter->real_hw); + return -1; + } + ++ ice_dcf_stats_reset(eth_dev); ++ ++ ice_dcf_vf_repr_notify_all(adapter, true); ++ + return 0; + } + + static int + ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev) + { ++ struct ice_dcf_adapter *adapter = eth_dev->data->dev_private; ++ ++ ice_dcf_free_repr_info(adapter); + ice_dcf_dev_close(eth_dev); + + return 0; diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.h b/dpdk/drivers/net/ice/ice_dcf_ethdev.h -index 8510e37119..11a1305038 100644 +index 8510e37119..b651d31d37 100644 --- a/dpdk/drivers/net/ice/ice_dcf_ethdev.h +++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.h -@@ -64,5 +64,6 @@ int ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param); +@@ -50,6 +50,7 @@ struct ice_dcf_vf_repr { + struct rte_ether_addr mac_addr; + uint16_t switch_domain_id; + uint16_t vf_id; ++ bool dcf_valid; + + struct ice_dcf_vlan outer_vlan_info; /* DCF always handle outer VLAN */ + }; +@@ -64,5 +65,8 @@ int ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param); int ice_dcf_vf_repr_uninit(struct rte_eth_dev *vf_rep_eth_dev); int ice_dcf_vf_repr_init_vlan(struct rte_eth_dev *vf_rep_eth_dev); void 
ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter); ++void ice_dcf_vf_repr_notify_all(struct ice_dcf_adapter *dcf_adapter, bool valid); ++int ice_dcf_handle_vf_repr_close(struct ice_dcf_adapter *dcf_adapter, uint16_t vf_id); +bool ice_dcf_adminq_need_retry(struct ice_adapter *ad); #endif /* _ICE_DCF_ETHDEV_H_ */ diff --git a/dpdk/drivers/net/ice/ice_dcf_parent.c b/dpdk/drivers/net/ice/ice_dcf_parent.c -index 1ff2c47172..2f96dedcce 100644 +index 1ff2c47172..ec01f28a51 100644 --- a/dpdk/drivers/net/ice/ice_dcf_parent.c +++ b/dpdk/drivers/net/ice/ice_dcf_parent.c -@@ -119,7 +119,9 @@ ice_dcf_vsi_update_service_handler(void *param) +@@ -119,7 +119,12 @@ ice_dcf_vsi_update_service_handler(void *param) { struct ice_dcf_reset_event_param *reset_param = param; struct ice_dcf_hw *hw = reset_param->dcf_hw; @@ -28131,10 +55569,13 @@ index 1ff2c47172..2f96dedcce 100644 + struct ice_dcf_adapter *adapter = + container_of(hw, struct ice_dcf_adapter, real_hw); + struct ice_adapter *parent_adapter = &adapter->parent; ++ ++ __atomic_fetch_add(&hw->vsi_update_thread_num, 1, ++ __ATOMIC_RELAXED); pthread_detach(pthread_self()); -@@ -127,11 +129,12 @@ ice_dcf_vsi_update_service_handler(void *param) +@@ -127,11 +132,12 @@ ice_dcf_vsi_update_service_handler(void *param) rte_spinlock_lock(&vsi_update_lock); @@ -28150,7 +55591,17 @@ index 1ff2c47172..2f96dedcce 100644 if (reset_param->vfr && adapter->repr_infos) { struct rte_eth_dev *vf_rep_eth_dev = -@@ -224,6 +227,9 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, +@@ -150,6 +156,9 @@ ice_dcf_vsi_update_service_handler(void *param) + + free(param); + ++ __atomic_fetch_sub(&hw->vsi_update_thread_num, 1, ++ __ATOMIC_RELEASE); ++ + return NULL; + } + +@@ -224,6 +233,9 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, uint8_t *msg, uint16_t msglen) { struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg; @@ -28160,7 +55611,7 @@ index 1ff2c47172..2f96dedcce 100644 if (msglen < sizeof(struct virtchnl_pf_event)) { PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen); -@@ -258,6 +264,8 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, +@@ -258,6 +270,8 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw, PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u", pf_msg->event_data.vf_vsi_map.vf_id, pf_msg->event_data.vf_vsi_map.vsi_id); @@ -28169,7 +55620,7 @@ index 1ff2c47172..2f96dedcce 100644 start_vsi_reset_thread(dcf_hw, true, pf_msg->event_data.vf_vsi_map.vf_id); break; -@@ -332,7 +340,7 @@ ice_dcf_init_parent_hw(struct ice_hw *hw) +@@ -332,7 +346,7 @@ ice_dcf_init_parent_hw(struct ice_hw *hw) goto err_unroll_alloc; /* Initialize port_info struct with link information */ @@ -28178,8 +55629,138 @@ index 1ff2c47172..2f96dedcce 100644 if (status) goto err_unroll_alloc; +diff --git a/dpdk/drivers/net/ice/ice_dcf_sched.c b/dpdk/drivers/net/ice/ice_dcf_sched.c +index a231c1e60b..b08bc5f1de 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_sched.c ++++ b/dpdk/drivers/net/ice/ice_dcf_sched.c +@@ -237,6 +237,7 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id, + enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX; + struct ice_dcf_tm_shaper_profile *shaper_profile = NULL; + struct ice_dcf_adapter *adapter = dev->data->dev_private; ++ struct ice_adapter *ad = &adapter->parent; + struct ice_dcf_hw *hw = &adapter->real_hw; + struct ice_dcf_tm_node *parent_node; + struct ice_dcf_tm_node *tm_node; +@@ -246,10 +247,10 @@ ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t 
node_id, + if (!params || !error) + return -EINVAL; + +- /* if already committed */ +- if (hw->tm_conf.committed) { ++ /* if port is running */ ++ if (!ad->pf.adapter_stopped) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; +- error->message = "already committed"; ++ error->message = "port is running"; + return -EINVAL; + } + +@@ -400,16 +401,17 @@ ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id, + { + enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX; + struct ice_dcf_adapter *adapter = dev->data->dev_private; ++ struct ice_adapter *ad = &adapter->parent; + struct ice_dcf_hw *hw = &adapter->real_hw; + struct ice_dcf_tm_node *tm_node; + + if (!error) + return -EINVAL; + +- /* if already committed */ +- if (hw->tm_conf.committed) { ++ /* if port is running */ ++ if (!ad->pf.adapter_stopped) { + error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED; +- error->message = "already committed"; ++ error->message = "port is running"; + return -EINVAL; + } + +diff --git a/dpdk/drivers/net/ice/ice_dcf_vf_representor.c b/dpdk/drivers/net/ice/ice_dcf_vf_representor.c +index b9fcfc80ad..af281f069a 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_vf_representor.c ++++ b/dpdk/drivers/net/ice/ice_dcf_vf_representor.c +@@ -50,9 +50,28 @@ ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev) + return 0; + } + ++static void ++ice_dcf_vf_repr_notify_one(struct rte_eth_dev *dev, bool valid) ++{ ++ struct ice_dcf_vf_repr *repr = dev->data->dev_private; ++ ++ repr->dcf_valid = valid; ++} ++ + static int + ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev) + { ++ struct ice_dcf_vf_repr *repr = dev->data->dev_private; ++ struct ice_dcf_adapter *dcf_adapter; ++ int err; ++ ++ if (repr->dcf_valid) { ++ dcf_adapter = repr->dcf_eth_dev->data->dev_private; ++ err = ice_dcf_handle_vf_repr_close(dcf_adapter, repr->vf_id); ++ if (err) ++ PMD_DRV_LOG(ERR, "VF representor invalid"); ++ } ++ + return ice_dcf_vf_repr_uninit(dev); + } + +@@ -111,14 +130,15 @@ ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev, + static __rte_always_inline struct ice_dcf_hw * + ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr) + { +- struct ice_dcf_adapter *dcf_adapter = +- repr->dcf_eth_dev->data->dev_private; ++ struct ice_dcf_adapter *dcf_adapter; + +- if (!dcf_adapter) { ++ if (!repr->dcf_valid) { + PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n"); + return NULL; + } + ++ dcf_adapter = repr->dcf_eth_dev->data->dev_private; ++ + return &dcf_adapter->real_hw; + } + +@@ -414,6 +434,7 @@ ice_dcf_vf_repr_init(struct rte_eth_dev *vf_rep_eth_dev, void *init_param) + repr->dcf_eth_dev = param->dcf_eth_dev; + repr->switch_domain_id = param->switch_domain_id; + repr->vf_id = param->vf_id; ++ repr->dcf_valid = true; + repr->outer_vlan_info.port_vlan_ena = false; + repr->outer_vlan_info.stripping_ena = false; + repr->outer_vlan_info.tpid = RTE_ETHER_TYPE_VLAN; +@@ -488,3 +509,22 @@ ice_dcf_vf_repr_stop_all(struct ice_dcf_adapter *dcf_adapter) + vf_rep_eth_dev->data->dev_started = 0; + } + } ++ ++void ++ice_dcf_vf_repr_notify_all(struct ice_dcf_adapter *dcf_adapter, bool valid) ++{ ++ uint16_t vf_id; ++ struct rte_eth_dev *vf_rep_eth_dev; ++ ++ if (!dcf_adapter->repr_infos) ++ return; ++ ++ for (vf_id = 0; vf_id < dcf_adapter->real_hw.num_vfs; vf_id++) { ++ vf_rep_eth_dev = dcf_adapter->repr_infos[vf_id].vf_rep_eth_dev; ++ ++ if (!vf_rep_eth_dev) ++ continue; ++ ++ ice_dcf_vf_repr_notify_one(vf_rep_eth_dev, valid); ++ } ++} diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c -index 
13a7a9702a..7df1b4ec19 100644 +index 13a7a9702a..5e84894e5f 100644 --- a/dpdk/drivers/net/ice/ice_ethdev.c +++ b/dpdk/drivers/net/ice/ice_ethdev.c @@ -1264,7 +1264,7 @@ ice_handle_aq_msg(struct rte_eth_dev *dev) @@ -28200,17 +55781,98 @@ index 13a7a9702a..7df1b4ec19 100644 * Currently vsi->nb_qps means it. * Correct it if any change. */ -@@ -3235,7 +3235,8 @@ static int ice_init_rss(struct ice_pf *pf) +@@ -2316,6 +2316,9 @@ ice_dev_init(struct rte_eth_dev *dev) + + pf->supported_rxdid = ice_get_supported_rxdid(hw); + ++ /* reset all stats of the device, including pf and main vsi */ ++ ice_stats_reset(dev); ++ + return 0; + + err_flow_init: +@@ -2456,12 +2459,17 @@ ice_dev_close(struct rte_eth_dev *dev) + return 0; + + /* Since stop will make link down, then the link event will be +- * triggered, disable the irq firstly to avoid the port_infoe etc +- * resources deallocation causing the interrupt service thread +- * crash. ++ * triggered, disable the irq firstly. + */ + ice_pf_disable_irq0(hw); + ++ /* Unregister callback func from eal lib, use sync version to ++ * make sure all active interrupt callbacks is done, then it's ++ * safe to free all resources. ++ */ ++ rte_intr_callback_unregister_sync(intr_handle, ++ ice_interrupt_handler, dev); ++ + ret = ice_dev_stop(dev); + + if (!ad->is_safe_mode) +@@ -2493,10 +2501,6 @@ ice_dev_close(struct rte_eth_dev *dev) + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + +- /* unregister callback func from eal lib */ +- rte_intr_callback_unregister(intr_handle, +- ice_interrupt_handler, dev); +- + return ret; + } + +@@ -3195,7 +3199,8 @@ static int ice_init_rss(struct ice_pf *pf) + + rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf; + nb_q = dev_data->nb_rx_queues; +- vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE; ++ vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + ++ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE; + vsi->rss_lut_size = pf->hash_lut_size; + + if (nb_q == 0) { +@@ -3235,7 +3240,11 @@ static int ice_init_rss(struct ice_pf *pf) RTE_MIN(rss_conf->rss_key_len, vsi->rss_key_size)); - rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size); + rte_memcpy(key.standard_rss_key, vsi->rss_key, -+ RTE_MIN(sizeof(key.standard_rss_key), vsi->rss_key_size)); ++ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); ++ rte_memcpy(key.extended_hash_key, ++ &vsi->rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE], ++ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE); ret = ice_aq_set_rss_key(hw, vsi->idx, &key); if (ret) goto out; -@@ -3576,7 +3577,7 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3486,6 +3495,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) + + if (link_status.link_info & ICE_AQ_LINK_UP) + pf->init_link_up = true; ++ else ++ pf->init_link_up = false; + } + + static int +@@ -3556,6 +3567,16 @@ ice_dev_start(struct rte_eth_dev *dev) + } + } + ++ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ /* Register mbuf field and flag for Rx timestamp */ ++ ret = rte_mbuf_dyn_rx_timestamp_register(&ice_timestamp_dynfield_offset, ++ &ice_timestamp_dynflag); ++ if (ret) { ++ PMD_DRV_LOG(ERR, "Cannot register mbuf field/flag for timestamp"); ++ goto tx_err; ++ } ++ } ++ + /* program Rx queues' context in hardware*/ + for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { + ret = ice_rx_queue_start(dev, nb_rxq); +@@ -3576,7 +3597,7 @@ ice_dev_start(struct rte_eth_dev *dev) goto rx_err; } @@ -28219,7 +55881,7 @@ index 13a7a9702a..7df1b4ec19 100644 if 
(ice_rxq_intr_setup(dev)) return -EIO; -@@ -3603,8 +3604,8 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3603,8 +3624,8 @@ ice_dev_start(struct rte_eth_dev *dev) ice_dev_set_link_up(dev); @@ -28230,7 +55892,39 @@ index 13a7a9702a..7df1b4ec19 100644 pf->adapter_stopped = false; -@@ -5395,7 +5396,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, +@@ -3751,6 +3772,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + .nb_max = ICE_MAX_RING_DESC, + .nb_min = ICE_MIN_RING_DESC, + .nb_align = ICE_ALIGN_RING_DESC, ++ .nb_mtu_seg_max = ICE_TX_MTU_SEG_MAX, ++ .nb_seg_max = ICE_MAX_RING_DESC, + }; + + dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | +@@ -3816,8 +3839,8 @@ ice_atomic_write_link_status(struct rte_eth_dev *dev, + static int + ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) + { +-#define CHECK_INTERVAL 100 /* 100ms */ +-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ ++#define CHECK_INTERVAL 50 /* 50ms */ ++#define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_link_status link_status; + struct rte_eth_link link, old; +@@ -4622,10 +4645,8 @@ ice_rss_hash_update(struct rte_eth_dev *dev, + if (status) + return status; + +- if (rss_conf->rss_hf == 0) { ++ if (rss_conf->rss_hf == 0) + pf->rss_hf = 0; +- return 0; +- } + + /* RSS hash configuration */ + ice_rss_hash_set(pf, rss_conf->rss_hf); +@@ -5395,7 +5416,7 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, count++; } @@ -28239,7 +55933,7 @@ index 13a7a9702a..7df1b4ec19 100644 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { xstats[count].value = *(uint64_t *)((char *)hw_stats + -@@ -5426,7 +5427,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, +@@ -5426,7 +5447,7 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev, count++; } @@ -28248,7 +55942,7 @@ index 13a7a9702a..7df1b4ec19 100644 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) { strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name, sizeof(xstats_names[count].name)); -@@ -5454,6 +5455,8 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, +@@ -5454,6 +5475,8 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, { int ret = 0; struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -28257,7 +55951,7 @@ index 13a7a9702a..7df1b4ec19 100644 if (udp_tunnel == NULL) return -EINVAL; -@@ -5461,6 +5464,9 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, +@@ -5461,6 +5484,9 @@ ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, switch (udp_tunnel->prot_type) { case RTE_ETH_TUNNEL_TYPE_VXLAN: ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port); @@ -28267,7 +55961,7 @@ index 13a7a9702a..7df1b4ec19 100644 break; default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); -@@ -5478,6 +5484,8 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, +@@ -5478,6 +5504,8 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, { int ret = 0; struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -28276,7 +55970,7 @@ index 13a7a9702a..7df1b4ec19 100644 if (udp_tunnel == NULL) return -EINVAL; -@@ -5485,6 +5493,9 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, +@@ -5485,6 +5513,9 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, switch (udp_tunnel->prot_type) { case RTE_ETH_TUNNEL_TYPE_VXLAN: ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0); @@ -28301,7 +55995,7 @@ index 2e3e45f3d7..ac56c3cc60 100644 bool 
rx_use_avx2; bool rx_use_avx512; diff --git a/dpdk/drivers/net/ice/ice_fdir_filter.c b/dpdk/drivers/net/ice/ice_fdir_filter.c -index 13a2ac42df..72c8bd8f02 100644 +index 13a2ac42df..ac03e59fb8 100644 --- a/dpdk/drivers/net/ice/ice_fdir_filter.c +++ b/dpdk/drivers/net/ice/ice_fdir_filter.c @@ -1828,7 +1828,6 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, @@ -28322,7 +56016,7 @@ index 13a2ac42df..72c8bd8f02 100644 raw_spec = item->spec; raw_mask = item->mask; -@@ -1870,11 +1872,11 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, +@@ -1870,13 +1872,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, break; /* convert raw spec & mask from byte string to int */ @@ -28335,8 +56029,11 @@ index 13a2ac42df..72c8bd8f02 100644 - uint16_t udp_port = 0; + uint8_t *tmp_spec, *tmp_mask; uint16_t tmp_val = 0; - uint8_t pkt_len = 0; +- uint8_t pkt_len = 0; ++ uint16_t pkt_len = 0; uint8_t tmp = 0; + int i, j; + @@ -1885,8 +1887,18 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, pkt_len) return -rte_errno; @@ -28471,20 +56168,27 @@ index 13a2ac42df..72c8bd8f02 100644 *input_set |= ICE_INSET_SCTP_SRC_PORT; if (sctp_mask->hdr.dst_port == UINT16_MAX) diff --git a/dpdk/drivers/net/ice/ice_generic_flow.c b/dpdk/drivers/net/ice/ice_generic_flow.c -index c673feb7a6..f9be3a5c94 100644 +index c673feb7a6..4bd0dce0ac 100644 --- a/dpdk/drivers/net/ice/ice_generic_flow.c +++ b/dpdk/drivers/net/ice/ice_generic_flow.c -@@ -1826,6 +1826,9 @@ ice_flow_init(struct ice_adapter *ad) +@@ -1826,6 +1826,16 @@ ice_flow_init(struct ice_adapter *ad) TAILQ_INIT(&pf->dist_parser_list); rte_spinlock_init(&pf->flow_ops_lock); + if (ice_parser_create(&ad->hw, &ad->psr) != ICE_SUCCESS) + PMD_INIT_LOG(WARNING, "Failed to initialize DDP parser, raw packet filter will not be supported"); + ++ if (ad->psr) { ++ if (ice_is_dvm_ena(&ad->hw)) ++ ice_parser_dvm_set(ad->psr, true); ++ else ++ ice_parser_dvm_set(ad->psr, false); ++ } ++ RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { if (engine->init == NULL) { PMD_INIT_LOG(ERR, "Invalid engine type (%d)", -@@ -1880,6 +1883,11 @@ ice_flow_uninit(struct ice_adapter *ad) +@@ -1880,6 +1890,11 @@ ice_flow_uninit(struct ice_adapter *ad) TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node); rte_free(p_parser); } @@ -28496,7 +56200,22 @@ index c673feb7a6..f9be3a5c94 100644 } static struct ice_parser_list * -@@ -2515,7 +2523,9 @@ ice_flow_flush(struct rte_eth_dev *dev, +@@ -2005,6 +2020,14 @@ ice_flow_valid_attr(struct ice_adapter *ad, + return -rte_errno; + } + ++ /* Not supported */ ++ if (attr->transfer) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, ++ attr, "Not support transfer."); ++ return -rte_errno; ++ } ++ + /* Check pipeline mode support to set classification stage */ + if (ad->devargs.pipe_mode_support) { + if (attr->priority == 0) +@@ -2515,7 +2538,9 @@ ice_flow_flush(struct rte_eth_dev *dev, ret = ice_flow_destroy(dev, p_flow, error); if (ret) { PMD_DRV_LOG(ERR, "Failed to flush flows"); @@ -28508,7 +56227,7 @@ index c673feb7a6..f9be3a5c94 100644 } diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c -index afbb357fa3..f35727856e 100644 +index afbb357fa3..52646e9408 100644 --- a/dpdk/drivers/net/ice/ice_hash.c +++ b/dpdk/drivers/net/ice/ice_hash.c @@ -653,13 +653,15 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, @@ -28516,8 +56235,9 @@ index afbb357fa3..f35727856e 100644 struct ice_parser_profile prof; struct ice_parser_result rslt; - struct ice_parser *psr; ++ 
uint16_t spec_len, pkt_len; uint8_t *pkt_buf, *msk_buf; - uint8_t spec_len, pkt_len; +- uint8_t spec_len, pkt_len; uint8_t tmp_val = 0; uint8_t tmp_c = 0; int i, j; @@ -28542,7 +56262,7 @@ index afbb357fa3..f35727856e 100644 if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf, pkt_len, ICE_BLK_RSS, true, &prof)) diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c -index f6d8564ab8..71e5c6f5d6 100644 +index f6d8564ab8..7578bac03e 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.c +++ b/dpdk/drivers/net/ice/ice_rxtx.c @@ -163,6 +163,8 @@ ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, @@ -28563,7 +56283,43 @@ index f6d8564ab8..71e5c6f5d6 100644 #endif } -@@ -1118,7 +1122,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev, +@@ -279,7 +283,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - + RTE_PKTMBUF_HEADROOM); + rxq->rx_hdr_len = 0; +- rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); ++ rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); + rxq->max_pkt_len = + RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, + frame_size); +@@ -293,7 +298,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + return -EINVAL; + } + +- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + /* Register mbuf field and flag for Rx timestamp */ + err = rte_mbuf_dyn_rx_timestamp_register( + &ice_timestamp_dynfield_offset, +@@ -303,6 +308,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) + "Cannot register mbuf field/flag for timestamp"); + return -EINVAL; + } ++ rxq->ts_enable = true; + } + + memset(&rx_ctx, 0, sizeof(rx_ctx)); +@@ -588,6 +594,8 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) + return -EINVAL; + } + ++ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ++ rxq->ts_enable = true; + err = ice_program_hw_rx_queue(rxq); + if (err) { + PMD_DRV_LOG(ERR, "fail to program RX queue %u", +@@ -1118,7 +1126,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev, rxq->proto_xtr = pf->proto_xtr != NULL ? pf->proto_xtr[queue_idx] : PROTO_XTR_NONE; @@ -28572,7 +56328,17 @@ index f6d8564ab8..71e5c6f5d6 100644 len = ICE_MAX_RING_DESC; /** -@@ -1248,7 +1252,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev, +@@ -1193,7 +1201,8 @@ ice_rx_queue_release(void *rxq) + return; + } + +- q->rx_rel_mbufs(q); ++ if (q->rx_rel_mbufs != NULL) ++ q->rx_rel_mbufs(q); + rte_free(q->sw_ring); + rte_memzone_free(q->mz); + rte_free(q); +@@ -1248,7 +1257,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev, tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh : ICE_DEFAULT_TX_FREE_THRESH); @@ -28581,7 +56347,17 @@ index f6d8564ab8..71e5c6f5d6 100644 tx_rs_thresh = (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ? 
nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH; -@@ -1554,6 +1558,9 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) +@@ -1403,7 +1412,8 @@ ice_tx_queue_release(void *txq) + return; + } + +- q->tx_rel_mbufs(q); ++ if (q->tx_rel_mbufs != NULL) ++ q->tx_rel_mbufs(q); + rte_free(q->sw_ring); + rte_memzone_free(q->mz); + rte_free(q); +@@ -1554,6 +1564,9 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) #if (ICE_LOOK_AHEAD != 8) #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n" #endif @@ -28591,7 +56367,7 @@ index f6d8564ab8..71e5c6f5d6 100644 static inline int ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) { -@@ -1567,9 +1574,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) +@@ -1567,9 +1580,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) uint64_t pkt_flags = 0; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC @@ -28603,7 +56379,7 @@ index f6d8564ab8..71e5c6f5d6 100644 struct ice_adapter *ad = rxq->vsi->adapter; #endif rxdp = &rxq->rx_ring[rxq->rx_tail]; -@@ -1581,8 +1589,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) +@@ -1581,8 +1595,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) return 0; @@ -28620,14 +56396,17 @@ index f6d8564ab8..71e5c6f5d6 100644 /** * Scan LOOK_AHEAD descriptors at a time to determine which -@@ -1618,14 +1632,26 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) +@@ -1617,15 +1637,28 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) + ice_rxd_to_vlan_tci(mb, &rxdp[j]); rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - if (ice_timestamp_dynflag > 0) { +- if (ice_timestamp_dynflag > 0) { - ts_ns = ice_tstamp_convert_32b_64b(hw, ad, - rxq->hw_register_set, - rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high)); - rxq->hw_register_set = 0; ++ if (ice_timestamp_dynflag > 0 && ++ (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + rxq->time_high = + rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { @@ -28654,7 +56433,7 @@ index f6d8564ab8..71e5c6f5d6 100644 } if (ad->ptp_ena && ((mb->packet_type & -@@ -1634,6 +1660,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) +@@ -1634,6 +1667,10 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); mb->timesync = rxq->queue_id; pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; @@ -28665,7 +56444,7 @@ index f6d8564ab8..71e5c6f5d6 100644 } #endif mb->ol_flags |= pkt_flags; -@@ -1714,7 +1744,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq) +@@ -1714,7 +1751,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq) rxdp[i].read.pkt_addr = dma_addr; } @@ -28674,7 +56453,7 @@ index f6d8564ab8..71e5c6f5d6 100644 ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); rxq->rx_free_trigger = -@@ -1820,14 +1850,19 @@ ice_recv_scattered_pkts(void *rx_queue, +@@ -1820,14 +1857,19 @@ ice_recv_scattered_pkts(void *rx_queue, uint64_t pkt_flags; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC @@ -28698,18 +56477,28 @@ index f6d8564ab8..71e5c6f5d6 100644 while (nb_rx < nb_pkts) { rxdp = &rx_ring[rx_id]; -@@ -1940,14 +1975,25 @@ ice_recv_scattered_pkts(void *rx_queue, +@@ -1929,6 +1971,10 @@ ice_recv_scattered_pkts(void *rx_queue, + } else + rxm->data_len = (uint16_t)(rx_packet_len - + RTE_ETHER_CRC_LEN); ++ } else if (rx_packet_len == 0) { ++ rte_pktmbuf_free_seg(rxm); ++ first_seg->nb_segs--; ++ last_seg->next = NULL; + } + + first_seg->port = 
rxq->port_id; +@@ -1939,15 +1985,27 @@ ice_recv_scattered_pkts(void *rx_queue, + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - if (ice_timestamp_dynflag > 0) { +- if (ice_timestamp_dynflag > 0) { - ts_ns = ice_tstamp_convert_32b_64b(hw, ad, - rxq->hw_register_set, - rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high)); - rxq->hw_register_set = 0; -- *RTE_MBUF_DYNFIELD(first_seg, -- ice_timestamp_dynfield_offset, -- rte_mbuf_timestamp_t *) = ts_ns; -- first_seg->ol_flags |= ice_timestamp_dynflag; ++ if (ice_timestamp_dynflag > 0 && ++ (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + rxq->time_high = + rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { @@ -28725,14 +56514,17 @@ index f6d8564ab8..71e5c6f5d6 100644 + } + rxq->hw_time_update = rte_get_timer_cycles() / + (rte_get_timer_hz() / 1000); -+ *RTE_MBUF_DYNFIELD(rxm, + *RTE_MBUF_DYNFIELD(first_seg, +- ice_timestamp_dynfield_offset, +- rte_mbuf_timestamp_t *) = ts_ns; +- first_seg->ol_flags |= ice_timestamp_dynflag; + (ice_timestamp_dynfield_offset), + rte_mbuf_timestamp_t *) = ts_ns; + pkt_flags |= ice_timestamp_dynflag; } if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK) -@@ -1976,7 +2022,7 @@ ice_recv_scattered_pkts(void *rx_queue, +@@ -1976,7 +2034,7 @@ ice_recv_scattered_pkts(void *rx_queue, * threshold of the queue, advance the Receive Descriptor Tail (RDT) * register. Update the RDT with the value of the last processed RX * descriptor minus 1, to guarantee that the RDT register is never @@ -28741,7 +56533,7 @@ index f6d8564ab8..71e5c6f5d6 100644 * from the hardware point of view. */ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); -@@ -2314,14 +2360,19 @@ ice_recv_pkts(void *rx_queue, +@@ -2314,14 +2372,19 @@ ice_recv_pkts(void *rx_queue, uint64_t pkt_flags; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC @@ -28765,14 +56557,17 @@ index f6d8564ab8..71e5c6f5d6 100644 while (nb_rx < nb_pkts) { rxdp = &rx_ring[rx_id]; -@@ -2375,14 +2426,25 @@ ice_recv_pkts(void *rx_queue, +@@ -2374,15 +2437,27 @@ ice_recv_pkts(void *rx_queue, + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - if (ice_timestamp_dynflag > 0) { +- if (ice_timestamp_dynflag > 0) { - ts_ns = ice_tstamp_convert_32b_64b(hw, ad, - rxq->hw_register_set, - rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high)); - rxq->hw_register_set = 0; ++ if (ice_timestamp_dynflag > 0 && ++ (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { + rxq->time_high = + rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); + if (unlikely(is_tsinit)) { @@ -28798,7 +56593,7 @@ index f6d8564ab8..71e5c6f5d6 100644 } if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) == -@@ -2397,6 +2459,7 @@ ice_recv_pkts(void *rx_queue, +@@ -2397,6 +2472,7 @@ ice_recv_pkts(void *rx_queue, /* copy old mbuf to rx_pkts */ rx_pkts[nb_rx++] = rxm; } @@ -28806,7 +56601,37 @@ index f6d8564ab8..71e5c6f5d6 100644 rxq->rx_tail = rx_id; /** * If the number of free RX descriptors is greater than the RX free -@@ -2493,15 +2556,15 @@ ice_txd_enable_checksum(uint64_t ol_flags, +@@ -2441,6 +2517,7 @@ ice_parse_tunneling_params(uint64_t ol_flags, + /* for non UDP / GRE tunneling, set to 00b */ + break; + case RTE_MBUF_F_TX_TUNNEL_VXLAN: ++ case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: + case RTE_MBUF_F_TX_TUNNEL_GTP: + case RTE_MBUF_F_TX_TUNNEL_GENEVE: + *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; 
+@@ -2471,7 +2548,8 @@ ice_parse_tunneling_params(uint64_t ol_flags, + * Shall be set only if L4TUNT = 01b and EIPT is not zero + */ + if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && +- (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING)) ++ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) + *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; + } + +@@ -2482,10 +2560,7 @@ ice_txd_enable_checksum(uint64_t ol_flags, + union ice_tx_offload tx_offload) + { + /* Set MACLEN */ +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) +- *td_offset |= (tx_offload.outer_l2_len >> 1) +- << ICE_TX_DESC_LEN_MACLEN_S; +- else ++ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) + *td_offset |= (tx_offload.l2_len >> 1) + << ICE_TX_DESC_LEN_MACLEN_S; + +@@ -2493,15 +2568,15 @@ ice_txd_enable_checksum(uint64_t ol_flags, if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; *td_offset |= (tx_offload.l3_len >> 2) << @@ -28825,7 +56650,21 @@ index f6d8564ab8..71e5c6f5d6 100644 } if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { -@@ -3117,7 +3180,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq, +@@ -2746,9 +2821,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + /* Fill in tunneling parameters if necessary */ + cd_tunneling_params = 0; +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ++ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { ++ td_offset |= (tx_offload.outer_l2_len >> 1) ++ << ICE_TX_DESC_LEN_MACLEN_S; + ice_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); ++ } + + /* Enable checksum offloading */ + if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) +@@ -3117,7 +3195,7 @@ tx_xmit_pkts(struct ice_tx_queue *txq, ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); @@ -28834,7 +56673,82 @@ index f6d8564ab8..71e5c6f5d6 100644 if (txq->tx_tail > txq->tx_next_rs) { txr[txq->tx_next_rs].cmd_type_offset_bsz |= rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << -@@ -3541,8 +3604,9 @@ static const struct { +@@ -3389,8 +3467,24 @@ ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq) + #define ICE_MIN_TSO_MSS 64 + #define ICE_MAX_TSO_MSS 9728 + #define ICE_MAX_TSO_FRAME_SIZE 262144 ++ ++/*Check for empty mbuf*/ ++static inline uint16_t ++ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) ++{ ++ struct rte_mbuf *txd = tx_pkt; ++ ++ while (txd != NULL) { ++ if (txd->data_len == 0) ++ return -1; ++ txd = txd->next; ++ } ++ ++ return 0; ++} ++ + uint16_t +-ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, ++ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) + { + int i, ret; +@@ -3401,9 +3495,23 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + m = tx_pkts[i]; + ol_flags = m->ol_flags; + +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && ++ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) && ++ /** ++ * No TSO case: nb->segs, pkt_len to not exceed ++ * the limites. ++ */ ++ (m->nb_segs > ICE_TX_MTU_SEG_MAX || ++ m->pkt_len > ICE_FRAME_SIZE_MAX)) { ++ rte_errno = EINVAL; ++ return i; ++ } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && ++ /** TSO case: tso_segsz, nb_segs, pkt_len not exceed ++ * the limits. 
++ */ + (m->tso_segsz < ICE_MIN_TSO_MSS || + m->tso_segsz > ICE_MAX_TSO_MSS || ++ m->nb_segs > ++ ((struct ice_tx_queue *)tx_queue)->nb_tx_desc || + m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { + /** + * MSS outside the range are considered malicious +@@ -3412,6 +3520,11 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + ++ if (m->pkt_len < ICE_TX_MIN_PKT_LEN) { ++ rte_errno = EINVAL; ++ return i; ++ } ++ + #ifdef RTE_ETHDEV_DEBUG_TX + ret = rte_validate_tx_offload(m); + if (ret != 0) { +@@ -3424,6 +3537,11 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + rte_errno = -ret; + return i; + } ++ ++ if (ice_check_empty_mbuf(m) != 0) { ++ rte_errno = EINVAL; ++ return i; ++ } + } + return i; + } +@@ -3541,8 +3659,9 @@ static const struct { { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" }, #endif @@ -28847,23 +56761,50 @@ index f6d8564ab8..71e5c6f5d6 100644 }; diff --git a/dpdk/drivers/net/ice/ice_rxtx.h b/dpdk/drivers/net/ice/ice_rxtx.h -index bb18a01951..f5337d5284 100644 +index bb18a01951..3815c1cec3 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.h +++ b/dpdk/drivers/net/ice/ice_rxtx.h -@@ -95,6 +95,9 @@ struct ice_rx_queue { +@@ -40,9 +40,16 @@ + + #define ICE_RXDID_COMMS_OVS 22 + ++#define ICE_TX_MIN_PKT_LEN 17 ++ + extern uint64_t ice_timestamp_dynflag; + extern int ice_timestamp_dynfield_offset; + ++/* Max data buffer size must be 16K - 128 bytes */ ++#define ICE_RX_MAX_DATA_BUF_SIZE (16 * 1024 - 128) ++ ++#define ICE_TX_MTU_SEG_MAX 8 ++ + typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); + typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq); + typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq, +@@ -95,6 +102,10 @@ struct ice_rx_queue { uint32_t time_high; uint32_t hw_register_set; const struct rte_memzone *mz; + uint32_t hw_time_high; /* high 32 bits of timestamp */ + uint32_t hw_time_low; /* low 32 bits of timestamp */ + uint64_t hw_time_update; /* SW time of HW record updating */ ++ bool ts_enable; /* if rxq timestamp is enabled */ }; struct ice_tx_entry { diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -index dfe60c81d9..2dd2d83650 100644 +index dfe60c81d9..ac939a3ba6 100644 --- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +@@ -72,7 +72,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs, + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; +- rte_memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); ++ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; + } + @@ -250,7 +250,8 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) #define ICE_TX_NO_VECTOR_FLAGS ( \ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ @@ -29033,7 +56974,7 @@ index 51fcabfb59..bff98df200 100644 igc->ntuple_filters[i].hash_val) /* filter be found, return index */ diff --git a/dpdk/drivers/net/igc/igc_txrx.c b/dpdk/drivers/net/igc/igc_txrx.c -index 339b0c9aa1..ffd219b0df 100644 +index 339b0c9aa1..160865e911 100644 --- a/dpdk/drivers/net/igc/igc_txrx.c +++ b/dpdk/drivers/net/igc/igc_txrx.c @@ -345,7 +345,7 @@ rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm, @@ -29054,7 +56995,15 @@ index 339b0c9aa1..ffd219b0df 100644 igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { -@@ -1397,7 +1397,7 @@ eth_igc_rx_queue_setup(struct 
rte_eth_dev *dev, +@@ -1291,6 +1291,7 @@ igc_rx_init(struct rte_eth_dev *dev) + dvmolr |= IGC_DVMOLR_STRCRC; + + IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + return 0; +@@ -1397,7 +1398,7 @@ eth_igc_rx_queue_setup(struct rte_eth_dev *dev, } /* prepare packets for transmit */ @@ -29063,7 +57012,7 @@ index 339b0c9aa1..ffd219b0df 100644 eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { -@@ -1604,7 +1604,7 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +@@ -1604,7 +1605,7 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) return tmp; } @@ -29072,7 +57021,23 @@ index 339b0c9aa1..ffd219b0df 100644 igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct igc_tx_queue * const txq = tx_queue; -@@ -2099,7 +2099,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt) +@@ -1934,6 +1935,7 @@ igc_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + igc_tx_queue_release_mbufs(txq); + igc_reset_tx_queue(txq); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -1942,6 +1944,7 @@ igc_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + igc_rx_queue_release_mbufs(rxq); + igc_reset_rx_queue(rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + } +@@ -2099,7 +2102,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt) sw_ring[tx_id].mbuf = NULL; sw_ring[tx_id].last_id = tx_id; @@ -29081,7 +57046,7 @@ index 339b0c9aa1..ffd219b0df 100644 tx_id = sw_ring[tx_id].next_id; } while (tx_id != tx_next); -@@ -2133,7 +2133,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt) +@@ -2133,7 +2136,7 @@ eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt) * Walk the list and find the next mbuf, if any. 
*/ do { @@ -29090,6 +57055,14 @@ index 339b0c9aa1..ffd219b0df 100644 tx_id = sw_ring[tx_id].next_id; if (sw_ring[tx_id].mbuf) +@@ -2187,6 +2190,7 @@ igc_tx_init(struct rte_eth_dev *dev) + IGC_TXDCTL_WTHRESH_MSK; + txdctl |= IGC_TXDCTL_QUEUE_ENABLE; + IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + + igc_config_collision_dist(hw); diff --git a/dpdk/drivers/net/igc/igc_txrx.h b/dpdk/drivers/net/igc/igc_txrx.h index 535108a868..02a0a051bb 100644 --- a/dpdk/drivers/net/igc/igc_txrx.h @@ -29107,6 +57080,22 @@ index 535108a868..02a0a051bb 100644 #ifdef __cplusplus } #endif +diff --git a/dpdk/drivers/net/ionic/ionic_dev.c b/dpdk/drivers/net/ionic/ionic_dev.c +index 43e9ca3de3..5439b99b2d 100644 +--- a/dpdk/drivers/net/ionic/ionic_dev.c ++++ b/dpdk/drivers/net/ionic/ionic_dev.c +@@ -55,7 +55,10 @@ ionic_dev_setup(struct ionic_adapter *adapter) + ioread8(&idev->dev_info->fw_version[i]); + adapter->fw_version[IONIC_DEVINFO_FWVERS_BUFLEN - 1] = '\0'; + +- IONIC_PRINT(DEBUG, "Firmware version: %s", adapter->fw_version); ++ adapter->name = adapter->pci_dev->device.name; ++ ++ IONIC_PRINT(DEBUG, "%s firmware version: %s", ++ adapter->name, adapter->fw_version); + + /* BAR1: doorbells */ + bar++; diff --git a/dpdk/drivers/net/ionic/ionic_if.h b/dpdk/drivers/net/ionic/ionic_if.h index 693b44d764..45bad9b040 100644 --- a/dpdk/drivers/net/ionic/ionic_if.h @@ -29138,8 +57127,133 @@ index 693b44d764..45bad9b040 100644 * * When an interrupt is sent the interrupt * coalescing timer current value +diff --git a/dpdk/drivers/net/ionic/ionic_lif.c b/dpdk/drivers/net/ionic/ionic_lif.c +index 5e8fdf3893..799530f7f5 100644 +--- a/dpdk/drivers/net/ionic/ionic_lif.c ++++ b/dpdk/drivers/net/ionic/ionic_lif.c +@@ -132,7 +132,7 @@ ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats + + for (i = 0; i < lif->nrxqcqs; i++) { + struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats; +- stats->imissed += ++ stats->ierrors += + rx_stats->no_cb_arg + + rx_stats->bad_cq_status + + rx_stats->no_room + +@@ -144,10 +144,8 @@ ionic_lif_get_abs_stats(const struct ionic_lif *lif, struct rte_eth_stats *stats + ls->rx_mcast_drop_packets + + ls->rx_bcast_drop_packets; + +- stats->imissed += +- ls->rx_queue_empty + ++ stats->ierrors += + ls->rx_dma_error + +- ls->rx_queue_disabled + + ls->rx_desc_fetch_error + + ls->rx_desc_data_error; + +diff --git a/dpdk/drivers/net/ionic/ionic_rx_filter.c b/dpdk/drivers/net/ionic/ionic_rx_filter.c +index bf57a9fa52..4b2e907f9f 100644 +--- a/dpdk/drivers/net/ionic/ionic_rx_filter.c ++++ b/dpdk/drivers/net/ionic/ionic_rx_filter.c +@@ -63,8 +63,8 @@ ionic_rx_filter_save(struct ionic_lif *lif, uint32_t flow_id, + f->flow_id = flow_id; + f->filter_id = rte_le_to_cpu_32(ctx->comp.rx_filter_add.filter_id); + f->rxq_index = rxq_index; +- f->match = rte_le_to_cpu_16(f->cmd.match); + memcpy(&f->cmd, &ctx->cmd, sizeof(f->cmd)); ++ f->match = rte_le_to_cpu_16(f->cmd.match); + + switch (f->match) { + case IONIC_RX_FILTER_MATCH_VLAN: +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c +index 9f602de6a9..665d085823 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx.c +@@ -300,18 +300,20 @@ ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, + bool start, bool done) + { + void **info; ++ uint64_t cmd; + uint8_t flags = 0; + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? 
IONIC_TXQ_DESC_FLAG_ENCAP : 0; + flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; + flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; + +- desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, ++ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, + flags, nsge, addr); +- desc->len = len; +- desc->vlan_tci = vlan_tci; +- desc->hdr_len = hdrlen; +- desc->mss = mss; ++ desc->cmd = rte_cpu_to_le_64(cmd); ++ desc->len = rte_cpu_to_le_16(len); ++ desc->vlan_tci = rte_cpu_to_le_16(vlan_tci); ++ desc->hdr_len = rte_cpu_to_le_16(hdrlen); ++ desc->mss = rte_cpu_to_le_16(mss); + + if (done) { + info = IONIC_INFO_PTR(q, q->head_idx); +@@ -423,7 +425,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) + len = RTE_MIN(frag_left, left); + frag_left -= len; + elem->addr = next_addr; +- elem->len = len; ++ elem->len = rte_cpu_to_le_16(len); + elem++; + desc_nsge++; + } else { +@@ -470,7 +472,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) + bool encap; + bool has_vlan; + uint64_t ol_flags = txm->ol_flags; +- uint64_t addr; ++ uint64_t addr, cmd; + uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; + uint8_t flags = 0; + +@@ -505,9 +507,10 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) + + addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm)); + +- desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); +- desc->len = txm->data_len; +- desc->vlan_tci = txm->vlan_tci; ++ cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); ++ desc->cmd = rte_cpu_to_le_64(cmd); ++ desc->len = rte_cpu_to_le_16(txm->data_len); ++ desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci); + + info[0] = txm; + +@@ -515,7 +518,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) + + txm_seg = txm->next; + while (txm_seg != NULL) { +- elem->len = txm_seg->data_len; ++ elem->len = rte_cpu_to_le_16(txm_seg->data_len); + elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg)); + elem++; + txm_seg = txm_seg->next; +@@ -840,12 +843,12 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq, + + /* RSS */ + pkt_flags |= RTE_MBUF_F_RX_RSS_HASH; +- rxm->hash.rss = cq_desc->rss_hash; ++ rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash); + + /* Vlan Strip */ + if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { + pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; +- rxm->vlan_tci = cq_desc->vlan_tci; ++ rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci); + } + + /* Checksum */ diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c -index 964506c6db..014e438dd5 100644 +index 964506c6db..d36bf99fab 100644 --- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c +++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c @@ -483,7 +483,7 @@ static int ipn3ke_vswitch_probe(struct rte_afu_device *afu_dev) @@ -29151,6 +57265,15 @@ index 964506c6db..014e438dd5 100644 retval = -ENOMEM; return -ENOMEM; } +@@ -561,7 +561,7 @@ static int ipn3ke_vswitch_remove(struct rte_afu_device *afu_dev) + snprintf(name, sizeof(name), "net_%s_representor_%d", + afu_dev->device.name, i); + +- ethdev = rte_eth_dev_allocated(afu_dev->device.name); ++ ethdev = rte_eth_dev_allocated(name); + if (ethdev != NULL) + rte_eth_dev_destroy(ethdev, ipn3ke_rpst_uninit); + } diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h index 041f13d9c3..58fcc50c57 100644 --- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.h @@ -29187,10 +57310,50 @@ index f5867ca055..66ae31a5a9 100644 IPN3KE_CLF_RX_PARSE_CFG, 0, diff --git 
a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c -index de325c7d29..abbecfdf2e 100644 +index de325c7d29..b0ead3197b 100644 --- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c -@@ -2218,9 +2218,6 @@ ipn3ke_rpst_xstats_get +@@ -120,6 +120,7 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) + uint64_t base_mac; + uint32_t val; + char attr_name[IPN3KE_RAWDEV_ATTR_LEN_MAX]; ++ uint16_t i; + + rawdev = hw->rawdev; + +@@ -190,6 +191,11 @@ ipn3ke_rpst_dev_start(struct rte_eth_dev *dev) + + ipn3ke_rpst_link_update(dev, 0); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -198,6 +204,7 @@ ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) + { + struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev); + struct ipn3ke_rpst *rpst = IPN3KE_DEV_PRIVATE_TO_RPST(dev); ++ uint16_t i; + + if (hw->retimer.mac_type == IFPGA_RAWDEV_RETIMER_MAC_TYPE_10GE_XFI) { + /* Disable the TX path */ +@@ -207,6 +214,11 @@ ipn3ke_rpst_dev_stop(struct rte_eth_dev *dev) + ipn3ke_xmac_rx_disable(hw, rpst->port_id, 0); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -2218,9 +2230,6 @@ ipn3ke_rpst_xstats_get struct ipn3ke_rpst_hw_port_stats hw_stats; struct rte_eth_stats stats; @@ -29200,7 +57363,7 @@ index de325c7d29..abbecfdf2e 100644 if (!ethdev) { IPN3KE_AFU_PMD_ERR("ethernet device to get statistics is NULL"); return -EINVAL; -@@ -2282,7 +2279,7 @@ ipn3ke_rpst_xstats_get +@@ -2282,7 +2291,7 @@ ipn3ke_rpst_xstats_get count++; } @@ -29209,7 +57372,7 @@ index de325c7d29..abbecfdf2e 100644 for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) { xstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) + ipn3ke_rpst_hw_port_strings[i].offset); -@@ -2290,7 +2287,7 @@ ipn3ke_rpst_xstats_get +@@ -2290,7 +2299,7 @@ ipn3ke_rpst_xstats_get count++; } @@ -29218,7 +57381,7 @@ index de325c7d29..abbecfdf2e 100644 for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) { for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) { xstats[count].value = -@@ -2302,7 +2299,7 @@ ipn3ke_rpst_xstats_get +@@ -2302,7 +2311,7 @@ ipn3ke_rpst_xstats_get } } @@ -29227,7 +57390,7 @@ index de325c7d29..abbecfdf2e 100644 for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) { for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) { xstats[count].value = -@@ -2340,7 +2337,7 @@ __rte_unused unsigned int limit) +@@ -2340,7 +2349,7 @@ __rte_unused unsigned int limit) count++; } @@ -29236,7 +57399,7 @@ index de325c7d29..abbecfdf2e 100644 for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) { snprintf(xstats_names[count].name, sizeof(xstats_names[count].name), -@@ -2349,7 +2346,7 @@ __rte_unused unsigned int limit) +@@ -2349,7 +2358,7 @@ __rte_unused unsigned int limit) count++; } @@ -29245,7 +57408,7 @@ index de325c7d29..abbecfdf2e 100644 for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) { for (prio = 0; prio < 8; prio++) { snprintf(xstats_names[count].name, -@@ -2361,7 +2358,7 @@ __rte_unused unsigned int limit) +@@ -2361,7 +2370,7 @@ __rte_unused unsigned int limit) } } @@ -29254,6 +57417,15 @@ index de325c7d29..abbecfdf2e 100644 for (i = 0; i < 
IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) { for (prio = 0; prio < 8; prio++) { snprintf(xstats_names[count].name, +@@ -2582,7 +2591,7 @@ ipn3ke_rpst_scan_handle_request(__rte_unused void *param) + } + rte_delay_us(50 * MS); + +- if (num == 0xffffff) ++ if (num == 0 || num == 0xffffff) + return NULL; + } + diff --git a/dpdk/drivers/net/ipn3ke/meson.build b/dpdk/drivers/net/ipn3ke/meson.build index 4bf739809e..104d2f58e5 100644 --- a/dpdk/drivers/net/ipn3ke/meson.build @@ -29303,7 +57475,7 @@ index 8eb773391b..6ef965dbb6 100644 * @hw: pointer to hardware structure * @cmd: The control word we are setting. diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -index fe61dba81d..31d06b6110 100644 +index fe61dba81d..e9dd243d5b 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -128,6 +128,13 @@ @@ -29488,7 +57660,51 @@ index fe61dba81d..31d06b6110 100644 #endif return ret; -@@ -4236,7 +4299,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -3793,23 +3856,32 @@ static int + ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- u16 eeprom_verh, eeprom_verl; +- u32 etrack_id; ++ struct ixgbe_nvm_version nvm_ver; + int ret; + +- ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); +- ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); ++ ixgbe_get_oem_prod_version(hw, &nvm_ver); ++ if (nvm_ver.oem_valid) { ++ snprintf(fw_version, fw_size, "%x.%x.%x", ++ nvm_ver.oem_major, nvm_ver.oem_minor, ++ nvm_ver.oem_release); ++ return 0; ++ } + +- etrack_id = (eeprom_verh << 16) | eeprom_verl; +- ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); ++ ixgbe_get_etk_id(hw, &nvm_ver); ++ ixgbe_get_orom_version(hw, &nvm_ver); ++ ++ if (nvm_ver.or_valid) { ++ snprintf(fw_version, fw_size, "0x%08x, %d.%d.%d", ++ nvm_ver.etk_id, nvm_ver.or_major, ++ nvm_ver.or_build, nvm_ver.or_patch); ++ return 0; ++ } ++ ++ ret = snprintf(fw_version, fw_size, "0x%08x", nvm_ver.etk_id); + if (ret < 0) + return -EINVAL; + +- ret += 1; /* add the size of '\0' */ +- if (fw_size < (size_t)ret) +- return ret; +- else +- return 0; ++ return (fw_size < (size_t)ret++) ? ret : 0; + } + + static int +@@ -4236,7 +4308,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &link); } @@ -29498,7 +57714,7 @@ index fe61dba81d..31d06b6110 100644 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); if ((esdp_reg & IXGBE_ESDP_SDP3)) link_up = 0; -@@ -4603,7 +4667,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -4603,7 +4676,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) * @param handle * Pointer to interrupt handle. * @param param @@ -29507,7 +57723,7 @@ index fe61dba81d..31d06b6110 100644 * * @return * void -@@ -4659,7 +4723,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) +@@ -4659,7 +4732,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) * @param handle * Pointer to interrupt handle. 
* @param param @@ -29516,7 +57732,7 @@ index fe61dba81d..31d06b6110 100644 * * @return * void -@@ -5921,7 +5985,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) +@@ -5921,7 +5994,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) /* Configure all RX queues of VF */ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { /* Force all queue use vector 0, @@ -29525,7 +57741,7 @@ index fe61dba81d..31d06b6110 100644 */ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); rte_intr_vec_list_index_set(intr_handle, q_idx, -@@ -6256,7 +6320,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, +@@ -6256,7 +6329,7 @@ ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, * @param * dev: Pointer to struct rte_eth_dev. * index: the index the filter allocates. @@ -29534,7 +57750,7 @@ index fe61dba81d..31d06b6110 100644 * rx_queue: the queue id the filter assigned to. * * @return -@@ -6872,7 +6936,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev) +@@ -6872,7 +6945,7 @@ ixgbe_timesync_disable(struct rte_eth_dev *dev) /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); @@ -29543,7 +57759,42 @@ index fe61dba81d..31d06b6110 100644 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); return 0; -@@ -8225,6 +8289,8 @@ ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) +@@ -7725,9 +7798,13 @@ static int + ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) + { + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ int mode = IXGBEVF_XCAST_MODE_NONE; + int ret; + +- switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { ++ if (dev->data->all_multicast) ++ mode = IXGBEVF_XCAST_MODE_ALLMULTI; ++ ++ switch (hw->mac.ops.update_xcast_mode(hw, mode)) { + case IXGBE_SUCCESS: + ret = 0; + break; +@@ -7749,6 +7826,9 @@ ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) + int ret; + int mode = IXGBEVF_XCAST_MODE_ALLMULTI; + ++ if (dev->data->promiscuous) ++ return 0; ++ + switch (hw->mac.ops.update_xcast_mode(hw, mode)) { + case IXGBE_SUCCESS: + ret = 0; +@@ -7770,6 +7850,9 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + ++ if (dev->data->promiscuous) ++ return 0; ++ + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { + case IXGBE_SUCCESS: + ret = 0; +@@ -8225,6 +8308,8 @@ ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); @@ -29589,7 +57840,7 @@ index 7894047829..834c1b3f51 100644 break; case 1: diff --git a/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/dpdk/drivers/net/ixgbe/ixgbe_flow.c -index bdc9d4796c..368342872a 100644 +index bdc9d4796c..6c5ca631d2 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_flow.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_flow.c @@ -135,7 +135,7 @@ const struct rte_flow_action *next_no_void_action( @@ -29601,6 +57852,32 @@ index bdc9d4796c..368342872a 100644 * rte_flow_item is using big endian, rte_flow_attr and * rte_flow_action are using CPU order. 
* Because the pattern is used to describe the packets, +@@ -1918,9 +1918,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + + /* check src addr mask */ + for (j = 0; j < 16; j++) { +- if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) { +- rule->mask.src_ipv6_mask |= 1 << j; +- } else if (ipv6_mask->hdr.src_addr[j] != 0) { ++ if (ipv6_mask->hdr.src_addr[j] == 0) { ++ rule->mask.src_ipv6_mask &= ~(1 << j); ++ } else if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +@@ -1931,9 +1931,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev, + + /* check dst addr mask */ + for (j = 0; j < 16; j++) { +- if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) { +- rule->mask.dst_ipv6_mask |= 1 << j; +- } else if (ipv6_mask->hdr.dst_addr[j] != 0) { ++ if (ipv6_mask->hdr.dst_addr[j] == 0) { ++ rule->mask.dst_ipv6_mask &= ~(1 << j); ++ } else if (ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) { + memset(rule, 0, sizeof(struct ixgbe_fdir_rule)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, @@ -3261,7 +3261,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev, /** @@ -29624,7 +57901,7 @@ index 944c9f2380..c353ae33b4 100644 IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0); diff --git a/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/dpdk/drivers/net/ixgbe/ixgbe_pf.c -index 9f1bd0a62b..c73833b7ae 100644 +index 9f1bd0a62b..0a0f639e39 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_pf.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_pf.c @@ -242,7 +242,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev) @@ -29636,11 +57913,58 @@ index 9f1bd0a62b..c73833b7ae 100644 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); /* clear VMDq map to scan rar 127 */ +@@ -747,9 +747,9 @@ ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + + switch (xcast_mode) { + case IXGBEVF_XCAST_MODE_NONE: +- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | ++ disable = IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; +- enable = 0; ++ enable = IXGBE_VMOLR_BAM; + break; + case IXGBEVF_XCAST_MODE_MULTI: + disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; +@@ -771,9 +771,9 @@ ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + return -1; + } + +- disable = 0; ++ disable = IXGBE_VMOLR_VPE; + enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | +- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; ++ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; + break; + default: + return -1; diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -index d7c80d4242..99e928a2a9 100644 +index d7c80d4242..c137707869 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -@@ -1954,7 +1954,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, +@@ -1818,11 +1818,22 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. ++ * ++ * Meanwhile, to prevent the CPU from executing out of order, we ++ * need to use a proper memory barrier to ensure the memory ++ * ordering below. 
+ */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + break; ++ ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + /* +@@ -1954,7 +1965,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * register. * Update the RDT with the value of the last processed RX descriptor * minus 1, to guarantee that the RDT register is never equal to the @@ -29649,7 +57973,57 @@ index d7c80d4242..99e928a2a9 100644 * hardware point of view... */ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); -@@ -2303,7 +2303,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, +@@ -2089,32 +2100,10 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, + + next_desc: + /* +- * The code in this whole file uses the volatile pointer to +- * ensure the read ordering of the status and the rest of the +- * descriptor fields (on the compiler level only!!!). This is so +- * UGLY - why not to just use the compiler barrier instead? DPDK +- * even has the rte_compiler_barrier() for that. +- * +- * But most importantly this is just wrong because this doesn't +- * ensure memory ordering in a general case at all. For +- * instance, DPDK is supposed to work on Power CPUs where +- * compiler barrier may just not be enough! +- * +- * I tried to write only this function properly to have a +- * starting point (as a part of an LRO/RSC series) but the +- * compiler cursed at me when I tried to cast away the +- * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm +- * keeping it the way it is for now. +- * +- * The code in this file is broken in so many other places and +- * will just not work on a big endian CPU anyway therefore the +- * lines below will have to be revisited together with the rest +- * of the ixgbe PMD. +- * +- * TODO: +- * - Get rid of "volatile" and let the compiler do its job. +- * - Use the proper memory barrier (rte_rmb()) to ensure the +- * memory ordering below. ++ * "Volatile" only prevents caching of the variable marked ++ * volatile. Most important, "volatile" cannot prevent the CPU ++ * from executing out of order. So, it is necessary to use a ++ * proper memory barrier to ensure the memory ordering below. + */ + rxdp = &rx_ring[rx_id]; + staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error); +@@ -2122,6 +2111,12 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, + if (!(staterr & IXGBE_RXDADV_STAT_DD)) + break; + ++ /* ++ * Use acquire fence to ensure that status_error which includes ++ * DD bit is loaded before loading of other descriptor words. ++ */ ++ rte_atomic_thread_fence(__ATOMIC_ACQUIRE); ++ + rxd = *rxdp; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " +@@ -2303,7 +2298,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, * register. * Update the RDT with the value of the last processed RX descriptor * minus 1, to guarantee that the RDT register is never equal to the @@ -29658,7 +58032,7 @@ index d7c80d4242..99e928a2a9 100644 * hardware point of view... */ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) { -@@ -2666,7 +2666,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -2666,7 +2661,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, */ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? 
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); @@ -29667,7 +58041,23 @@ index d7c80d4242..99e928a2a9 100644 tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; if (tx_conf->tx_rs_thresh > 0) -@@ -4831,7 +4831,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) +@@ -3385,6 +3380,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -3394,6 +3390,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + ixgbe_rx_queue_release_mbufs(rxq); + ixgbe_reset_rx_queue(adapter, rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + /* If loopback mode was enabled, reconfigure the link accordingly */ +@@ -4831,7 +4828,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) dev->data->port_id); dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc; } else { @@ -29676,7 +58066,7 @@ index d7c80d4242..99e928a2a9 100644 "single allocation) " "Scattered Rx callback " "(port=%d).", -@@ -5170,7 +5170,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) +@@ -5170,7 +5167,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) /* * Setup the Checksum Register. * Disable Full-Packet Checksum which is mutually exclusive with RSS. @@ -29685,6 +58075,24 @@ index d7c80d4242..99e928a2a9 100644 */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; +@@ -5831,6 +5828,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); ++ else ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + +@@ -5848,6 +5847,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); ++ else ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1); + diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index 1eed949495..bb34b27168 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -29756,7 +58164,7 @@ index 079cf01269..7886644412 100644 */ ret = memif_msg_receive_init(cc, &msg); diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c -index e3d523af57..205d08b028 100644 +index e3d523af57..abaf98c65e 100644 --- a/dpdk/drivers/net/memif/rte_eth_memif.c +++ b/dpdk/drivers/net/memif/rte_eth_memif.c @@ -351,13 +351,13 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) @@ -29783,7 +58191,66 @@ index e3d523af57..205d08b028 100644 ret = memif_region_init_shm(dev, /* has buffers */ 1); if (ret < 0) return ret; -@@ -1500,23 +1500,6 @@ memif_stats_reset(struct rte_eth_dev *dev) +@@ -1243,6 +1243,7 @@ memif_dev_start(struct rte_eth_dev *dev) + { + struct pmd_internals *pmd = dev->data->dev_private; + int ret = 0; ++ uint16_t i; + + switch (pmd->role) { + case MEMIF_ROLE_CLIENT: +@@ -1257,13 +1258,28 @@ memif_dev_start(struct rte_eth_dev *dev) + break; + } + ++ if (ret == 0) { ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ 
} ++ + return ret; + } + + static int + memif_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + memif_disconnect(dev); ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -1447,8 +1463,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + stats->opackets = 0; + stats->obytes = 0; + +- tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings : +- pmd->run.num_s2c_rings; ++ tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings : ++ pmd->run.num_c2s_rings; + nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : + RTE_ETHDEV_QUEUE_STAT_CNTRS; + +@@ -1461,8 +1477,8 @@ memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + stats->ibytes += mq->n_bytes; + } + +- tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings : +- pmd->run.num_c2s_rings; ++ tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings : ++ pmd->run.num_s2c_rings; + nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : + RTE_ETHDEV_QUEUE_STAT_CNTRS; + +@@ -1500,23 +1516,6 @@ memif_stats_reset(struct rte_eth_dev *dev) return 0; } @@ -29807,7 +58274,7 @@ index e3d523af57..205d08b028 100644 static const struct eth_dev_ops ops = { .dev_start = memif_dev_start, .dev_stop = memif_dev_stop, -@@ -1527,8 +1510,6 @@ static const struct eth_dev_ops ops = { +@@ -1527,8 +1526,6 @@ static const struct eth_dev_ops ops = { .rx_queue_setup = memif_rx_queue_setup, .rx_queue_release = memif_rx_queue_release, .tx_queue_release = memif_tx_queue_release, @@ -29829,6 +58296,78 @@ index 99a30eab8f..a038c1ec1b 100644 ext_deps += declare_dependency(compile_args: ibv_cflags.split()) endif if static_ibverbs +diff --git a/dpdk/drivers/net/mlx4/mlx4.c b/dpdk/drivers/net/mlx4/mlx4.c +index 3f3c4a7c72..b89dfab215 100644 +--- a/dpdk/drivers/net/mlx4/mlx4.c ++++ b/dpdk/drivers/net/mlx4/mlx4.c +@@ -292,6 +292,7 @@ mlx4_dev_start(struct rte_eth_dev *dev) + { + struct mlx4_priv *priv = dev->data->dev_private; + struct rte_flow_error error; ++ uint16_t i; + int ret; + + if (priv->started) +@@ -327,6 +328,12 @@ mlx4_dev_start(struct rte_eth_dev *dev) + dev->rx_pkt_burst = mlx4_rx_burst; + /* Enable datapath on secondary process. 
*/ + mlx4_mp_req_start_rxtx(dev); ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + err: + mlx4_dev_stop(dev); +@@ -345,6 +352,7 @@ static int + mlx4_dev_stop(struct rte_eth_dev *dev) + { + struct mlx4_priv *priv = dev->data->dev_private; ++ uint16_t i; + + if (!priv->started) + return 0; +@@ -359,6 +367,11 @@ mlx4_dev_stop(struct rte_eth_dev *dev) + mlx4_rxq_intr_disable(priv); + mlx4_rss_deinit(priv); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -877,6 +890,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + snprintf(name, sizeof(name), "%s port %u", + mlx4_glue->get_device_name(ibv_dev), port); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ int fd; ++ + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { + ERROR("can not attach rte ethdev"); +@@ -899,13 +914,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) + if (err) + goto err_secondary; + /* Receive command fd from primary process. */ +- err = mlx4_mp_req_verbs_cmd_fd(eth_dev); +- if (err < 0) { ++ fd = mlx4_mp_req_verbs_cmd_fd(eth_dev); ++ if (fd < 0) { + err = rte_errno; + goto err_secondary; + } + /* Remap UAR for Tx queues. */ +- err = mlx4_tx_uar_init_secondary(eth_dev, err); ++ err = mlx4_tx_uar_init_secondary(eth_dev, fd); ++ close(fd); + if (err) { + err = rte_errno; + goto err_secondary; diff --git a/dpdk/drivers/net/mlx4/mlx4.h b/dpdk/drivers/net/mlx4/mlx4.h index 2d0c512f79..4023a47602 100644 --- a/dpdk/drivers/net/mlx4/mlx4.h @@ -29855,8 +58394,41 @@ index d606ec8ca7..ce74c51ce2 100644 */ int mlx4_stats_reset(struct rte_eth_dev *dev) +diff --git a/dpdk/drivers/net/mlx4/mlx4_mp.c b/dpdk/drivers/net/mlx4/mlx4_mp.c +index 8fcfb5490e..b0bb48c8f1 100644 +--- a/dpdk/drivers/net/mlx4/mlx4_mp.c ++++ b/dpdk/drivers/net/mlx4/mlx4_mp.c +@@ -5,6 +5,7 @@ + + #include <stdio.h> + #include <time.h> ++#include <unistd.h> + + #include <rte_eal.h> + #include <ethdev_driver.h> +@@ -134,15 +135,19 @@ mp_secondary_handle(const struct rte_mp_msg *mp_msg, const void *peer) + mlx4_tx_uar_uninit_secondary(dev); + mlx4_proc_priv_uninit(dev); + ret = mlx4_proc_priv_init(dev); +- if (ret) ++ if (ret) { ++ close(mp_msg->fds[0]); + return -rte_errno; ++ } + ret = mlx4_tx_uar_init_secondary(dev, mp_msg->fds[0]); + if (ret) { ++ close(mp_msg->fds[0]); + mlx4_proc_priv_uninit(dev); + return -rte_errno; + } + } + #endif ++ close(mp_msg->fds[0]); + rte_mb(); + mp_init_msg(dev, &mp_res, param->type); + res->result = 0; diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -index c19825ee52..fadcbd7ef7 100644 +index c19825ee52..dbfe00ea6d 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c @@ -38,6 +38,7 @@ @@ -29867,10 +58439,30 @@ index c19825ee52..fadcbd7ef7 100644 #include "mlx5.h" #include "mlx5_rxtx.h" -@@ -760,6 +761,56 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh) - } - } +@@ -744,6 +745,7 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh) + + for (i = 0; i < sh->max_port; ++i) { + struct rte_eth_dev *dev; ++ struct 
mlx5_priv *priv; + if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) { + /* +@@ -754,12 +756,67 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh) + } + dev = &rte_eth_devices[sh->port[i].ih_port_id]; + MLX5_ASSERT(dev); +- if (dev->data->dev_conf.intr_conf.rmv) ++ priv = dev->data->dev_private; ++ MLX5_ASSERT(priv); ++ if (!priv->rmv_notified && dev->data->dev_conf.intr_conf.rmv) { ++ /* Notify driver about removal only once. */ ++ priv->rmv_notified = 1; + rte_eth_dev_callback_process + (dev, RTE_ETH_EVENT_INTR_RMV, NULL); ++ } ++ } ++} ++ +static void +mlx5_dev_interrupt_nl_cb(struct nlmsghdr *hdr, void *cb_arg) +{ @@ -29905,9 +58497,9 @@ index c19825ee52..fadcbd7ef7 100644 + (dev, RTE_ETH_EVENT_INTR_LSC, NULL); + break; + } -+ } -+} -+ + } + } + +void +mlx5_dev_interrupt_handler_nl(void *arg) +{ @@ -29924,7 +58516,46 @@ index c19825ee52..fadcbd7ef7 100644 /** * Handle shared asynchronous events the NIC (removal event * and link status change). Supports multiport IB device. -@@ -823,18 +874,6 @@ mlx5_dev_interrupt_handler(void *cb_arg) +@@ -778,21 +835,29 @@ mlx5_dev_interrupt_handler(void *cb_arg) + struct rte_eth_dev *dev; + uint32_t tmp; + +- if (mlx5_glue->get_async_event(sh->cdev->ctx, &event)) ++ if (mlx5_glue->get_async_event(sh->cdev->ctx, &event)) { ++ if (errno == EIO) { ++ DRV_LOG(DEBUG, ++ "IBV async event queue closed on: %s", ++ sh->ibdev_name); ++ mlx5_dev_interrupt_device_fatal(sh); ++ } + break; +- /* Retrieve and check IB port index. */ +- tmp = (uint32_t)event.element.port_num; +- if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) { ++ } ++ if (event.event_type == IBV_EVENT_DEVICE_FATAL) { + /* +- * The DEVICE_FATAL event is called once for +- * entire device without port specifying. +- * We should notify all existing ports. ++ * The DEVICE_FATAL event can be called by kernel ++ * twice - from mlx5 and uverbs layers, and port ++ * index is not applicable. We should notify all ++ * existing ports. + */ +- mlx5_glue->ack_async_event(&event); + mlx5_dev_interrupt_device_fatal(sh); ++ mlx5_glue->ack_async_event(&event); + continue; + } +- MLX5_ASSERT(tmp && (tmp <= sh->max_port)); ++ /* Retrieve and check IB port index. */ ++ tmp = (uint32_t)event.element.port_num; ++ MLX5_ASSERT(tmp <= sh->max_port); + if (!tmp) { + /* Unsupported device level event. 
*/ + mlx5_glue->ack_async_event(&event); +@@ -823,18 +888,6 @@ mlx5_dev_interrupt_handler(void *cb_arg) tmp = sh->port[tmp - 1].ih_port_id; dev = &rte_eth_devices[tmp]; MLX5_ASSERT(dev); @@ -29943,28 +58574,53 @@ index c19825ee52..fadcbd7ef7 100644 DRV_LOG(DEBUG, "port %u cannot handle an unknown event (type %d)", dev->data->port_id, event.event_type); -@@ -1079,7 +1118,6 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) +@@ -1066,7 +1119,8 @@ int + mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) + { + char ifname[IF_NAMESIZE]; +- char port_name[IF_NAMESIZE]; ++ char *port_name = NULL; ++ size_t port_name_size = 0; + FILE *file; + struct mlx5_switch_info data = { + .master = 0, +@@ -1079,7 +1133,7 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) bool port_switch_id_set = false; bool device_dir = false; char c; - int ret; ++ ssize_t line_size; if (!if_indextoname(ifindex, ifname)) { rte_errno = errno; -@@ -1095,10 +1133,9 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) +@@ -1095,10 +1149,23 @@ mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) file = fopen(phys_port_name, "rb"); if (file != NULL) { - ret = fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", port_name); - fclose(file); - if (ret == 1) -+ if (fgets(port_name, IF_NAMESIZE, file) != NULL) ++ char *tail_nl; ++ ++ line_size = getline(&port_name, &port_name_size, file); ++ if (line_size < 0) { ++ free(port_name); ++ fclose(file); ++ rte_errno = errno; ++ return -rte_errno; ++ } else if (line_size > 0) { ++ /* Remove tailing newline character. */ ++ tail_nl = strchr(port_name, '\n'); ++ if (tail_nl) ++ *tail_nl = '\0'; mlx5_translate_port_name(port_name, &data); ++ } ++ free(port_name); + fclose(file); } file = fopen(phys_switch_id, "rb"); if (file == NULL) { -@@ -1347,15 +1384,16 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) +@@ -1347,15 +1414,16 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) } } else { ret = _mlx5_os_read_dev_counters(dev, -1, stats); @@ -29984,13 +58640,62 @@ index c19825ee52..fadcbd7ef7 100644 xstats_ctrl->xstats[i] = stats[i]; else stats[i] = xstats_ctrl->xstats[i]; +@@ -1644,10 +1712,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) + */ + int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) + { +- struct { +- struct ethtool_sset_info hdr; +- uint32_t buf[1]; +- } sset_info; ++ struct ethtool_sset_info *sset_info = NULL; + struct ethtool_drvinfo drvinfo; + struct ifreq ifr; + struct ethtool_gstrings *strings = NULL; +@@ -1658,15 +1723,21 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) + int32_t i; + int ret; + +- sset_info.hdr.cmd = ETHTOOL_GSSET_INFO; +- sset_info.hdr.reserved = 0; +- sset_info.hdr.sset_mask = 1ULL << ETH_SS_PRIV_FLAGS; ++ sset_info = mlx5_malloc(0, sizeof(struct ethtool_sset_info) + ++ sizeof(uint32_t), 0, SOCKET_ID_ANY); ++ if (sset_info == NULL) { ++ rte_errno = ENOMEM; ++ return -rte_errno; ++ } ++ sset_info->cmd = ETHTOOL_GSSET_INFO; ++ sset_info->reserved = 0; ++ sset_info->sset_mask = 1ULL << ETH_SS_PRIV_FLAGS; + ifr.ifr_data = (caddr_t)&sset_info; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (!ret) { +- const uint32_t *sset_lengths = sset_info.hdr.data; ++ const uint32_t *sset_lengths = sset_info->data; + +- len = sset_info.hdr.sset_mask ? sset_lengths[0] : 0; ++ len = sset_info->sset_mask ? 
sset_lengths[0] : 0; + } else if (ret == -EOPNOTSUPP) { + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; +@@ -1739,5 +1810,6 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) + ret = !!(flags.data & (1U << i)); + exit: + mlx5_free(strings); ++ mlx5_free(sset_info); + return ret; + } diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c -index 893f00b824..a5956c255a 100644 +index 893f00b824..08337a9b24 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_flow_os.c -@@ -14,7 +14,8 @@ mlx5_flow_os_init_workspace_once(void) +@@ -12,9 +12,10 @@ static rte_thread_key key_workspace; + int + mlx5_flow_os_init_workspace_once(void) { - if (rte_thread_key_create(&key_workspace, flow_release_workspace)) { +- if (rte_thread_key_create(&key_workspace, flow_release_workspace)) { ++ if (rte_thread_key_create(&key_workspace, NULL)) { DRV_LOG(ERR, "Can't create flow workspace data thread key."); - return -ENOMEM; + rte_errno = ENOMEM; @@ -29998,8 +58703,32 @@ index 893f00b824..a5956c255a 100644 } return 0; } +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c +index c448a3e9eb..0ba2208fe0 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_mp_os.c +@@ -177,14 +177,18 @@ struct rte_mp_msg mp_res; + mlx5_tx_uar_uninit_secondary(dev); + mlx5_proc_priv_uninit(dev); + ret = mlx5_proc_priv_init(dev); +- if (ret) ++ if (ret) { ++ close(mp_msg->fds[0]); + return -rte_errno; ++ } + ret = mlx5_tx_uar_init_secondary(dev, mp_msg->fds[0]); + if (ret) { ++ close(mp_msg->fds[0]); + mlx5_proc_priv_uninit(dev); + return -rte_errno; + } + } ++ close(mp_msg->fds[0]); + rte_mb(); + mp_init_msg(&priv->mp_id, &mp_res, param->type); + res->result = 0; diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -index c29fe3d92b..792dd2cb22 100644 +index c29fe3d92b..85eda47f7d 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c @@ -112,7 +112,7 @@ static struct mlx5_indexed_pool_config icfg[] = { @@ -30055,6 +58784,19 @@ index c29fe3d92b..792dd2cb22 100644 } /** +@@ -718,10 +722,10 @@ mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused) + */ + if (!priv->sh->drop_action_check_flag) { + if (!mlx5_flow_discover_dr_action_support(dev)) +- priv->sh->dr_drop_action_en = 1; ++ priv->sh->dr_root_drop_action_en = 1; + priv->sh->drop_action_check_flag = 1; + } +- if (priv->sh->dr_drop_action_en) ++ if (priv->sh->dr_root_drop_action_en) + priv->root_drop_action = priv->sh->dr_drop_action; + else + priv->root_drop_action = priv->drop_queue.hrxq->action; @@ -881,10 +885,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, unsigned int mpls_en = 0; unsigned int swp = 0; @@ -30066,7 +58808,31 @@ index c29fe3d92b..792dd2cb22 100644 struct rte_ether_addr mac; char name[RTE_ETH_NAME_MAX_LEN]; int own_domain_id = 0; -@@ -981,10 +981,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -933,6 +933,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct mlx5_mp_id mp_id; ++ int fd; + + eth_dev = rte_eth_dev_attach_secondary(name); + if (eth_dev == NULL) { +@@ -949,11 +950,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + return NULL; + mlx5_mp_id_init(&mp_id, eth_dev->data->port_id); + /* Receive command fd from primary process */ +- err = 
mlx5_mp_req_verbs_cmd_fd(&mp_id); +- if (err < 0) ++ fd = mlx5_mp_req_verbs_cmd_fd(&mp_id); ++ if (fd < 0) + goto err_secondary; + /* Remap UAR for Tx queues. */ +- err = mlx5_tx_uar_init_secondary(eth_dev, err); ++ err = mlx5_tx_uar_init_secondary(eth_dev, fd); ++ close(fd); + if (err) + goto err_secondary; + /* +@@ -981,10 +983,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, strerror(rte_errno)); goto error; } @@ -30077,7 +58843,7 @@ index c29fe3d92b..792dd2cb22 100644 sh = mlx5_alloc_shared_dev_ctx(spawn, config); if (!sh) return NULL; -@@ -1039,15 +1035,17 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1039,15 +1037,17 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, mprq_caps.max_single_wqe_log_num_of_strides); DRV_LOG(DEBUG, "\tsupported_qpts: %d", mprq_caps.supported_qpts); @@ -30099,7 +58865,7 @@ index c29fe3d92b..792dd2cb22 100644 mprq_caps.max_single_wqe_log_num_of_strides; } #endif -@@ -1088,7 +1086,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1088,7 +1088,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, " old OFED/rdma-core version or firmware configuration"); #endif config->mpls_en = mpls_en; @@ -30108,7 +58874,7 @@ index c29fe3d92b..792dd2cb22 100644 /* Check port status. */ if (spawn->phys_port <= UINT8_MAX) { /* Legacy Verbs api only support u8 port number. */ -@@ -1135,7 +1133,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1135,7 +1135,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, priv->mtu = RTE_ETHER_MTU; /* Some internal functions rely on Netlink sockets, open them now. */ priv->nl_socket_rdma = nl_rdma; @@ -30117,7 +58883,7 @@ index c29fe3d92b..792dd2cb22 100644 priv->representor = !!switch_info->representor; priv->master = !!switch_info->master; priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; -@@ -1243,6 +1241,32 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1243,6 +1243,32 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, } /* Override some values set by hardware configuration. */ mlx5_args(config, dpdk_dev->devargs); @@ -30150,7 +58916,7 @@ index c29fe3d92b..792dd2cb22 100644 err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev); if (err) goto error; -@@ -1253,12 +1277,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1253,12 +1279,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \ !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) DRV_LOG(DEBUG, "counters are not supported"); @@ -30163,7 +58929,15 @@ index c29fe3d92b..792dd2cb22 100644 #endif config->ind_table_max_size = sh->device_attr.max_rwq_indirection_table_size; -@@ -1548,36 +1566,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1306,6 +1326,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + config->mps == MLX5_MPW_ENHANCED ? "enhanced " : + config->mps == MLX5_MPW ? "legacy " : "", + config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); ++ + if (sh->devx) { + config->hca_attr = sh->cdev->config.hca_attr; + sh->steering_format_version = +@@ -1548,36 +1569,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, config->hw_fcs_strip = 0; DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported", (config->hw_fcs_strip ? "" : "not ")); @@ -30201,7 +58975,7 @@ index c29fe3d92b..792dd2cb22 100644 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); config->mprq.enabled = 0; } -@@ -1676,20 +1665,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1676,20 +1668,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, /* Bring Ethernet device up. 
*/ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", eth_dev->data->port_id); @@ -30227,7 +59001,7 @@ index c29fe3d92b..792dd2cb22 100644 /* Detect minimal data bytes to inline. */ mlx5_set_min_inline(spawn, config); /* Store device configuration on private structure. */ -@@ -1743,7 +1724,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1743,7 +1727,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev); if (!priv->drop_queue.hrxq) goto error; @@ -30236,7 +59010,7 @@ index c29fe3d92b..792dd2cb22 100644 if (!priv->sh->flow_priority_check_flag) { /* Supported Verbs flow priority number detection. */ err = mlx5_flow_discover_priorities(eth_dev); -@@ -1756,12 +1737,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, +@@ -1756,12 +1740,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, err = -err; goto error; } @@ -30249,7 +59023,17 @@ index c29fe3d92b..792dd2cb22 100644 mlx5_set_metadata_mask(eth_dev); if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && !priv->sh->dv_regc0_mask) { -@@ -2068,7 +2043,8 @@ mlx5_device_bond_pci_match(const char *ibdev_name, +@@ -1825,6 +1803,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + return eth_dev; + error: + if (priv) { ++ priv->sh->port[priv->dev_port - 1].nl_ih_port_id = ++ RTE_MAX_ETHPORTS; ++ rte_io_wmb(); + if (priv->mreg_cp_tbl) + mlx5_hlist_destroy(priv->mreg_cp_tbl); + if (priv->sh) +@@ -2068,7 +2049,8 @@ mlx5_device_bond_pci_match(const char *ibdev_name, } static void @@ -30259,7 +59043,7 @@ index c29fe3d92b..792dd2cb22 100644 { memset(config, 0, sizeof(*config)); config->mps = MLX5_ARG_UNSET; -@@ -2080,6 +2056,10 @@ mlx5_os_config_default(struct mlx5_dev_config *config) +@@ -2080,6 +2062,11 @@ mlx5_os_config_default(struct mlx5_dev_config *config) config->vf_nl_en = 1; config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; @@ -30267,10 +59051,11 @@ index c29fe3d92b..792dd2cb22 100644 + cconf->hca_attr.log_min_stride_wqe_sz : + MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE; + config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; ++ config->mprq.log_stride_size = MLX5_ARG_UNSET; config->dv_esw_en = 1; config->dv_flow_en = 1; config->decap_en = 1; -@@ -2156,8 +2136,8 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, +@@ -2156,8 +2143,8 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, * matching ones, gathering into the list. */ struct ibv_device *ibv_match[ret + 1]; @@ -30281,7 +59066,7 @@ index c29fe3d92b..792dd2cb22 100644 unsigned int i; while (ret-- > 0) { -@@ -2209,9 +2189,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, +@@ -2209,9 +2196,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, if (!nd) { /* No device matches, just complain and bail out. */ DRV_LOG(WARNING, @@ -30293,7 +59078,7 @@ index c29fe3d92b..792dd2cb22 100644 owner_pci.devid, owner_pci.function); rte_errno = ENOENT; ret = -rte_errno; -@@ -2300,7 +2280,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, +@@ -2300,7 +2287,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, /* * Force standalone bonding * device for ROCE LAG @@ -30302,7 +59087,7 @@ index c29fe3d92b..792dd2cb22 100644 */ list[ns].info.master = 0; list[ns].info.representor = 0; -@@ -2496,7 +2476,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, +@@ -2496,7 +2483,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, uint32_t restore; /* Default configuration. 
*/ @@ -30311,7 +59096,16 @@ index c29fe3d92b..792dd2cb22 100644 dev_config.vf = dev_config_vf; list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], &dev_config, ð_da); -@@ -2632,16 +2612,16 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev) +@@ -2592,7 +2579,7 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev, + dev->devargs->cls_str); + return -rte_errno; + } +- if (eth_da->type == RTE_ETH_REPRESENTOR_NONE) { ++ if (eth_da->type == RTE_ETH_REPRESENTOR_NONE && dev->devargs->args) { + /* Parse legacy device argument */ + ret = rte_eth_devargs_parse(dev->devargs->args, eth_da); + if (ret) { +@@ -2632,16 +2619,16 @@ mlx5_os_pci_probe(struct mlx5_common_device *cdev) for (p = 0; p < eth_da.nb_ports; p++) { ret = mlx5_os_pci_probe_pf(cdev, ð_da, eth_da.ports[p]); @@ -30338,7 +59132,7 @@ index c29fe3d92b..792dd2cb22 100644 } } else { ret = mlx5_os_pci_probe_pf(cdev, ð_da, 0); -@@ -2666,7 +2646,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev) +@@ -2666,7 +2653,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev) if (ret != 0) return ret; /* Set default config data. */ @@ -30347,7 +59141,7 @@ index c29fe3d92b..792dd2cb22 100644 config.sf = 1; /* Init spawn data. */ spawn.max_port = 1; -@@ -2733,6 +2713,40 @@ mlx5_os_net_cleanup(void) +@@ -2733,6 +2720,40 @@ mlx5_os_net_cleanup(void) mlx5_pmd_socket_uninit(); } @@ -30388,7 +59182,7 @@ index c29fe3d92b..792dd2cb22 100644 /** * Install shared asynchronous device events handler. * This function is implemented to support event sharing -@@ -2770,6 +2784,18 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) +@@ -2770,6 +2791,18 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) rte_intr_fd_set(sh->intr_handle, -1); } } @@ -30407,7 +59201,7 @@ index c29fe3d92b..792dd2cb22 100644 if (sh->devx) { #ifdef HAVE_IBV_DEVX_ASYNC sh->intr_handle_devx = -@@ -2817,10 +2843,19 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) +@@ -2817,10 +2850,19 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) { @@ -30476,10 +59270,59 @@ index 005904bdfe..7ee2460a23 100644 DRV_LOG(WARNING, "Can not create Netlink socket" diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index aa5f313c1a..2234dc7563 100644 +index aa5f313c1a..5645e8656c 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c -@@ -1172,12 +1172,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, +@@ -518,22 +518,37 @@ mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh) + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object to free ++ * ++ * @return ++ * 0 on success, otherwise negative errno value and rte_errno is set. 
+ */ +-static void ++static int + mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) + { ++ void *pools; + int i; + ++ pools = mlx5_malloc(MLX5_MEM_ZERO, ++ sizeof(struct mlx5_flow_counter_pool *) * ++ MLX5_COUNTER_POOLS_MAX_NUM, ++ 0, SOCKET_ID_ANY); ++ if (!pools) { ++ DRV_LOG(ERR, "Counter management allocation was failed."); ++ rte_errno = ENOMEM; ++ return -rte_errno; ++ } + memset(&sh->cmng, 0, sizeof(sh->cmng)); + TAILQ_INIT(&sh->cmng.flow_counters); + sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET; + sh->cmng.max_id = -1; ++ sh->cmng.pools = pools; + sh->cmng.last_pool_idx = POOL_IDX_INVALID; + rte_spinlock_init(&sh->cmng.pool_update_sl); + for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) { + TAILQ_INIT(&sh->cmng.counters[i]); + rte_spinlock_init(&sh->cmng.csl[i]); + } ++ return 0; + } + + /** +@@ -591,8 +606,7 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh) + claim_zero + (mlx5_flow_os_destroy_flow_action + (cnt->action)); +- if (fallback && MLX5_POOL_GET_CNT +- (pool, j)->dcs_when_free) ++ if (fallback && cnt->dcs_when_free) + claim_zero(mlx5_devx_cmd_destroy + (cnt->dcs_when_free)); + } +@@ -1172,12 +1186,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, MLX5_ASSERT(spawn->max_port); sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, sizeof(struct mlx5_dev_ctx_shared) + @@ -30495,7 +59338,7 @@ index aa5f313c1a..2234dc7563 100644 goto exit; } pthread_mutex_init(&sh->txpp.mutex, NULL); -@@ -1199,24 +1198,24 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, +@@ -1199,24 +1212,24 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx), sizeof(sh->ibdev_path) - 1); /* @@ -30525,7 +59368,21 @@ index aa5f313c1a..2234dc7563 100644 goto error; } err = mlx5_rxtx_uars_prepare(sh); -@@ -1246,19 +1245,19 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, +@@ -1236,8 +1249,12 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, + if (err) + goto error; + } ++ err = mlx5_flow_counters_mng_init(sh); ++ if (err) { ++ DRV_LOG(ERR, "Fail to initialize counters manage."); ++ goto error; ++ } + mlx5_flow_aging_init(sh); +- mlx5_flow_counters_mng_init(sh); + mlx5_flow_ipool_create(sh, config); + /* Add context to the global device list. */ + LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next); +@@ -1246,19 +1263,19 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); return sh; error: @@ -30549,7 +59406,15 @@ index aa5f313c1a..2234dc7563 100644 rte_errno = err; return NULL; } -@@ -1321,6 +1320,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) +@@ -1310,6 +1327,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) + if (LIST_EMPTY(&mlx5_dev_ctx_list)) { + mlx5_os_net_cleanup(); + mlx5_flow_os_release_workspace(); ++ mlx5_flow_workspace_gc_release(); + } + pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); + if (sh->flex_parsers_dv) { +@@ -1321,6 +1339,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) * Only primary process handles async device events. 
**/ mlx5_flow_counters_mng_close(sh); @@ -30558,7 +59423,7 @@ index aa5f313c1a..2234dc7563 100644 if (sh->aso_age_mng) { mlx5_flow_aso_age_mng_close(sh); sh->aso_age_mng = NULL; -@@ -1594,8 +1595,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) +@@ -1594,8 +1614,6 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl); mlx5_mprq_free_mp(dev); @@ -30567,7 +59432,16 @@ index aa5f313c1a..2234dc7563 100644 mlx5_os_free_shared_dr(priv); if (priv->rss_conf.rss_key != NULL) mlx5_free(priv->rss_conf.rss_key); -@@ -1642,7 +1641,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) +@@ -1639,10 +1657,16 @@ mlx5_dev_close(struct rte_eth_dev *dev) + dev->data->port_id); + if (priv->hrxqs) + mlx5_list_destroy(priv->hrxqs); ++ priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS; ++ /* ++ * The interrupt handler port id must be reset before priv is reset ++ * since 'mlx5_dev_interrupt_nl_cb' uses priv. ++ */ ++ rte_io_wmb(); /* * Free the shared context in last turn, because the cleanup * routines above may use some shared fields, like @@ -30576,7 +59450,7 @@ index aa5f313c1a..2234dc7563 100644 * ifindex if Netlink fails. */ mlx5_free_shared_dev_ctx(priv->sh); -@@ -1884,9 +1883,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque) +@@ -1884,9 +1908,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque) } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) { config->mprq.enabled = !!tmp; } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) { @@ -30588,7 +59462,7 @@ index aa5f313c1a..2234dc7563 100644 } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) { config->mprq.max_memcpy_len = tmp; } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) { -@@ -1962,7 +1961,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) +@@ -1962,7 +1986,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) if (tmp != MLX5_RCM_NONE && tmp != MLX5_RCM_LIGHT && tmp != MLX5_RCM_AGGR) { @@ -30597,7 +59471,7 @@ index aa5f313c1a..2234dc7563 100644 rte_errno = EINVAL; return -rte_errno; } -@@ -2177,17 +2176,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev) +@@ -2177,17 +2201,17 @@ mlx5_set_metadata_mask(struct rte_eth_dev *dev) break; } if (sh->dv_mark_mask && sh->dv_mark_mask != mark) @@ -30619,7 +59493,7 @@ index aa5f313c1a..2234dc7563 100644 else sh->dv_regc0_mask = reg_c0; diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 8466531060..128ebd6937 100644 +index 8466531060..6a3c48eaeb 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h @@ -275,10 +275,14 @@ struct mlx5_dev_config { @@ -30641,7 +59515,35 @@ index 8466531060..128ebd6937 100644 unsigned int max_memcpy_len; /* Maximum packet size to memcpy Rx packets. */ unsigned int min_rxqs_num; -@@ -601,6 +605,7 @@ struct mlx5_age_info { +@@ -319,9 +323,10 @@ struct mlx5_lb_ctx { + uint16_t refcnt; /* Reference count for representors. */ + }; + ++#define MLX5_COUNTER_POOLS_MAX_NUM (1 << 15) + #define MLX5_COUNTERS_PER_POOL 512 + #define MLX5_MAX_PENDING_QUERIES 4 +-#define MLX5_CNT_CONTAINER_RESIZE 64 ++#define MLX5_CNT_MR_ALLOC_BULK 64 + #define MLX5_CNT_SHARED_OFFSET 0x80000000 + #define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \ + MLX5_CNT_BATCH_OFFSET) +@@ -477,7 +482,6 @@ TAILQ_HEAD(mlx5_counter_pools, mlx5_flow_counter_pool); + /* Counter global management structure. */ + struct mlx5_flow_counter_mng { + volatile uint16_t n_valid; /* Number of valid pools. */ +- uint16_t n; /* Number of pools. 
*/ + uint16_t last_pool_idx; /* Last used pool index */ + int min_id; /* The minimum counter ID in the pools. */ + int max_id; /* The maximum counter ID in the pools. */ +@@ -546,6 +550,7 @@ struct mlx5_aso_age_action { + }; + + #define MLX5_ASO_AGE_ACTIONS_PER_POOL 512 ++#define MLX5_ASO_AGE_CONTAINER_RESIZE 64 + + struct mlx5_aso_age_pool { + struct mlx5_devx_obj *flow_hit_aso_obj; +@@ -601,6 +606,7 @@ struct mlx5_age_info { struct mlx5_dev_shared_port { uint32_t ih_port_id; uint32_t devx_ih_port_id; @@ -30649,7 +59551,7 @@ index 8466531060..128ebd6937 100644 /* * Interrupt handler port_id. Used by shared interrupt * handler to find the corresponding rte_eth device -@@ -742,6 +747,8 @@ struct mlx5_flow_meter_policy { +@@ -742,6 +748,8 @@ struct mlx5_flow_meter_policy { /* If yellow color policy is skipped. */ uint32_t skip_g:1; /* If green color policy is skipped. */ @@ -30658,7 +59560,7 @@ index 8466531060..128ebd6937 100644 rte_spinlock_t sl; uint32_t ref_cnt; /* Use count. */ -@@ -956,7 +963,6 @@ union mlx5_flow_tbl_key { +@@ -956,7 +964,6 @@ union mlx5_flow_tbl_key { /* Table structure. */ struct mlx5_flow_tbl_resource { void *obj; /**< Pointer to DR table object. */ @@ -30666,7 +59568,7 @@ index 8466531060..128ebd6937 100644 }; #define MLX5_MAX_TABLES UINT16_MAX -@@ -977,7 +983,7 @@ struct mlx5_flow_id_pool { +@@ -977,7 +984,7 @@ struct mlx5_flow_id_pool { uint32_t base_index; /**< The next index that can be used without any free elements. */ uint32_t *curr; /**< Pointer to the index to pop. */ @@ -30675,7 +59577,7 @@ index 8466531060..128ebd6937 100644 uint32_t max_id; /**< Maximum id can be allocated from the pool. */ }; -@@ -1014,7 +1020,7 @@ struct mlx5_dev_txpp { +@@ -1014,7 +1021,7 @@ struct mlx5_dev_txpp { void *pp; /* Packet pacing context. */ uint16_t pp_id; /* Packet pacing context index. */ uint16_t ts_n; /* Number of captured timestamps. */ @@ -30684,7 +59586,7 @@ index 8466531060..128ebd6937 100644 struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */ struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */ uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */ -@@ -1118,7 +1124,7 @@ struct mlx5_flex_parser_devx { +@@ -1118,7 +1125,7 @@ struct mlx5_flex_parser_devx { uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM]; }; @@ -30693,7 +59595,16 @@ index 8466531060..128ebd6937 100644 __extension__ struct mlx5_flex_pattern_field { uint16_t width:6; -@@ -1169,7 +1175,7 @@ struct mlx5_dev_ctx_shared { +@@ -1152,7 +1159,7 @@ struct mlx5_dev_ctx_shared { + uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */ + uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */ + uint32_t reclaim_mode:1; /* Reclaim memory. */ +- uint32_t dr_drop_action_en:1; /* Use DR drop action. */ ++ uint32_t dr_root_drop_action_en:1; /* DR drop action is usable on root tables. */ + uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */ + uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */ + uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. */ +@@ -1169,7 +1176,7 @@ struct mlx5_dev_ctx_shared { /* Shared DV/DR flow data section. */ uint32_t dv_meta_mask; /* flow META metadata supported mask. */ uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */ @@ -30702,7 +59613,7 @@ index 8466531060..128ebd6937 100644 void *fdb_domain; /* FDB Direct Rules name space handle. */ void *rx_domain; /* RX Direct Rules name space handle. */ void *tx_domain; /* TX Direct Rules name space handle. 
*/ -@@ -1199,6 +1205,7 @@ struct mlx5_dev_ctx_shared { +@@ -1199,6 +1206,7 @@ struct mlx5_dev_ctx_shared { /* Shared interrupt handler section. */ struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */ struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */ @@ -30710,15 +59621,25 @@ index 8466531060..128ebd6937 100644 void *devx_comp; /* DEVX async comp obj. */ struct mlx5_devx_obj *tis[16]; /* TIS object. */ struct mlx5_devx_obj *td; /* Transport domain. */ -@@ -1409,6 +1416,7 @@ struct mlx5_priv { +@@ -1409,6 +1417,8 @@ struct mlx5_priv { unsigned int mtr_en:1; /* Whether support meter. */ unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */ unsigned int lb_used:1; /* Loopback queue is referred to. */ ++ unsigned int rmv_notified:1; /* Notified about removal event */ + uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */ uint16_t domain_id; /* Switch domain identifier. */ uint16_t vport_id; /* Associated VF vport index (if any). */ uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */ -@@ -1580,6 +1588,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, +@@ -1444,7 +1454,7 @@ struct mlx5_priv { + uint32_t refcnt; /**< Reference counter. */ + /**< Verbs modify header action object. */ + uint8_t ft_type; /**< Flow table type, Rx or Tx. */ +- uint8_t max_lro_msg_size; ++ uint32_t max_lro_msg_size; + uint32_t link_speed_capa; /* Link speed capabilities. */ + struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ + struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */ +@@ -1580,6 +1590,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf); void mlx5_dev_interrupt_handler(void *arg); void mlx5_dev_interrupt_handler_devx(void *arg); @@ -30753,7 +59674,7 @@ index 258475ed2c..2d48fde010 100644 /* Two-byte shift is disabled for Multi-Packet RQ. */ #define MLX5_MPRQ_TWO_BYTE_SHIFT 0 diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c -index 105c3d67f0..44c439bb55 100644 +index 105c3d67f0..b59c5d81bd 100644 --- a/dpdk/drivers/net/mlx5/mlx5_devx.c +++ b/dpdk/drivers/net/mlx5/mlx5_devx.c @@ -257,11 +257,11 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq) @@ -30787,15 +59708,19 @@ index 105c3d67f0..44c439bb55 100644 /* Enable TIR LRO only if all the queues were configured for. 
*/ for (i = 0; i < ind_tbl->queues_n; ++i) { -@@ -768,6 +769,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key, +@@ -768,8 +769,10 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key, tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; if (lro) { + MLX5_ASSERT(priv->config.lro.supported); tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout; - tir_attr->lro_max_msg_sz = priv->max_lro_msg_size; +- tir_attr->lro_max_msg_sz = priv->max_lro_msg_size; ++ tir_attr->lro_max_msg_sz = ++ priv->max_lro_msg_size / MLX5_LRO_SEG_CHUNK_SIZE; tir_attr->lro_enable_mask = -@@ -931,6 +933,8 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev) + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; +@@ -931,6 +934,8 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev) rte_errno = ENOMEM; goto error; } @@ -30804,6 +59729,17 @@ index 105c3d67f0..44c439bb55 100644 rxq_obj->rxq_ctrl = rxq_ctrl; rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD; rxq_ctrl->sh = priv->sh; +@@ -1013,6 +1018,10 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; + ++#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) ++ if (hrxq->action != NULL) ++ mlx5_flow_os_destroy_flow_action(hrxq->action); ++#endif + if (hrxq->tir != NULL) + mlx5_devx_tir_destroy(hrxq); + if (hrxq->ind_table->ind_table != NULL) diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c index dc647d5580..9c44471c42 100644 --- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c @@ -30818,7 +59754,7 @@ index dc647d5580..9c44471c42 100644 dev->data->port_id); rte_errno = ENOMEM; diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index f34e4b88aa..42de516bfd 100644 +index f34e4b88aa..5d489c7f92 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -18,6 +18,7 @@ @@ -30829,7 +59765,16 @@ index f34e4b88aa..42de516bfd 100644 #include <mlx5_glue.h> #include <mlx5_devx_cmds.h> -@@ -148,6 +149,8 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item) +@@ -63,8 +64,6 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev, + uint32_t group, uint32_t *table, + struct rte_flow_error *error); + +-static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); +-static void mlx5_flow_pop_thread_workspace(void); + + + /** Device flow drivers. 
*/ +@@ -148,6 +147,8 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item) case RTE_FLOW_ITEM_TYPE_IPV6: case RTE_FLOW_ITEM_TYPE_UDP: case RTE_FLOW_ITEM_TYPE_TCP: @@ -30838,7 +59783,7 @@ index f34e4b88aa..42de516bfd 100644 case RTE_FLOW_ITEM_TYPE_VXLAN: case RTE_FLOW_ITEM_TYPE_NVGRE: case RTE_FLOW_ITEM_TYPE_GRE: -@@ -164,128 +167,152 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item) +@@ -164,128 +165,152 @@ mlx5_flow_is_rss_expandable_item(const struct rte_flow_item *item) return false; } @@ -31092,7 +60037,7 @@ index f34e4b88aa..42de516bfd 100644 } static const int * -@@ -533,9 +560,11 @@ enum mlx5_expansion { +@@ -533,9 +558,11 @@ enum mlx5_expansion { MLX5_EXPANSION_OUTER_IPV4, MLX5_EXPANSION_OUTER_IPV4_UDP, MLX5_EXPANSION_OUTER_IPV4_TCP, @@ -31104,7 +60049,7 @@ index f34e4b88aa..42de516bfd 100644 MLX5_EXPANSION_VXLAN, MLX5_EXPANSION_STD_VXLAN, MLX5_EXPANSION_L3_VXLAN, -@@ -549,9 +578,11 @@ enum mlx5_expansion { +@@ -549,9 +576,11 @@ enum mlx5_expansion { MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV4_UDP, MLX5_EXPANSION_IPV4_TCP, @@ -31116,7 +60061,7 @@ index f34e4b88aa..42de516bfd 100644 MLX5_EXPANSION_IPV6_FRAG_EXT, MLX5_EXPANSION_GTP, MLX5_EXPANSION_GENEVE, -@@ -586,6 +617,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { +@@ -586,6 +615,7 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT (MLX5_EXPANSION_OUTER_IPV4_UDP, MLX5_EXPANSION_OUTER_IPV4_TCP, @@ -31124,7 +60069,7 @@ index f34e4b88aa..42de516bfd 100644 MLX5_EXPANSION_GRE, MLX5_EXPANSION_NVGRE, MLX5_EXPANSION_IPV4, -@@ -607,10 +639,14 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { +@@ -607,10 +637,14 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_TCP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, }, @@ -31139,7 +60084,7 @@ index f34e4b88aa..42de516bfd 100644 MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, MLX5_EXPANSION_GRE, -@@ -632,6 +668,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { +@@ -632,6 +666,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_TCP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP, }, @@ -31149,7 +60094,7 @@ index f34e4b88aa..42de516bfd 100644 [MLX5_EXPANSION_VXLAN] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH, MLX5_EXPANSION_IPV4, -@@ -691,7 +730,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { +@@ -691,7 +728,8 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { }, [MLX5_EXPANSION_IPV4] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP, @@ -31159,7 +60104,7 @@ index f34e4b88aa..42de516bfd 100644 .type = RTE_FLOW_ITEM_TYPE_IPV4, .rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER, -@@ -704,9 +744,13 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { +@@ -704,9 +742,13 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { .type = RTE_FLOW_ITEM_TYPE_TCP, .rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP, }, @@ -31173,7 +60118,7 @@ index f34e4b88aa..42de516bfd 100644 MLX5_EXPANSION_IPV6_FRAG_EXT), .type = RTE_FLOW_ITEM_TYPE_IPV6, .rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | -@@ -723,6 +767,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { +@@ -723,6 +765,9 @@ static const struct mlx5_flow_expand_node mlx5_support_expansion[] = { [MLX5_EXPANSION_IPV6_FRAG_EXT] = { .type = 
RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT, }, @@ -31183,7 +60128,7 @@ index f34e4b88aa..42de516bfd 100644 [MLX5_EXPANSION_GTP] = { .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6), -@@ -1206,7 +1253,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) +@@ -1206,7 +1251,7 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** @@ -31192,7 +60137,7 @@ index f34e4b88aa..42de516bfd 100644 * flow. * * @param[in] dev -@@ -1219,7 +1266,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, +@@ -1219,7 +1264,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; @@ -31200,7 +60145,7 @@ index f34e4b88aa..42de516bfd 100644 const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; -@@ -1254,15 +1300,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, +@@ -1254,15 +1298,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, * this must be always enabled (metadata may arive * from other port - not from local flows only. */ @@ -31216,7 +60161,7 @@ index f34e4b88aa..42de516bfd 100644 if (tunnel) { unsigned int j; -@@ -1280,6 +1317,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, +@@ -1280,6 +1315,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev, } } @@ -31237,7 +60182,7 @@ index f34e4b88aa..42de516bfd 100644 /** * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow * -@@ -1294,7 +1345,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +@@ -1294,7 +1343,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) struct mlx5_priv *priv = dev->data->dev_private; uint32_t handle_idx; struct mlx5_flow_handle *dev_handle; @@ -31249,7 +60194,7 @@ index f34e4b88aa..42de516bfd 100644 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dev_handle, next) flow_drv_rxq_flags_set(dev, dev_handle); -@@ -1314,7 +1369,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, +@@ -1314,7 +1367,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow_handle *dev_handle) { struct mlx5_priv *priv = dev->data->dev_private; @@ -31257,7 +60202,7 @@ index f34e4b88aa..42de516bfd 100644 const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL); struct mlx5_ind_table_obj *ind_tbl = NULL; unsigned int i; -@@ -1345,15 +1399,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, +@@ -1345,15 +1397,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, MLX5_ASSERT(rxq_ctrl != NULL); if (rxq_ctrl == NULL) continue; @@ -31273,7 +60218,7 @@ index f34e4b88aa..42de516bfd 100644 if (tunnel) { unsigned int j; -@@ -1410,12 +1455,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) +@@ -1410,12 +1453,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev) if (rxq == NULL || rxq->ctrl == NULL) continue; @@ -31287,7 +60232,66 @@ index f34e4b88aa..42de516bfd 100644 } /** -@@ -3008,7 +3053,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, +@@ -1563,8 +1606,10 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + /* + * Validate the drop action. + * +- * @param[in] action_flags +- * Bit-fields that holds the actions detected until now. ++ * @param[in] dev ++ * Pointer to the Ethernet device structure. ++ * @param[in] is_root ++ * True if flow is validated for root table. False otherwise. + * @param[in] attr + * Attributes of flow that includes this action. 
+ * @param[out] error +@@ -1574,15 +1619,25 @@ mlx5_flow_validate_action_mark(const struct rte_flow_action *action, + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused, ++mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, ++ bool is_root, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) + { +- if (attr->egress) ++ struct mlx5_priv *priv = dev->data->dev_private; ++ ++ if (priv->config.dv_flow_en == 0 && attr->egress) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL, + "drop action not supported for " + "egress"); ++ if (priv->config.dv_flow_en == 1 && is_root && (attr->egress || attr->transfer) && ++ !priv->sh->dr_root_drop_action_en) { ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ATTR, NULL, ++ "drop action not supported for " ++ "egress and transfer on group 0"); ++ } + return 0; + } + +@@ -2311,7 +2366,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "IPv4 cannot follow L2/VLAN layer " + "which ether type is not IPv4"); +- if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { ++ if (item_flags & MLX5_FLOW_LAYER_IPIP) { + if (mask && spec) + next_proto = mask->hdr.next_proto_id & + spec->hdr.next_proto_id; +@@ -2419,7 +2474,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item, + "which ether type is not IPv6"); + if (mask && mask->hdr.proto == UINT8_MAX && spec) + next_proto = spec->hdr.proto; +- if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { ++ if (item_flags & MLX5_FLOW_LAYER_IPIP) { + if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +@@ -3008,7 +3063,7 @@ mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK) return rte_flow_error_set (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -31296,7 +60300,7 @@ index f34e4b88aa..42de516bfd 100644 /* Check if class type and length masks are full. */ if (full_mask.option_class != mask->option_class || full_mask.option_type != mask->option_type || -@@ -3957,7 +4002,7 @@ find_graph_root(uint32_t rss_level) +@@ -3957,7 +4012,7 @@ find_graph_root(uint32_t rss_level) * subflow. * * @param[in] dev_flow @@ -31305,7 +60309,41 @@ index f34e4b88aa..42de516bfd 100644 * * @return * The layers get from prefix subflow. 
-@@ -4284,7 +4329,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) +@@ -4173,6 +4228,7 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + int queue_action = 0; + int action_n = 0; + int split = 0; ++ int push_vlan = 0; + const struct rte_flow_action_queue *queue; + const struct rte_flow_action_rss *rss; + const struct rte_flow_action_raw_encap *raw_encap; +@@ -4181,6 +4237,8 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + if (!attr->ingress) + return 0; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { ++ if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) ++ push_vlan = 1; + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = actions->conf; +@@ -4205,11 +4263,15 @@ flow_check_hairpin_split(struct rte_eth_dev *dev, + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: +- case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + split++; + action_n++; + break; ++ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: ++ if (push_vlan) ++ split++; ++ action_n++; ++ break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap = actions->conf; + if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) +@@ -4284,7 +4346,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) [3] = { .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -31314,7 +60352,7 @@ index f34e4b88aa..42de516bfd 100644 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error); if (ret < 0) return NULL; -@@ -4353,7 +4398,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) +@@ -4353,7 +4415,7 @@ flow_dv_mreg_create_cb(void *tool_ctx, void *cb_ctx) /* * The copy Flows are not included in any list. There * ones are referenced from other Flows and can not @@ -31323,7 +60361,41 @@ index f34e4b88aa..42de516bfd 100644 * by list traversing. 
*/ mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP, -@@ -4796,6 +4841,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, +@@ -4670,19 +4732,32 @@ flow_hairpin_split(struct rte_eth_dev *dev, + struct mlx5_rte_flow_item_tag *tag_item; + struct rte_flow_item *item; + char *addr; ++ int push_vlan = 0; + int encap = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { ++ if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) ++ push_vlan = 1; + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: +- case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; + break; ++ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: ++ if (push_vlan) { ++ rte_memcpy(actions_tx, actions, ++ sizeof(struct rte_flow_action)); ++ actions_tx++; ++ } else { ++ rte_memcpy(actions_rx, actions, ++ sizeof(struct rte_flow_action)); ++ actions_rx++; ++ } ++ break; + case RTE_FLOW_ACTION_TYPE_COUNT: + if (encap) { + rte_memcpy(actions_tx, actions, +@@ -4796,6 +4871,7 @@ flow_create_split_inner(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_flow *dev_flow; @@ -31331,7 +60403,7 @@ index f34e4b88aa..42de516bfd 100644 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, flow_split_info->flow_idx, error); -@@ -4810,12 +4856,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, +@@ -4810,12 +4886,14 @@ flow_create_split_inner(struct rte_eth_dev *dev, /* * If dev_flow is as one of the suffix flow, some actions in suffix * flow may need some user defined item layer flags, and pass the @@ -31349,7 +60421,7 @@ index f34e4b88aa..42de516bfd 100644 if (sub_flow) *sub_flow = dev_flow; #ifdef HAVE_IBV_FLOW_DV_SUPPORT -@@ -5006,9 +5054,10 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5006,9 +5084,10 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t tag_id = 0; struct rte_flow_item *vlan_item_dst = NULL; const struct rte_flow_item *vlan_item_src = NULL; @@ -31361,7 +60433,7 @@ index f34e4b88aa..42de516bfd 100644 bool mtr_first; uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0; uint8_t mtr_reg_bits = priv->mtr_reg_share ? 
-@@ -5016,27 +5065,18 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5016,27 +5095,18 @@ flow_meter_split_prep(struct rte_eth_dev *dev, uint32_t flow_id = 0; uint32_t flow_id_reversed = 0; uint8_t flow_id_bits = 0; @@ -31392,7 +60464,7 @@ index f34e4b88aa..42de516bfd 100644 if (!fm->def_policy && wks->policy->is_hierarchy && flow_src_port != priv->representor_id) { if (flow_drv_mtr_hierarchy_rule_create(dev, -@@ -5082,6 +5122,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5082,6 +5152,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, tag_action = actions_pre++; action_cur = actions_pre++; } @@ -31400,7 +60472,7 @@ index f34e4b88aa..42de516bfd 100644 break; case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP: -@@ -5110,6 +5151,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5110,6 +5181,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev, MLX5_RTE_FLOW_ITEM_TYPE_VLAN; } break; @@ -31412,7 +60484,7 @@ index f34e4b88aa..42de516bfd 100644 default: break; } -@@ -5130,7 +5176,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, +@@ -5130,7 +5206,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, if (!fm->def_policy) { sub_policy = get_meter_sub_policy(dev, flow, wks, @@ -31422,7 +60494,7 @@ index f34e4b88aa..42de516bfd 100644 if (!sub_policy) return -rte_errno; } else { -@@ -5359,7 +5406,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, +@@ -5359,7 +5436,7 @@ flow_mreg_split_qrss_prep(struct rte_eth_dev *dev, * @param[out] error * Perform verbose error reporting if not NULL. * @param[in] encap_idx @@ -31431,7 +60503,42 @@ index f34e4b88aa..42de516bfd 100644 * * @return * 0 on success, negative value otherwise -@@ -5527,7 +5574,7 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5436,6 +5513,7 @@ flow_check_match_action(const struct rte_flow_action actions[], + { + const struct rte_flow_action_sample *sample; + const struct rte_flow_action_raw_decap *decap; ++ const struct rte_flow_action *action_cur = NULL; + int actions_n = 0; + uint32_t ratio = 0; + int sub_type = 0; +@@ -5459,7 +5537,8 @@ flow_check_match_action(const struct rte_flow_action actions[], + ratio = sample->ratio; + sub_type = ((const struct rte_flow_action *) + (sample->actions))->type; +- if (ratio == 1 && attr->transfer) ++ if (ratio == 1 && attr->transfer && ++ sub_type != RTE_FLOW_ACTION_TYPE_END) + fdb_mirror = 1; + break; + case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC: +@@ -5495,12 +5574,12 @@ flow_check_match_action(const struct rte_flow_action actions[], + break; + case RTE_FLOW_ACTION_TYPE_RAW_DECAP: + decap = actions->conf; +- while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID) ++ action_cur = actions; ++ while ((++action_cur)->type == RTE_FLOW_ACTION_TYPE_VOID) + ; +- actions_n++; +- if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { ++ if (action_cur->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) { + const struct rte_flow_action_raw_encap *encap = +- actions->conf; ++ action_cur->conf; + if (decap->size <= + MLX5_ENCAPSULATION_DECISION_SIZE && + encap->size > +@@ -5527,7 +5606,7 @@ flow_check_match_action(const struct rte_flow_action actions[], return flag ? actions_n + 1 : 0; } @@ -31440,7 +60547,7 @@ index f34e4b88aa..42de516bfd 100644 /** * Split the sample flow. 
-@@ -5568,6 +5615,7 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5568,6 +5647,7 @@ flow_check_match_action(const struct rte_flow_action actions[], static int flow_sample_split_prep(struct rte_eth_dev *dev, int add_tag, @@ -31448,7 +60555,7 @@ index f34e4b88aa..42de516bfd 100644 struct rte_flow_item sfx_items[], const struct rte_flow_action actions[], struct rte_flow_action actions_sfx[], -@@ -5584,8 +5632,9 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5584,8 +5664,9 @@ flow_sample_split_prep(struct rte_eth_dev *dev, struct mlx5_rte_flow_item_tag *tag_mask; struct rte_flow_action_jump *jump_action; uint32_t tag_id = 0; @@ -31459,7 +60566,7 @@ index f34e4b88aa..42de516bfd 100644 int ret; if (sample_action_pos < 0) -@@ -5594,6 +5643,52 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5594,6 +5675,54 @@ flow_sample_split_prep(struct rte_eth_dev *dev, NULL, "invalid position of sample " "action in list"); /* Prepare the actions for prefix and suffix flow. */ @@ -31498,9 +60605,11 @@ index f34e4b88aa..42de516bfd 100644 + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: -+ push_vlan_idx = action_idx; -+ if (push_vlan_idx < sample_action_pos) ++ if (action_idx < sample_action_pos && ++ push_vlan_idx == -1) { + set_tag_idx = action_idx; ++ push_vlan_idx = action_idx; ++ } + break; + default: + break; @@ -31512,7 +60621,7 @@ index f34e4b88aa..42de516bfd 100644 if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) { index = qrss_action_pos; /* Put the preceding the Queue/RSS action into prefix flow. */ -@@ -5610,6 +5705,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5610,6 +5739,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, memcpy(actions_sfx, actions + qrss_action_pos, sizeof(struct rte_flow_action)); actions_sfx++; @@ -31527,33 +60636,37 @@ index f34e4b88aa..42de516bfd 100644 } else { index = sample_action_pos; if (index != 0) -@@ -5625,6 +5728,12 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -5624,7 +5761,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, + /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); - ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); +- ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); + /* Trust VF/SF on CX5 not supported meter so that the reserved + * metadata regC is REG_NON, back to use application tag + * index 0. + */ -+ if (unlikely(ret == REG_NON)) ++ if (unlikely(priv->mtr_color_reg == REG_NON)) + ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error); ++ else ++ ret = mlx5_flow_get_reg_id(dev, MLX5_SAMPLE_ID, 0, error); if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool -@@ -5634,6 +5743,12 @@ flow_sample_split_prep(struct rte_eth_dev *dev, - .data = tag_id, - }; - /* Prepare the suffix subflow items. 
*/ +@@ -5639,6 +5783,13 @@ flow_sample_split_prep(struct rte_eth_dev *dev, + tag_spec->id = set_tag->id; + tag_mask = tag_spec + 1; + tag_mask->data = UINT32_MAX; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) { + memcpy(sfx_items, items, sizeof(*sfx_items)); + sfx_items++; ++ break; + } + } - tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM); - tag_spec->data = tag_id; - tag_spec->id = set_tag->id; -@@ -5651,13 +5766,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev, + sfx_items[0] = (struct rte_flow_item){ + .type = (enum rte_flow_item_type) + MLX5_RTE_FLOW_ITEM_TYPE_TAG, +@@ -5651,13 +5802,17 @@ flow_sample_split_prep(struct rte_eth_dev *dev, RTE_FLOW_ITEM_TYPE_END, }; /* Prepare the tag action in prefix subflow. */ @@ -31572,7 +60685,7 @@ index f34e4b88aa..42de516bfd 100644 memcpy(actions_pre + index, actions + sample_action_pos, sizeof(struct rte_flow_action)); index += 1; -@@ -6042,6 +6161,8 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -6042,6 +6197,8 @@ flow_create_split_meter(struct rte_eth_dev *dev, fm->policy_id, NULL); MLX5_ASSERT(wks->policy); @@ -31581,7 +60694,7 @@ index f34e4b88aa..42de516bfd 100644 if (wks->policy->is_hierarchy) { wks->final_policy = mlx5_flow_meter_hierarchy_get_final_policy(dev, -@@ -6065,8 +6186,10 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -6065,8 +6222,10 @@ flow_create_split_meter(struct rte_eth_dev *dev, if (!fm->def_policy && !is_mtr_hierarchy && (!has_modify || !fm->drop_cnt)) set_mtr_reg = false; @@ -31594,7 +60707,7 @@ index f34e4b88aa..42de516bfd 100644 sizeof(struct mlx5_rte_flow_action_set_tag); /* Suffix items: tag, vlan, port id, end. */ #define METER_SUFFIX_ITEM 4 -@@ -6128,7 +6251,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, +@@ -6128,7 +6287,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, MLX5_FLOW_TABLE_LEVEL_METER; flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); @@ -31603,7 +60716,7 @@ index f34e4b88aa..42de516bfd 100644 flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX; } /* Add the prefix subflow. */ -@@ -6194,6 +6317,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6194,6 +6353,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, struct mlx5_flow_dv_sample_resource *sample_res; struct mlx5_flow_tbl_data_entry *sfx_tbl_data; struct mlx5_flow_tbl_resource *sfx_tbl; @@ -31611,7 +60724,38 @@ index f34e4b88aa..42de516bfd 100644 #endif size_t act_size; size_t item_size; -@@ -6240,7 +6364,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6207,6 +6367,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, + uint16_t jump_table = 0; + const uint32_t next_ft_step = 1; + int ret = 0; ++ struct mlx5_priv *item_port_priv = NULL; ++ const struct rte_flow_item *item; + + if (priv->sampler_en) + actions_n = flow_check_match_action(actions, attr, +@@ -6226,8 +6388,20 @@ flow_create_split_sample(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "no memory to split " + "sample flow"); ++ for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { ++ if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID) { ++ const struct rte_flow_item_port_id *spec; ++ ++ spec = (const struct rte_flow_item_port_id *)item->spec; ++ if (spec) ++ item_port_priv = ++ mlx5_port_to_eswitch_info(spec->id, true); ++ break; ++ } ++ } + /* The representor_id is UINT16_MAX for uplink. 
*/ +- fdb_tx = (attr->transfer && priv->representor_id != UINT16_MAX); ++ fdb_tx = (attr->transfer && ++ flow_source_vport_representor(priv, item_port_priv)); + /* + * When reg_c_preserve is set, metadata registers Cx preserve + * their value even through packet duplication. +@@ -6240,7 +6414,7 @@ flow_create_split_sample(struct rte_eth_dev *dev, jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR + next_ft_step; pre_actions = sfx_actions + actions_n; @@ -31620,7 +60764,7 @@ index f34e4b88aa..42de516bfd 100644 actions, sfx_actions, pre_actions, actions_n, sample_action_pos, -@@ -6280,7 +6404,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, +@@ -6280,7 +6454,8 @@ flow_create_split_sample(struct rte_eth_dev *dev, } flow_split_info->prefix_layers = flow_get_prefix_layer_flags(dev_flow); @@ -31630,7 +60774,73 @@ index f34e4b88aa..42de516bfd 100644 /* Suffix group level already be scaled with factor, set * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale * again in translation. -@@ -6884,7 +7009,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -6364,36 +6539,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) + return tunnel; + } + +-/** +- * Adjust flow RSS workspace if needed. +- * +- * @param wks +- * Pointer to thread flow work space. +- * @param rss_desc +- * Pointer to RSS descriptor. +- * @param[in] nrssq_num +- * New RSS queue number. +- * +- * @return +- * 0 on success, -1 otherwise and rte_errno is set. +- */ +-static int +-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks, +- struct mlx5_flow_rss_desc *rss_desc, +- uint32_t nrssq_num) +-{ +- if (likely(nrssq_num <= wks->rssq_num)) +- return 0; +- rss_desc->queue = realloc(rss_desc->queue, +- sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2)); +- if (!rss_desc->queue) { +- rte_errno = ENOMEM; +- return -1; +- } +- wks->rssq_num = RTE_ALIGN(nrssq_num, 2); +- return 0; +-} +- + /** + * Create a flow and add it to @p list. + * +@@ -6434,7 +6579,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, + int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS; + union { + struct mlx5_flow_expand_rss buf; +- uint8_t buffer[4096]; ++ uint8_t buffer[8192]; + } expand_buffer; + union { + struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; +@@ -6511,8 +6656,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, + if (attr->ingress && !attr->transfer) + rss = flow_get_rss_action(dev, p_actions_rx); + if (rss) { +- if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) +- return 0; ++ MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512); + /* + * The following information is required by + * mlx5_flow_hashfields_adjust() in advance. +@@ -6643,8 +6787,8 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, + rte_errno = ret; /* Restore rte_errno. */ + ret = rte_errno; + rte_errno = ret; +- mlx5_flow_pop_thread_workspace(); + error_before_hairpin_split: ++ mlx5_flow_pop_thread_workspace(); + rte_free(translated_actions); + return 0; + } +@@ -6884,7 +7028,7 @@ flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, * @param type * Flow type to be flushed. 
* @param active @@ -31639,7 +60849,252 @@ index f34e4b88aa..42de516bfd 100644 */ void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, -@@ -8531,7 +8656,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -6945,12 +7089,34 @@ flow_release_workspace(void *data) + + while (wks) { + next = wks->next; +- free(wks->rss_desc.queue); + free(wks); + wks = next; + } + } + ++static struct mlx5_flow_workspace *gc_head; ++static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER; ++ ++static void ++mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws) ++{ ++ rte_spinlock_lock(&mlx5_flow_workspace_lock); ++ ws->gc = gc_head; ++ gc_head = ws; ++ rte_spinlock_unlock(&mlx5_flow_workspace_lock); ++} ++ ++void ++mlx5_flow_workspace_gc_release(void) ++{ ++ while (gc_head) { ++ struct mlx5_flow_workspace *wks = gc_head; ++ ++ gc_head = wks->gc; ++ flow_release_workspace(wks); ++ } ++} ++ + /** + * Get thread specific current flow workspace. + * +@@ -6976,24 +7142,17 @@ mlx5_flow_get_thread_workspace(void) + static struct mlx5_flow_workspace* + flow_alloc_thread_workspace(void) + { +- struct mlx5_flow_workspace *data = calloc(1, sizeof(*data)); ++ size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long)); ++ size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512; ++ struct mlx5_flow_workspace *data = calloc(1, data_size + ++ rss_queue_array_size); + + if (!data) { +- DRV_LOG(ERR, "Failed to allocate flow workspace " +- "memory."); ++ DRV_LOG(ERR, "Failed to allocate flow workspace memory."); + return NULL; + } +- data->rss_desc.queue = calloc(1, +- sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM); +- if (!data->rss_desc.queue) +- goto err; +- data->rssq_num = MLX5_RSSQ_DEFAULT_NUM; ++ data->rss_desc.queue = RTE_PTR_ADD(data, data_size); + return data; +-err: +- if (data->rss_desc.queue) +- free(data->rss_desc.queue); +- free(data); +- return NULL; + } + + /** +@@ -7003,7 +7162,7 @@ flow_alloc_thread_workspace(void) + * + * @return pointer to thread specific flow workspace data, NULL on error. + */ +-static struct mlx5_flow_workspace* ++struct mlx5_flow_workspace* + mlx5_flow_push_thread_workspace(void) + { + struct mlx5_flow_workspace *curr; +@@ -7014,6 +7173,7 @@ mlx5_flow_push_thread_workspace(void) + data = flow_alloc_thread_workspace(); + if (!data) + return NULL; ++ mlx5_flow_workspace_gc_add(data); + } else if (!curr->inuse) { + data = curr; + } else if (curr->next) { +@@ -7040,7 +7200,7 @@ mlx5_flow_push_thread_workspace(void) + * + * @return pointer to thread specific flow workspace data, NULL on error. 
+ */ +-static void ++void + mlx5_flow_pop_thread_workspace(void) + { + struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace(); +@@ -7810,7 +7970,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) + { + struct mlx5_counter_stats_mem_mng *mem_mng; + volatile struct flow_counter_stats *raw_data; +- int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; ++ int raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES; + int size = (sizeof(struct flow_counter_stats) * + MLX5_COUNTERS_PER_POOL + + sizeof(struct mlx5_counter_stats_raw)) * raws_n + +@@ -7848,7 +8008,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh) + } + for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) + LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, +- mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i, ++ mem_mng->raws + MLX5_CNT_MR_ALLOC_BULK + i, + next); + LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); + sh->cmng.mem_mng = mem_mng; +@@ -7872,14 +8032,13 @@ mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh, + { + struct mlx5_flow_counter_mng *cmng = &sh->cmng; + /* Resize statistic memory once used out. */ +- if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) && ++ if (!(pool->index % MLX5_CNT_MR_ALLOC_BULK) && + mlx5_flow_create_counter_stat_mem_mng(sh)) { + DRV_LOG(ERR, "Cannot resize counter stat mem."); + return -1; + } + rte_spinlock_lock(&pool->sl); +- pool->raw = cmng->mem_mng->raws + pool->index % +- MLX5_CNT_CONTAINER_RESIZE; ++ pool->raw = cmng->mem_mng->raws + pool->index % MLX5_CNT_MR_ALLOC_BULK; + rte_spinlock_unlock(&pool->sl); + pool->raw_hw = NULL; + return 0; +@@ -7921,13 +8080,14 @@ void + mlx5_flow_query_alarm(void *arg) + { + struct mlx5_dev_ctx_shared *sh = arg; +- int ret; +- uint16_t pool_index = sh->cmng.pool_index; + struct mlx5_flow_counter_mng *cmng = &sh->cmng; ++ uint16_t pool_index = cmng->pool_index; + struct mlx5_flow_counter_pool *pool; + uint16_t n_valid; ++ int ret; ++ + +- if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) ++ if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES) + goto set_alarm; + rte_spinlock_lock(&cmng->pool_update_sl); + pool = cmng->pools[pool_index]; +@@ -7939,8 +8099,7 @@ mlx5_flow_query_alarm(void *arg) + if (pool->raw_hw) + /* There is a pool query in progress. */ + goto set_alarm; +- pool->raw_hw = +- LIST_FIRST(&sh->cmng.free_stat_raws); ++ pool->raw_hw = LIST_FIRST(&cmng->free_stat_raws); + if (!pool->raw_hw) + /* No free counter statistics raw memory. 
*/ + goto set_alarm; +@@ -7966,12 +8125,12 @@ mlx5_flow_query_alarm(void *arg) + goto set_alarm; + } + LIST_REMOVE(pool->raw_hw, next); +- sh->cmng.pending_queries++; ++ cmng->pending_queries++; + pool_index++; + if (pool_index >= n_valid) + pool_index = 0; + set_alarm: +- sh->cmng.pool_index = pool_index; ++ cmng->pool_index = pool_index; + mlx5_set_query_alarm(sh); + } + +@@ -8477,23 +8636,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, + } + i = lcore_index; + +- for (j = 0; j <= h->mask; j++) { +- l_inconst = &h->buckets[j].l; +- if (!l_inconst || !l_inconst->cache[i]) +- continue; +- +- e = LIST_FIRST(&l_inconst->cache[i]->h); +- while (e) { +- modify_hdr = +- (struct mlx5_flow_dv_modify_hdr_resource *)e; +- data = (const uint8_t *)modify_hdr->actions; +- size = (size_t)(modify_hdr->actions_num) * 8; +- actions_num = modify_hdr->actions_num; +- id = (uint64_t)(uintptr_t)modify_hdr->action; +- type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; +- save_dump_file(data, size, type, id, +- (void *)(&actions_num), file); +- e = LIST_NEXT(e, next); ++ if (lcore_index == MLX5_LIST_NLCORE) { ++ for (i = 0; i <= (uint32_t)lcore_index; i++) { ++ for (j = 0; j <= h->mask; j++) { ++ l_inconst = &h->buckets[j].l; ++ if (!l_inconst || !l_inconst->cache[i]) ++ continue; ++ ++ e = LIST_FIRST(&l_inconst->cache[i]->h); ++ while (e) { ++ modify_hdr = ++ (struct mlx5_flow_dv_modify_hdr_resource *)e; ++ data = (const uint8_t *)modify_hdr->actions; ++ size = (size_t)(modify_hdr->actions_num) * 8; ++ actions_num = modify_hdr->actions_num; ++ id = (uint64_t)(uintptr_t)modify_hdr->action; ++ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; ++ save_dump_file(data, size, type, id, ++ (void *)(&actions_num), file); ++ e = LIST_NEXT(e, next); ++ } ++ } ++ } ++ } else { ++ for (j = 0; j <= h->mask; j++) { ++ l_inconst = &h->buckets[j].l; ++ if (!l_inconst || !l_inconst->cache[i]) ++ continue; ++ ++ e = LIST_FIRST(&l_inconst->cache[i]->h); ++ while (e) { ++ modify_hdr = ++ (struct mlx5_flow_dv_modify_hdr_resource *)e; ++ data = (const uint8_t *)modify_hdr->actions; ++ size = (size_t)(modify_hdr->actions_num) * 8; ++ actions_num = modify_hdr->actions_num; ++ id = (uint64_t)(uintptr_t)modify_hdr->action; ++ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR; ++ save_dump_file(data, size, type, id, ++ (void *)(&actions_num), file); ++ e = LIST_NEXT(e, next); ++ } + } + } + +@@ -8502,7 +8685,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, + } + + /* get counter */ +- MLX5_ASSERT(cmng->n_valid <= cmng->n); ++ MLX5_ASSERT(cmng->n_valid <= MLX5_COUNTER_POOLS_MAX_NUM); + max = MLX5_COUNTERS_PER_POOL * cmng->n_valid; + for (j = 1; j <= max; j++) { + action = NULL; +@@ -8531,7 +8714,7 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, * Perform verbose error reporting if not NULL. PMDs initialize this * structure in case of error only. 
* @return @@ -31648,7 +61103,28 @@ index f34e4b88aa..42de516bfd 100644 */ int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx, -@@ -9009,7 +9134,7 @@ mlx5_get_tof(const struct rte_flow_item *item, +@@ -8785,9 +8968,18 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, + const struct mlx5_flow_driver_ops *fops = + flow_get_drv_ops(flow_get_drv_type(dev, &attr)); + int ret; ++ uint32_t act_idx = (uint32_t)(uintptr_t)handle; ++ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; + +- ret = flow_drv_action_validate(dev, NULL, +- (const struct rte_flow_action *)update, fops, error); ++ switch (type) { ++ case MLX5_INDIRECT_ACTION_TYPE_CT: ++ ret = 0; ++ break; ++ default: ++ ret = flow_drv_action_validate(dev, NULL, ++ (const struct rte_flow_action *)update, ++ fops, error); ++ } + if (ret) + return ret; + return flow_drv_action_update(dev, handle, update, fops, +@@ -9009,7 +9201,7 @@ mlx5_get_tof(const struct rte_flow_item *item, } /** @@ -31657,7 +61133,16 @@ index f34e4b88aa..42de516bfd 100644 */ #ifdef HAVE_IBV_FLOW_DV_SUPPORT __extension__ -@@ -9822,10 +9947,27 @@ mlx5_flow_flex_item_create(struct rte_eth_dev *dev, +@@ -9536,7 +9728,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, + if (!is_tunnel_offload_active(dev)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, +- "tunnel offload was not activated"); ++ "tunnel offload was not activated, consider setting dv_xmeta_en=3"); + if (!tunnel) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, +@@ -9822,10 +10014,27 @@ mlx5_flow_flex_item_create(struct rte_eth_dev *dev, struct rte_flow_error *error) { static const char err_msg[] = "flex item creation unsupported"; @@ -31685,7 +61170,7 @@ index f34e4b88aa..42de516bfd 100644 if (!fops->item_create) { DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg); rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -10012,3 +10154,80 @@ mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, +@@ -10012,3 +10221,80 @@ mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, } return res; } @@ -31767,10 +61252,83 @@ index f34e4b88aa..42de516bfd 100644 + return 0; +} diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h -index 1f54649c69..29ccb98351 100644 +index 1f54649c69..6482ef708c 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.h +++ b/dpdk/drivers/net/mlx5/mlx5_flow.h -@@ -426,7 +426,7 @@ enum mlx5_feature_name { +@@ -224,34 +224,34 @@ enum mlx5_feature_name { + (MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4) + + /* Actions */ +-#define MLX5_FLOW_ACTION_DROP (1u << 0) +-#define MLX5_FLOW_ACTION_QUEUE (1u << 1) +-#define MLX5_FLOW_ACTION_RSS (1u << 2) +-#define MLX5_FLOW_ACTION_FLAG (1u << 3) +-#define MLX5_FLOW_ACTION_MARK (1u << 4) +-#define MLX5_FLOW_ACTION_COUNT (1u << 5) +-#define MLX5_FLOW_ACTION_PORT_ID (1u << 6) +-#define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7) +-#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8) +-#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9) +-#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10) +-#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11) +-#define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12) +-#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13) +-#define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14) +-#define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15) +-#define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16) +-#define MLX5_FLOW_ACTION_JUMP (1u << 17) +-#define MLX5_FLOW_ACTION_SET_TTL (1u << 18) +-#define 
MLX5_FLOW_ACTION_DEC_TTL (1u << 19) +-#define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20) +-#define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21) +-#define MLX5_FLOW_ACTION_ENCAP (1u << 22) +-#define MLX5_FLOW_ACTION_DECAP (1u << 23) +-#define MLX5_FLOW_ACTION_INC_TCP_SEQ (1u << 24) +-#define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1u << 25) +-#define MLX5_FLOW_ACTION_INC_TCP_ACK (1u << 26) +-#define MLX5_FLOW_ACTION_DEC_TCP_ACK (1u << 27) ++#define MLX5_FLOW_ACTION_DROP (1ull << 0) ++#define MLX5_FLOW_ACTION_QUEUE (1ull << 1) ++#define MLX5_FLOW_ACTION_RSS (1ull << 2) ++#define MLX5_FLOW_ACTION_FLAG (1ull << 3) ++#define MLX5_FLOW_ACTION_MARK (1ull << 4) ++#define MLX5_FLOW_ACTION_COUNT (1ull << 5) ++#define MLX5_FLOW_ACTION_PORT_ID (1ull << 6) ++#define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7) ++#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8) ++#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9) ++#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10) ++#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11) ++#define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12) ++#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13) ++#define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14) ++#define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15) ++#define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16) ++#define MLX5_FLOW_ACTION_JUMP (1ull << 17) ++#define MLX5_FLOW_ACTION_SET_TTL (1ull << 18) ++#define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19) ++#define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20) ++#define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21) ++#define MLX5_FLOW_ACTION_ENCAP (1ull << 22) ++#define MLX5_FLOW_ACTION_DECAP (1ull << 23) ++#define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24) ++#define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25) ++#define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26) ++#define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27) + #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28) + #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29) + #define MLX5_FLOW_ACTION_SET_META (1ull << 30) +@@ -267,6 +267,9 @@ enum mlx5_feature_name { + #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40) + #define MLX5_FLOW_ACTION_CT (1ull << 41) + ++#define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \ ++ (MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE) ++ + #define MLX5_FLOW_FATE_ACTIONS \ + (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \ + MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \ +@@ -426,7 +429,7 @@ enum mlx5_feature_name { #define MLX5_ACT_NUM_MDF_IPV6 4 #define MLX5_ACT_NUM_MDF_MAC 2 #define MLX5_ACT_NUM_MDF_VID 1 @@ -31779,7 +61337,7 @@ index 1f54649c69..29ccb98351 100644 #define MLX5_ACT_NUM_MDF_TTL 1 #define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL #define MLX5_ACT_NUM_MDF_TCPSEQ 1 -@@ -598,7 +598,7 @@ struct mlx5_flow_tbl_data_entry { +@@ -598,7 +601,7 @@ struct mlx5_flow_tbl_data_entry { const struct mlx5_flow_tunnel *tunnel; uint32_t group_id; uint32_t external:1; @@ -31788,7 +61346,7 @@ index 1f54649c69..29ccb98351 100644 uint32_t is_egress:1; /**< Egress table. */ uint32_t is_transfer:1; /**< Transfer table. */ uint32_t dummy:1; /**< DR table. */ -@@ -696,10 +696,8 @@ struct mlx5_flow_handle { +@@ -696,10 +699,8 @@ struct mlx5_flow_handle { /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */ void *drv_flow; /**< pointer to driver flow object. */ uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */ @@ -31800,7 +61358,7 @@ index 1f54649c69..29ccb98351 100644 union { uint32_t rix_hrxq; /**< Hash Rx queue object index. */ uint32_t rix_jump; /**< Index to the jump action resource. 
*/ -@@ -715,6 +713,7 @@ struct mlx5_flow_handle { +@@ -715,6 +716,7 @@ struct mlx5_flow_handle { #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) struct mlx5_flow_handle_dv dvh; #endif @@ -31808,15 +61366,41 @@ index 1f54649c69..29ccb98351 100644 } __rte_packed; /* -@@ -1108,6 +1107,7 @@ struct mlx5_flow_workspace { +@@ -1096,10 +1098,10 @@ struct mlx5_flow_workspace { + /* If creating another flow in same thread, push new as stack. */ + struct mlx5_flow_workspace *prev; + struct mlx5_flow_workspace *next; ++ struct mlx5_flow_workspace *gc; + uint32_t inuse; /* can't create new flow with current. */ + struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; + struct mlx5_flow_rss_desc rss_desc; +- uint32_t rssq_num; /* Allocated queue num in rss_desc. */ + uint32_t flow_idx; /* Intermediate device flow index. */ + struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */ + struct mlx5_flow_meter_policy *policy; +@@ -1108,6 +1110,8 @@ struct mlx5_flow_workspace { /* The final policy when meter policy is hierarchy. */ uint32_t skip_matcher_reg:1; /* Indicates if need to skip matcher register in translate. */ + uint32_t mark:1; /* Indicates if flow contains mark action. */ ++ uint32_t vport_meta_tag; /* Used for vport index match. */ }; struct mlx5_flow_split_info { -@@ -1450,6 +1450,20 @@ flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx) +@@ -1297,8 +1301,11 @@ struct mlx5_flow_driver_ops { + }; + + /* mlx5_flow.c */ +- ++struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); ++void mlx5_flow_pop_thread_workspace(void); + struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); ++void mlx5_flow_workspace_gc_release(void); ++ + __extension__ + struct flow_grp_info { + uint64_t external:1; +@@ -1450,6 +1457,39 @@ flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx) return ct; } @@ -31834,10 +61418,39 @@ index 1f54649c69..29ccb98351 100644 + return 0; +} + ++/** ++ * Indicates whether flow source vport is representor port. ++ * ++ * @param[in] priv ++ * Pointer to device private context structure. ++ * @param[in] act_priv ++ * Pointer to actual device private context structure if have. ++ * ++ * @return ++ * True when the flow source vport is representor port, false otherwise. ++ */ ++static inline bool ++flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv) ++{ ++ MLX5_ASSERT(priv); ++ return (!act_priv ? 
(priv->representor_id != UINT16_MAX) : ++ (act_priv->representor_id != UINT16_MAX)); ++} ++ int mlx5_flow_group_to_table(struct rte_eth_dev *dev, const struct mlx5_flow_tunnel *tunnel, uint32_t group, uint32_t *table, -@@ -1752,4 +1766,14 @@ const struct mlx5_flow_tunnel * +@@ -1479,7 +1519,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, + int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); +-int mlx5_flow_validate_action_drop(uint64_t action_flags, ++int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, ++ bool is_root, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); + int mlx5_flow_validate_action_flag(uint64_t action_flags, +@@ -1752,4 +1793,14 @@ const struct mlx5_flow_tunnel * mlx5_get_tof(const struct rte_flow_item *items, const struct rte_flow_action *actions, enum mlx5_tof_rule_type *rule_type); @@ -32056,7 +61669,7 @@ index ddf4328dec..eb7fc43da3 100644 return -1; } diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 3da122cbb9..70031d3dc9 100644 +index 3da122cbb9..0c66c76ef5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c @@ -93,37 +93,6 @@ static int @@ -32097,15 +61710,62 @@ index 3da122cbb9..70031d3dc9 100644 /** * Initialize flow attributes structure according to flow items' types. * -@@ -172,6 +141,7 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, +@@ -144,6 +113,7 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, + struct mlx5_flow *dev_flow, bool tunnel_decap) + { + uint64_t layers = dev_flow->handle->layers; ++ bool tunnel_match = false; + + /* + * If layers is already initialized, it means this dev_flow is the +@@ -152,6 +122,13 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, + * have the user defined items as the flow is split. + */ + if (layers) { ++ if (tunnel_decap) { ++ /* ++ * If decap action before modify, it means the driver ++ * should take the inner as outer for the modify actions. 
++ */ ++ layers = ((layers >> 6) & MLX5_FLOW_LAYER_OUTER); ++ } + if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) + attr->ipv4 = 1; + else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) +@@ -172,8 +149,11 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: case RTE_FLOW_ITEM_TYPE_GENEVE: case RTE_FLOW_ITEM_TYPE_MPLS: +- if (tunnel_decap) + case RTE_FLOW_ITEM_TYPE_GTP: - if (tunnel_decap) ++ if (tunnel_decap) { + attr->attr = 0; ++ tunnel_match = true; ++ } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + if (!attr->ipv6) +@@ -187,7 +167,8 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, + ((const struct rte_flow_item_ipv4 *) + (item->mask))->hdr.next_proto_id; + if ((next_protocol == IPPROTO_IPIP || +- next_protocol == IPPROTO_IPV6) && tunnel_decap) ++ next_protocol == IPPROTO_IPV6) && tunnel_decap && ++ !tunnel_match) + attr->attr = 0; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: +@@ -202,7 +183,8 @@ flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr, + ((const struct rte_flow_item_ipv6 *) + (item->mask))->hdr.proto; + if ((next_protocol == IPPROTO_IPIP || +- next_protocol == IPPROTO_IPV6) && tunnel_decap) ++ next_protocol == IPPROTO_IPV6) && tunnel_decap && ++ !tunnel_match) attr->attr = 0; break; -@@ -326,7 +296,8 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl, + case RTE_FLOW_ITEM_TYPE_UDP: +@@ -326,7 +308,8 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl, mlx5_list_match_cb cb_match, mlx5_list_remove_cb cb_remove, mlx5_list_clone_cb cb_clone, @@ -32115,7 +61775,7 @@ index 3da122cbb9..70031d3dc9 100644 { struct mlx5_hlist *hl; struct mlx5_hlist *expected = NULL; -@@ -341,7 +312,9 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl, +@@ -341,7 +324,9 @@ flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl, cb_clone_free); if (!hl) { DRV_LOG(ERR, "%s hash creation failed", name); @@ -32126,7 +61786,7 @@ index 3da122cbb9..70031d3dc9 100644 return NULL; } if (!__atomic_compare_exchange_n(phl, &expected, hl, false, -@@ -1503,7 +1476,7 @@ mlx5_flow_field_id_to_modify_info +@@ -1503,7 +1488,7 @@ mlx5_flow_field_id_to_modify_info if (data->offset < 16) info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_DMAC_15_0}; @@ -32135,7 +61795,7 @@ index 3da122cbb9..70031d3dc9 100644 MLX5_MODI_OUT_DMAC_47_16}; } break; -@@ -1533,7 +1506,7 @@ mlx5_flow_field_id_to_modify_info +@@ -1533,7 +1518,7 @@ mlx5_flow_field_id_to_modify_info if (data->offset < 16) info[idx++] = (struct field_modify_info){2, 0, MLX5_MODI_OUT_SMAC_15_0}; @@ -32144,7 +61804,7 @@ index 3da122cbb9..70031d3dc9 100644 MLX5_MODI_OUT_SMAC_47_16}; } break; -@@ -1881,7 +1854,7 @@ flow_dv_convert_action_modify_field +@@ -1881,7 +1866,7 @@ flow_dv_convert_action_modify_field struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = { {0, 0, 0} }; uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = {0, 0, 0, 0, 0}; @@ -32153,7 +61813,7 @@ index 3da122cbb9..70031d3dc9 100644 uint32_t shift = 0; if (conf->src.field == RTE_FLOW_FIELD_POINTER || -@@ -1894,6 +1867,11 @@ flow_dv_convert_action_modify_field +@@ -1894,6 +1879,11 @@ flow_dv_convert_action_modify_field item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ? 
(void *)(uintptr_t)conf->src.pvalue : (void *)(uintptr_t)&conf->src.value; @@ -32165,7 +61825,7 @@ index 3da122cbb9..70031d3dc9 100644 } else { type = MLX5_MODIFICATION_TYPE_COPY; /** For COPY fill the destination field (dcopy) without mask. */ -@@ -2032,7 +2010,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, +@@ -2032,7 +2022,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, if (reg == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, @@ -32174,7 +61834,84 @@ index 3da122cbb9..70031d3dc9 100644 if (reg == REG_B) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item, -@@ -2879,8 +2857,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, +@@ -2087,6 +2077,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, + static int + flow_dv_validate_item_tag(struct rte_eth_dev *dev, + const struct rte_flow_item *item, ++ uint32_t *tag_bitmap, + const struct rte_flow_attr *attr __rte_unused, + struct rte_flow_error *error) + { +@@ -2130,6 +2121,12 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, + if (ret < 0) + return ret; + MLX5_ASSERT(ret != REG_NON); ++ if (*tag_bitmap & (1 << ret)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, ++ item->spec, ++ "Duplicated tag index"); ++ *tag_bitmap |= 1 << ret; + return 0; + } + +@@ -2140,6 +2137,8 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, + * Pointer to the rte_eth_dev structure. + * @param[in] item + * Item specification. ++ * @param[in] tag_bitmap ++ * Tag index bitmap. + * @param[in] attr + * Attributes of flow that includes this item. + * @param[in] item_flags +@@ -2155,6 +2154,7 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, + const struct rte_flow_item *item, + const struct rte_flow_attr *attr, + uint64_t item_flags, ++ struct mlx5_priv **act_priv, + struct rte_flow_error *error) + { + const struct rte_flow_item_port_id *spec = item->spec; +@@ -2213,6 +2213,7 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec, + "cannot match on a port from a" + " different E-Switch"); ++ *act_priv = esw_priv; + return 0; + } + +@@ -2732,30 +2733,12 @@ flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev, + struct rte_flow_error *error) + { + const struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_dev_ctx_shared *sh = priv->sh; +- bool direction_error = false; + + if (!priv->sh->pop_vlan_action) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "pop vlan action is not supported"); +- /* Pop VLAN is not supported in egress except for CX6 FDB mode. 
*/ +- if (attr->transfer) { +- bool fdb_tx = priv->representor_id != UINT16_MAX; +- bool is_cx5 = sh->steering_format_version == +- MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5; +- +- if (fdb_tx && is_cx5) +- direction_error = true; +- } else if (attr->egress) { +- direction_error = true; +- } +- if (direction_error) +- return rte_flow_error_set(error, ENOTSUP, +- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, +- NULL, +- "pop vlan action not supported for egress"); + if (action_flags & MLX5_FLOW_VLAN_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, action, +@@ -2879,8 +2862,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, { const struct rte_flow_action_of_push_vlan *push_vlan = action->conf; const struct mlx5_priv *priv = dev->data->dev_private; @@ -32183,7 +61920,7 @@ index 3da122cbb9..70031d3dc9 100644 if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) && push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ)) -@@ -2892,22 +2868,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, +@@ -2892,22 +2873,6 @@ flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "wrong action order, port_id should " "be after push VLAN"); @@ -32206,7 +61943,7 @@ index 3da122cbb9..70031d3dc9 100644 if (!attr->transfer && priv->representor) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -@@ -3205,7 +3165,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, +@@ -3205,7 +3170,7 @@ flow_dv_validate_action_set_meta(struct rte_eth_dev *dev, if (reg == REG_NON) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -32215,7 +61952,7 @@ index 3da122cbb9..70031d3dc9 100644 if (reg != REG_A && reg != REG_B) { struct mlx5_priv *priv = dev->data->dev_private; -@@ -3283,6 +3243,25 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, +@@ -3283,6 +3248,25 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, return 0; } @@ -32241,7 +61978,7 @@ index 3da122cbb9..70031d3dc9 100644 /** * Validate count action. * -@@ -3292,6 +3271,8 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, +@@ -3292,6 +3276,8 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, * Indicator if action is shared. * @param[in] action_flags * Holds the actions detected until now. @@ -32250,7 +61987,7 @@ index 3da122cbb9..70031d3dc9 100644 * @param[out] error * Pointer to error structure. 
* -@@ -3301,6 +3282,7 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, +@@ -3301,6 +3287,7 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev, static int flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared, uint64_t action_flags, @@ -32258,7 +61995,7 @@ index 3da122cbb9..70031d3dc9 100644 struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; -@@ -3312,10 +3294,10 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared, +@@ -3312,10 +3299,10 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "duplicate count actions set"); if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) && @@ -32271,7 +62008,7 @@ index 3da122cbb9..70031d3dc9 100644 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS return 0; #endif -@@ -3740,7 +3722,8 @@ flow_dv_encap_decap_resource_register +@@ -3740,7 +3727,8 @@ flow_dv_encap_decap_resource_register flow_dv_encap_decap_match_cb, flow_dv_encap_decap_remove_cb, flow_dv_encap_decap_clone_cb, @@ -32281,7 +62018,54 @@ index 3da122cbb9..70031d3dc9 100644 if (unlikely(!encaps_decaps)) return -rte_errno; resource->flags = dev_flow->dv.group ? 0 : 1; -@@ -4982,7 +4965,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, +@@ -4265,6 +4253,7 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) + { + struct rte_ether_hdr *eth = NULL; + struct rte_vlan_hdr *vlan = NULL; ++ struct rte_ipv4_hdr *ipv4 = NULL; + struct rte_ipv6_hdr *ipv6 = NULL; + struct rte_udp_hdr *udp = NULL; + char *next_hdr; +@@ -4281,24 +4270,27 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) + next_hdr += sizeof(struct rte_vlan_hdr); + } + +- /* HW calculates IPv4 csum. no need to proceed */ +- if (proto == RTE_ETHER_TYPE_IPV4) +- return 0; +- + /* non IPv4/IPv6 header. not supported */ +- if (proto != RTE_ETHER_TYPE_IPV6) { ++ if (proto != RTE_ETHER_TYPE_IPV4 && proto != RTE_ETHER_TYPE_IPV6) { + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "Cannot offload non IPv4/IPv6"); + } + +- ipv6 = (struct rte_ipv6_hdr *)next_hdr; +- +- /* ignore non UDP */ +- if (ipv6->proto != IPPROTO_UDP) +- return 0; ++ if (proto == RTE_ETHER_TYPE_IPV4) { ++ ipv4 = (struct rte_ipv4_hdr *)next_hdr; ++ /* ignore non UDP */ ++ if (ipv4->next_proto_id != IPPROTO_UDP) ++ return 0; ++ udp = (struct rte_udp_hdr *)(ipv4 + 1); ++ } else { ++ ipv6 = (struct rte_ipv6_hdr *)next_hdr; ++ /* ignore non UDP */ ++ if (ipv6->proto != IPPROTO_UDP) ++ return 0; ++ udp = (struct rte_udp_hdr *)(ipv6 + 1); ++ } + +- udp = (struct rte_udp_hdr *)(ipv6 + 1); + udp->dgram_cksum = 0; + + return 0; +@@ -4982,7 +4974,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, const struct rte_flow_attr *attributes, bool external, struct rte_flow_error *error) { @@ -32290,7 +62074,7 @@ index 3da122cbb9..70031d3dc9 100644 int ret = 0; struct flow_grp_info grp_info = { .external = !!external, -@@ -5013,6 +4996,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, +@@ -5013,6 +5005,10 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "target group must be other than" " the current flow group"); @@ -32301,7 +62085,7 @@ index 3da122cbb9..70031d3dc9 100644 return 0; } -@@ -5145,7 +5132,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, +@@ -5145,7 +5141,7 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused, * Pointer to error structure. 
* * @return @@ -32310,7 +62094,7 @@ index 3da122cbb9..70031d3dc9 100644 */ static int mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, -@@ -5230,21 +5217,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, +@@ -5230,21 +5226,12 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev, */ struct mlx5_priv *policy_port_priv = mtr_policy->dev->data->dev_private; @@ -32336,16 +62120,206 @@ index 3da122cbb9..70031d3dc9 100644 } if (flow_src_port != policy_port_priv->representor_id) return rte_flow_error_set(error, -@@ -5678,7 +5656,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5560,8 +5547,8 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) + * Pointer to the RSS action in sample action list. + * @param[out] count + * Pointer to the COUNT action in sample action list. +- * @param[out] fdb_mirror_limit +- * Pointer to the FDB mirror limitation flag. ++ * @param[out] fdb_mirror ++ * Pointer to the FDB mirror flag. + * @param[out] error + * Pointer to error structure. + * +@@ -5570,6 +5557,7 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) + */ + static int + flow_dv_validate_action_sample(uint64_t *action_flags, ++ uint64_t *sub_action_flags, + const struct rte_flow_action *action, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, +@@ -5577,14 +5565,15 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + const struct rte_flow_action_rss *rss, + const struct rte_flow_action_rss **sample_rss, + const struct rte_flow_action_count **count, +- int *fdb_mirror_limit, ++ int *fdb_mirror, ++ uint16_t *sample_port_id, + struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_config *dev_conf = &priv->config; + const struct rte_flow_action_sample *sample = action->conf; ++ const struct rte_flow_action_port_id *port = NULL; + const struct rte_flow_action *act; +- uint64_t sub_action_flags = 0; + uint16_t queue_index = 0xFFFF; + int actions_n = 0; + int ret; +@@ -5630,20 +5619,20 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + ret = mlx5_flow_validate_action_queue(act, +- sub_action_flags, ++ *sub_action_flags, + dev, + attr, error); + if (ret < 0) + return ret; + queue_index = ((const struct rte_flow_action_queue *) + (act->conf))->index; +- sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; ++ *sub_action_flags |= MLX5_FLOW_ACTION_QUEUE; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + *sample_rss = act->conf; + ret = mlx5_flow_validate_action_rss(act, +- sub_action_flags, ++ *sub_action_flags, + dev, attr, + item_flags, + error); +@@ -5659,48 +5648,57 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + "or level in the same flow"); + if (*sample_rss != NULL && (*sample_rss)->queue_num) + queue_index = (*sample_rss)->queue[0]; +- sub_action_flags |= MLX5_FLOW_ACTION_RSS; ++ *sub_action_flags |= MLX5_FLOW_ACTION_RSS; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + ret = flow_dv_validate_action_mark(dev, act, +- sub_action_flags, ++ *sub_action_flags, + attr, error); + if (ret < 0) + return ret; + if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) +- sub_action_flags |= MLX5_FLOW_ACTION_MARK | ++ *sub_action_flags |= MLX5_FLOW_ACTION_MARK | + MLX5_FLOW_ACTION_MARK_EXT; + else +- sub_action_flags |= MLX5_FLOW_ACTION_MARK; ++ *sub_action_flags |= MLX5_FLOW_ACTION_MARK; + ++actions_n; + break; case RTE_FLOW_ACTION_TYPE_COUNT: ret = 
flow_dv_validate_action_count - (dev, false, *action_flags | sub_action_flags, +- (dev, false, *action_flags | sub_action_flags, - error); ++ (dev, false, *action_flags | *sub_action_flags, + attr, error); if (ret < 0) return ret; *count = act->conf; -@@ -5832,7 +5810,8 @@ flow_dv_modify_hdr_resource_register +- sub_action_flags |= MLX5_FLOW_ACTION_COUNT; ++ *sub_action_flags |= MLX5_FLOW_ACTION_COUNT; + *action_flags |= MLX5_FLOW_ACTION_COUNT; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_PORT_ID: + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + ret = flow_dv_validate_action_port_id(dev, +- sub_action_flags, ++ *sub_action_flags, + act, + attr, + error); + if (ret) + return ret; +- sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; ++ if (act->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { ++ port = (const struct rte_flow_action_port_id *) ++ act->conf; ++ *sample_port_id = port->original ? ++ dev->data->port_id : port->id; ++ } else { ++ *sample_port_id = ((const struct rte_flow_action_ethdev *) ++ act->conf)->port_id; ++ } ++ *sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + ret = flow_dv_validate_action_raw_encap_decap +- (dev, NULL, act->conf, attr, &sub_action_flags, ++ (dev, NULL, act->conf, attr, sub_action_flags, + &actions_n, action, item_flags, error); + if (ret < 0) + return ret; +@@ -5709,12 +5707,12 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + ret = flow_dv_validate_action_l2_encap(dev, +- sub_action_flags, ++ *sub_action_flags, + act, attr, + error); + if (ret < 0) + return ret; +- sub_action_flags |= MLX5_FLOW_ACTION_ENCAP; ++ *sub_action_flags |= MLX5_FLOW_ACTION_ENCAP; + ++actions_n; + break; + default: +@@ -5726,7 +5724,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + } + } + if (attr->ingress && !attr->transfer) { +- if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE | ++ if (!(*sub_action_flags & (MLX5_FLOW_ACTION_QUEUE | + MLX5_FLOW_ACTION_RSS))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, +@@ -5748,38 +5746,36 @@ flow_dv_validate_action_sample(uint64_t *action_flags, + "E-Switch doesn't support " + "any optional action " + "for sampling"); +- if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE) ++ if (*sub_action_flags & MLX5_FLOW_ACTION_QUEUE) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); +- if (sub_action_flags & MLX5_FLOW_ACTION_RSS) ++ if (*sub_action_flags & MLX5_FLOW_ACTION_RSS) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "unsupported action QUEUE"); +- if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) ++ if (!(*sub_action_flags & MLX5_FLOW_ACTION_PORT_ID)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "E-Switch must has a dest " + "port for mirroring"); +- if (!priv->config.hca_attr.reg_c_preserve && +- priv->representor_id != UINT16_MAX) +- *fdb_mirror_limit = 1; ++ *fdb_mirror = 1; + } + /* Continue validation for Xcap actions.*/ +- if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && ++ if ((*sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) && + (queue_index == 0xFFFF || + mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) { +- if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == ++ if ((*sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) == + MLX5_FLOW_XCAP_ACTIONS) + return rte_flow_error_set(error, ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "encap and decap " + "combination aren't " + "supported"); +- if (!attr->transfer && attr->ingress && (sub_action_flags & ++ if (!attr->transfer && attr->ingress && (*sub_action_flags & + MLX5_FLOW_ACTION_ENCAP)) + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, +@@ -5832,7 +5828,8 @@ flow_dv_modify_hdr_resource_register flow_dv_modify_match_cb, flow_dv_modify_remove_cb, flow_dv_modify_clone_cb, @@ -32355,7 +62329,93 @@ index 3da122cbb9..70031d3dc9 100644 if (unlikely(!modify_cmds)) return -rte_errno; resource->root = !dev_flow->dv.group; -@@ -6714,6 +6693,12 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev, +@@ -5874,7 +5871,7 @@ flow_dv_counter_get_by_idx(struct rte_eth_dev *dev, + + /* Decrease to original index and clear shared bit. */ + idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); +- MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n); ++ MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < MLX5_COUNTER_POOLS_MAX_NUM); + pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; + MLX5_ASSERT(pool); + if (ppool) +@@ -5950,39 +5947,6 @@ flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id) + return pool; + } + +-/** +- * Resize a counter container. +- * +- * @param[in] dev +- * Pointer to the Ethernet device structure. +- * +- * @return +- * 0 on success, otherwise negative errno value and rte_errno is set. +- */ +-static int +-flow_dv_container_resize(struct rte_eth_dev *dev) +-{ +- struct mlx5_priv *priv = dev->data->dev_private; +- struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; +- void *old_pools = cmng->pools; +- uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE; +- uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; +- void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); +- +- if (!pools) { +- rte_errno = ENOMEM; +- return -ENOMEM; +- } +- if (old_pools) +- memcpy(pools, old_pools, cmng->n * +- sizeof(struct mlx5_flow_counter_pool *)); +- cmng->n = resize; +- cmng->pools = pools; +- if (old_pools) +- mlx5_free(old_pools); +- return 0; +-} +- + /** + * Query a devx flow counter. + * +@@ -6034,8 +5998,6 @@ _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts, + * The devX counter handle. + * @param[in] age + * Whether the pool is for counter that was allocated for aging. +- * @param[in/out] cont_cur +- * Pointer to the container pointer, it will be update in pool resize. + * + * @return + * The pool container pointer on success, NULL otherwise and rte_errno is set. +@@ -6047,9 +6009,14 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; +- bool fallback = priv->sh->cmng.counter_fallback; ++ bool fallback = cmng->counter_fallback; + uint32_t size = sizeof(*pool); + ++ if (cmng->n_valid == MLX5_COUNTER_POOLS_MAX_NUM) { ++ DRV_LOG(ERR, "All counter is in used, try again later."); ++ rte_errno = EAGAIN; ++ return NULL; ++ } + size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE; + size += (!age ? 
0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE); + pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); +@@ -6068,11 +6035,6 @@ flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs, + pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; + rte_spinlock_lock(&cmng->pool_update_sl); + pool->index = cmng->n_valid; +- if (pool->index == cmng->n && flow_dv_container_resize(dev)) { +- mlx5_free(pool); +- rte_spinlock_unlock(&cmng->pool_update_sl); +- return NULL; +- } + cmng->pools[pool->index] = pool; + cmng->n_valid++; + if (unlikely(fallback)) { +@@ -6714,6 +6676,12 @@ flow_dv_validate_item_integrity(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ITEM, integrity_item, "unsupported integrity filter"); @@ -32368,7 +62428,7 @@ index 3da122cbb9..70031d3dc9 100644 if (spec->level > 1) { if (pattern_flags & MLX5_FLOW_ITEM_INNER_INTEGRITY) return rte_flow_error_set -@@ -6844,7 +6829,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -6844,14 +6812,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, bool external, int hairpin, struct rte_flow_error *error) { int ret; @@ -32377,19 +62437,51 @@ index 3da122cbb9..70031d3dc9 100644 uint64_t item_flags = 0; uint64_t last_item = 0; uint8_t next_protocol = 0xff; -@@ -6911,7 +6896,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + uint16_t ether_type = 0; + int actions_n = 0; + uint8_t item_ipv6_proto = 0; +- int fdb_mirror_limit = 0; ++ int fdb_mirror = 0; + int modify_after_mirror = 0; + const struct rte_flow_item *geneve_item = NULL; + const struct rte_flow_item *gre_item = NULL; +@@ -6911,7 +6879,19 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item *integrity_items[2] = {NULL, NULL}; const struct rte_flow_item *port_id_item = NULL; bool def_policy = false; + bool shared_count = false; uint16_t udp_dport = 0; -+ uint32_t tag_id = 0; ++ uint32_t tag_id = 0, tag_bitmap = 0; + const struct rte_flow_action_age *non_shared_age = NULL; + const struct rte_flow_action_count *count = NULL; ++ const struct rte_flow_action_port_id *port = NULL; ++ const struct mlx5_rte_flow_item_tag *mlx5_tag; ++ struct mlx5_priv *act_priv = NULL; ++ int aso_after_sample = 0; ++ struct mlx5_priv *port_priv = NULL; ++ uint64_t sub_action_flags = 0; ++ uint16_t sample_port_id = 0; ++ uint16_t port_id = 0; if (items == NULL) return -1; -@@ -7209,8 +7198,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -6953,7 +6933,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + break; + case RTE_FLOW_ITEM_TYPE_PORT_ID: + ret = flow_dv_validate_item_port_id +- (dev, items, attr, item_flags, error); ++ (dev, items, attr, item_flags, &act_priv, error); + if (ret < 0) + return ret; + last_item = MLX5_FLOW_ITEM_PORT_ID; +@@ -7203,14 +7183,23 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + last_item = MLX5_FLOW_LAYER_ICMP6; + break; + case RTE_FLOW_ITEM_TYPE_TAG: +- ret = flow_dv_validate_item_tag(dev, items, ++ ret = flow_dv_validate_item_tag(dev, items, &tag_bitmap, + attr, error); + if (ret < 0) return ret; last_item = MLX5_FLOW_ITEM_TAG; break; @@ -32398,10 +62490,17 @@ index 3da122cbb9..70031d3dc9 100644 + last_item = MLX5_FLOW_ITEM_TX_QUEUE; + break; + case MLX5_RTE_FLOW_ITEM_TYPE_TAG: ++ mlx5_tag = (const struct mlx5_rte_flow_item_tag *)items->spec; ++ if (tag_bitmap & (1 << mlx5_tag->id)) ++ return rte_flow_error_set(error, EINVAL, 
++ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, ++ items->spec, ++ "Duplicated tag index"); ++ tag_bitmap |= 1 << mlx5_tag->id; break; case RTE_FLOW_ITEM_TYPE_GTP: ret = flow_dv_validate_item_gtp(dev, items, item_flags, -@@ -7281,7 +7272,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7281,7 +7270,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { int type = actions->type; @@ -32409,7 +62508,22 @@ index 3da122cbb9..70031d3dc9 100644 if (!mlx5_flow_os_action_supported(type)) return rte_flow_error_set(error, ENOTSUP, -@@ -7380,6 +7370,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7310,6 +7298,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + error); + if (ret) + return ret; ++ if (type == RTE_FLOW_ACTION_TYPE_PORT_ID) { ++ port = (const struct rte_flow_action_port_id *) ++ actions->conf; ++ port_id = port->original ? dev->data->port_id : port->id; ++ } else { ++ port_id = ((const struct rte_flow_action_ethdev *) ++ actions->conf)->port_id; ++ } + action_flags |= MLX5_FLOW_ACTION_PORT_ID; + ++actions_n; + break; +@@ -7380,11 +7376,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ++actions_n; if (action_flags & MLX5_FLOW_ACTION_SAMPLE) modify_after_mirror = 1; @@ -32418,7 +62532,13 @@ index 3da122cbb9..70031d3dc9 100644 action_flags |= MLX5_FLOW_ACTION_SET_TAG; rw_act_num += MLX5_ACT_NUM_SET_TAG; break; -@@ -7438,9 +7430,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + case RTE_FLOW_ACTION_TYPE_DROP: +- ret = mlx5_flow_validate_action_drop(action_flags, ++ ret = mlx5_flow_validate_action_drop(dev, is_root, + attr, error); + if (ret < 0) + return ret; +@@ -7438,9 +7436,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_dv_validate_action_count(dev, shared_count, action_flags, @@ -32430,7 +62550,26 @@ index 3da122cbb9..70031d3dc9 100644 action_flags |= MLX5_FLOW_ACTION_COUNT; ++actions_n; break; -@@ -7746,6 +7739,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7657,12 +7656,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + error); + if (ret) + return ret; +- if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && +- fdb_mirror_limit) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, +- NULL, +- "sample and jump action combination is not supported"); + ++actions_n; + action_flags |= MLX5_FLOW_ACTION_JUMP; + break; +@@ -7742,10 +7735,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "duplicate age actions set"); ++ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) ++ aso_after_sample = 1; + action_flags |= MLX5_FLOW_ACTION_AGE; ++actions_n; break; case RTE_FLOW_ACTION_TYPE_AGE: @@ -32438,7 +62577,7 @@ index 3da122cbb9..70031d3dc9 100644 ret = flow_dv_validate_action_age(action_flags, actions, dev, error); -@@ -7753,15 +7747,15 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7753,21 +7749,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return ret; /* * Validate the regular AGE action (using counter) @@ -32457,7 +62596,27 @@ index 3da122cbb9..70031d3dc9 100644 if (sample_count) return rte_flow_error_set (error, EINVAL, -@@ -7814,6 +7808,11 @@ 
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "old age action and count must be in the same sub flow"); ++ } else { ++ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) ++ aso_after_sample = 1; + } + action_flags |= MLX5_FLOW_ACTION_AGE; + ++actions_n; +@@ -7806,14 +7805,21 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + break; + case RTE_FLOW_ACTION_TYPE_SAMPLE: + ret = flow_dv_validate_action_sample(&action_flags, ++ &sub_action_flags, + actions, dev, + attr, item_flags, + rss, &sample_rss, + &sample_count, +- &fdb_mirror_limit, ++ &fdb_mirror, ++ &sample_port_id, error); if (ret < 0) return ret; @@ -32469,7 +62628,16 @@ index 3da122cbb9..70031d3dc9 100644 action_flags |= MLX5_FLOW_ACTION_SAMPLE; ++actions_n; break; -@@ -7858,7 +7857,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7839,6 +7845,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + error); + if (ret < 0) + return ret; ++ if (action_flags & MLX5_FLOW_ACTION_SAMPLE) ++ aso_after_sample = 1; + action_flags |= MLX5_FLOW_ACTION_CT; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET: +@@ -7858,7 +7866,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, * - Explicit decap action is prohibited by the tunnel offload API. * - Drop action in tunnel steer rule is prohibited by the API. * - Application cannot use MARK action because it's value can mask @@ -32478,7 +62646,29 @@ index 3da122cbb9..70031d3dc9 100644 * - JUMP in tunnel match rule has no support in current PMD * implementation. * - TAG & META are reserved for future uses. -@@ -7970,6 +7969,28 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7904,18 +7912,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + /* + * Validate the drop action mutual exclusion with other actions. + * Drop action is mutually-exclusive with any other action, except for +- * Count action. ++ * Count/Sample/Age actions. + * Drop action compatibility with tunnel offload was already validated. 
+ */ + if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH | + MLX5_FLOW_ACTION_TUNNEL_MATCH)); + else if ((action_flags & MLX5_FLOW_ACTION_DROP) && +- (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) ++ (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_DROP_INCLUSIVE_ACTIONS))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Drop action is mutually-exclusive " + "with any other action, except for " +- "Count action"); ++ "Count/Sample/Age action"); + /* Eswitch has few restrictions on using items and actions */ + if (attr->transfer) { + if (!mlx5_flow_ext_mreg_supported(dev) && +@@ -7970,6 +7978,28 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap and decap " "combination aren't supported"); @@ -32488,7 +62678,7 @@ index 3da122cbb9..70031d3dc9 100644 + bool direction_error = false; + + if (attr->transfer) { -+ bool fdb_tx = priv->representor_id != UINT16_MAX; ++ bool fdb_tx = flow_source_vport_representor(priv, act_priv); + bool is_cx5 = sh->steering_format_version == + MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5; + @@ -32507,7 +62697,7 @@ index 3da122cbb9..70031d3dc9 100644 if (!attr->transfer && attr->ingress) { if (action_flags & MLX5_FLOW_ACTION_ENCAP) return rte_flow_error_set -@@ -7977,12 +7998,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7977,12 +8007,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap is not supported" " for ingress traffic"); @@ -32520,7 +62710,35 @@ index 3da122cbb9..70031d3dc9 100644 else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) == MLX5_FLOW_VLAN_ACTIONS) return rte_flow_error_set -@@ -8022,6 +8037,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7992,6 +8016,27 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + "multiple VLAN actions"); + } + } ++ /* Pop VLAN is not supported in egress except for NICs newer than CX5. */ ++ if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN) { ++ struct mlx5_dev_ctx_shared *sh = priv->sh; ++ bool direction_error = false; ++ ++ if (attr->transfer) { ++ bool fdb_tx = flow_source_vport_representor(priv, act_priv); ++ bool is_cx5 = sh->steering_format_version == ++ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5; ++ ++ if (fdb_tx && is_cx5) ++ direction_error = true; ++ } else if (attr->egress) { ++ direction_error = true; ++ } ++ if (direction_error) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, ++ NULL, ++ "pop vlan action not supported for egress"); ++ } + if (action_flags & MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY) { + if ((action_flags & (MLX5_FLOW_FATE_ACTIONS & + ~MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) && +@@ -8022,6 +8067,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, "cannot be done before meter action"); } } @@ -32541,10 +62759,53 @@ index 3da122cbb9..70031d3dc9 100644 /* * Hairpin flow will add one more TAG action in TX implicit mode. * In TX explicit mode, there will be no hairpin flow ID. 
-@@ -8045,6 +8074,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "sample before modify action is not supported"); +@@ -8040,11 +8099,59 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + NULL, "too many header modify" + " actions to support"); + } +- /* Eswitch egress mirror and modify flow has limitation on CX5 */ +- if (fdb_mirror_limit && modify_after_mirror) ++ if (fdb_mirror) { ++ if (!priv->sh->cdev->config.hca_attr.reg_c_preserve && ++ flow_source_vport_representor(priv, act_priv)) { ++ /* Eswitch egress mirror and modify flow has limitation on CX5 */ ++ if (modify_after_mirror) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "sample before modify action is not supported"); ++ if (action_flags & MLX5_FLOW_ACTION_JUMP) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "sample and jump action combination is not supported"); ++ } ++ if (aso_mask > 0 && aso_after_sample && fdb_mirror) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "sample before ASO action is not supported"); ++ if (sub_action_flags & MLX5_FLOW_ACTION_PORT_ID) { ++ port_priv = mlx5_port_to_eswitch_info(sample_port_id, false); ++ if (flow_source_vport_representor(priv, port_priv)) { ++ if (sub_action_flags & MLX5_FLOW_ACTION_ENCAP) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "mirror to rep port with encap is not supported"); ++ } else { ++ if (!(sub_action_flags & MLX5_FLOW_ACTION_ENCAP) && ++ (action_flags & MLX5_FLOW_ACTION_JUMP)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "mirror to wire port without encap is not supported"); ++ } ++ } ++ if ((action_flags & MLX5_FLOW_ACTION_PORT_ID) && ++ (action_flags & MLX5_FLOW_ACTION_ENCAP)) { ++ port_priv = mlx5_port_to_eswitch_info(port_id, false); ++ if (flow_source_vport_representor(priv, port_priv)) ++ return rte_flow_error_set(error, ENOTSUP, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "mirror to rep port with encap is not supported"); ++ } ++ } + /* + * Validation the NIC Egress flow on representor, except implicit + * hairpin default egress flow with TX_QUEUE item, other flows not @@ -32552,7 +62813,9 @@ index 3da122cbb9..70031d3dc9 100644 + */ + if ((!attr->transfer && attr->egress) && priv->representor && + !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE)) -+ return rte_flow_error_set(error, EINVAL, + return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, NULL, +- "sample before modify action is not supported"); + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "NIC egress rules on representors" @@ -32560,7 +62823,15 @@ index 3da122cbb9..70031d3dc9 100644 return 0; } -@@ -9184,7 +9225,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, +@@ -8084,6 +8191,7 @@ flow_dv_prepare(struct rte_eth_dev *dev, + wks->skip_matcher_reg = 0; + wks->policy = NULL; + wks->final_policy = NULL; ++ wks->vport_meta_tag = 0; + /* In case of corrupting the memory. 
*/ + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + rte_flow_error_set(error, ENOSPC, +@@ -9184,7 +9292,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev, geneve_opt_v->option_type && geneve_opt_resource->length == geneve_opt_v->option_len) { @@ -32569,8 +62840,15 @@ index 3da122cbb9..70031d3dc9 100644 __atomic_fetch_add(&geneve_opt_resource->refcnt, 1, __ATOMIC_RELAXED); } else { -@@ -9713,7 +9754,7 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, +@@ -9708,12 +9816,14 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, + { + const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL; + const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL; ++ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_priv *priv; + uint16_t mask, id; ++ MLX5_ASSERT(wks); if (pid_v && pid_v->id == MLX5_PORT_ESW_MGR) { flow_dv_translate_item_source_vport(matcher, key, - flow_dv_get_esw_manager_vport_id(dev), 0xffff); @@ -32578,7 +62856,15 @@ index 3da122cbb9..70031d3dc9 100644 return 0; } mask = pid_m ? pid_m->id : 0xffff; -@@ -10170,7 +10211,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key, +@@ -9727,6 +9837,7 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher, + * register. + */ + if (priv->vport_meta_mask) { ++ wks->vport_meta_tag = priv->vport_meta_tag; + /* + * Provide the hint for SW steering library + * to insert the flow into ingress domain and +@@ -10170,7 +10281,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key, /* Don't count both inner and outer flex items in one rule. */ if (mlx5_flex_acquire_index(dev, spec->handle, true) != index) MLX5_ASSERT(false); @@ -32587,7 +62873,7 @@ index 3da122cbb9..70031d3dc9 100644 } mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner); } -@@ -10226,7 +10267,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria) +@@ -10226,7 +10337,7 @@ __flow_dv_adjust_buf_size(size_t *size, uint8_t match_criteria) * Check flow matching criteria first, subtract misc5/4 length if flow * doesn't own misc5/4 parameters. In some old rdma-core releases, * misc5/4 are not supported, and matcher creation failure is expected @@ -32596,7 +62882,7 @@ index 3da122cbb9..70031d3dc9 100644 * misc5 is right after misc4. 
*/ if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) { -@@ -10514,7 +10555,8 @@ flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) +@@ -10514,7 +10625,8 @@ flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) tbl_data->tunnel->tunnel_id : 0, tbl_data->group_id); } @@ -32606,7 +62892,7 @@ index 3da122cbb9..70031d3dc9 100644 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx); } -@@ -10769,7 +10811,8 @@ flow_dv_tag_resource_register +@@ -10769,7 +10881,8 @@ flow_dv_tag_resource_register flow_dv_tag_match_cb, flow_dv_tag_remove_cb, flow_dv_tag_clone_cb, @@ -32616,7 +62902,7 @@ index 3da122cbb9..70031d3dc9 100644 if (unlikely(!tag_table)) return -rte_errno; entry = mlx5_hlist_register(tag_table, tag_be24, &ctx); -@@ -11074,6 +11117,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, +@@ -11074,6 +11187,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_handle *dh = dev_flow->handle; @@ -32624,7 +62910,7 @@ index 3da122cbb9..70031d3dc9 100644 struct mlx5_hrxq *hrxq; MLX5_ASSERT(rss_desc->queue_num); -@@ -11088,6 +11132,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, +@@ -11088,6 +11202,7 @@ flow_dv_hrxq_prepare(struct rte_eth_dev *dev, return NULL; hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], *hrxq_idx); @@ -32632,7 +62918,7 @@ index 3da122cbb9..70031d3dc9 100644 return hrxq; } -@@ -11425,7 +11470,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) +@@ -11425,7 +11540,7 @@ flow_dv_dest_array_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) goto error; } } @@ -32641,7 +62927,7 @@ index 3da122cbb9..70031d3dc9 100644 ret = mlx5_os_flow_dr_create_flow_action_dest_array (domain, resource->num_of_dest, -@@ -11660,7 +11705,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, +@@ -11660,7 +11775,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, (((const struct rte_flow_action_mark *) (sub_actions->conf))->id); @@ -32650,7 +62936,44 @@ index 3da122cbb9..70031d3dc9 100644 pre_rix = dev_flow->handle->dvh.rix_tag; /* Save the mark resource before sample */ pre_r = dev_flow->dv.tag_resource; -@@ -12820,7 +12865,7 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -11770,6 +11885,9 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, + uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)]; + uint64_t set_action; + } action_ctx = { .set_action = 0 }; ++ uint32_t vport_meta_tag = wks->vport_meta_tag ? ++ wks->vport_meta_tag : ++ priv->vport_meta_tag; + + res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB; + MLX5_SET(set_action_in, action_ctx.action_in, action_type, +@@ -11777,7 +11895,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev, + MLX5_SET(set_action_in, action_ctx.action_in, field, + MLX5_MODI_META_REG_C_0); + MLX5_SET(set_action_in, action_ctx.action_in, data, +- priv->vport_meta_tag); ++ vport_meta_tag); + res->set_action = action_ctx.set_action; + } else if (attr->ingress) { + res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX; +@@ -11963,7 +12081,7 @@ flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx) + } + + /** +- * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools. ++ * Resize the ASO age pools array by MLX5_ASO_AGE_CONTAINER_RESIZE pools. + * + * @param[in] dev + * Pointer to the Ethernet device structure. 
+@@ -11977,7 +12095,7 @@ flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; + void *old_pools = mng->pools; +- uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE; ++ uint32_t resize = mng->n + MLX5_ASO_AGE_CONTAINER_RESIZE; + uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; + void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); + +@@ -12820,7 +12938,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_FLAG: action_flags |= MLX5_FLOW_ACTION_FLAG; @@ -32659,7 +62982,7 @@ index 3da122cbb9..70031d3dc9 100644 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { struct rte_flow_action_mark mark = { .id = MLX5_FLOW_MARK_DEFAULT, -@@ -12849,7 +12894,7 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -12849,7 +12967,7 @@ flow_dv_translate(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_MARK: action_flags |= MLX5_FLOW_ACTION_MARK; @@ -32668,7 +62991,7 @@ index 3da122cbb9..70031d3dc9 100644 if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { const struct rte_flow_action_mark *mark = (const struct rte_flow_action_mark *) -@@ -13306,8 +13351,7 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -13306,8 +13424,7 @@ flow_dv_translate(struct rte_eth_dev *dev, */ if (action_flags & MLX5_FLOW_ACTION_AGE) { if ((non_shared_age && count) || @@ -32678,7 +63001,7 @@ index 3da122cbb9..70031d3dc9 100644 /* Creates age by counters. */ cnt_act = flow_dv_prepare_counter (dev, dev_flow, -@@ -13538,11 +13582,13 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -13538,11 +13655,13 @@ flow_dv_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_ICMP: flow_dv_translate_item_icmp(match_mask, match_value, items, tunnel); @@ -32692,7 +63015,7 @@ index 3da122cbb9..70031d3dc9 100644 last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: -@@ -13617,12 +13663,14 @@ flow_dv_translate(struct rte_eth_dev *dev, +@@ -13617,12 +13736,14 @@ flow_dv_translate(struct rte_eth_dev *dev, /* * When E-Switch mode is enabled, we have two cases where we need to * set the source port manually. @@ -32711,7 +63034,16 @@ index 3da122cbb9..70031d3dc9 100644 if (flow_dv_translate_item_port_id(dev, match_mask, match_value, NULL, attr)) return -rte_errno; -@@ -14508,7 +14556,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +@@ -13999,7 +14120,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, + } + dv->actions[n++] = priv->sh->default_miss_action; + } +- misc_mask = flow_dv_matcher_enable(dv->value.buf); ++ misc_mask = flow_dv_matcher_enable(dv_h->matcher->mask.buf); + __flow_dv_adjust_buf_size(&dv->value.size, misc_mask); + err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, + (void *)&dv->value, n, +@@ -14508,7 +14629,7 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) int index = rte_bsf32(dev_handle->flex_item); mlx5_flex_release_index(dev, index); @@ -32720,7 +63052,7 @@ index 3da122cbb9..70031d3dc9 100644 } if (dev_handle->dvh.matcher) flow_dv_matcher_release(dev, dev_handle); -@@ -14607,8 +14655,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, +@@ -14607,8 +14728,8 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, * MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share * same slot in mlx5_rss_hash_fields. 
* @@ -32731,7 +63063,7 @@ index 3da122cbb9..70031d3dc9 100644 * @param[in, out] hash_field * hash_field variable needed to be adjusted. * -@@ -14616,10 +14664,10 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, +@@ -14616,10 +14737,10 @@ __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev, * void */ static void @@ -32744,7 +63076,7 @@ index 3da122cbb9..70031d3dc9 100644 switch (*hash_field & ~IBV_RX_HASH_INNER) { case MLX5_RSS_HASH_IPV4: -@@ -14721,7 +14769,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, +@@ -14721,7 +14842,8 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev, uint64_t hash_fields = mlx5_rss_hash_fields[i]; int tunnel = 0; @@ -32754,7 +63086,7 @@ index 3da122cbb9..70031d3dc9 100644 if (shared_rss->origin.level > 1) { hash_fields |= IBV_RX_HASH_INNER; tunnel = 1; -@@ -15455,7 +15504,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, +@@ -15455,7 +15577,6 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, NULL, "cannot create policy " "mark action for this color"); @@ -32762,7 +63094,7 @@ index 3da122cbb9..70031d3dc9 100644 if (flow_dv_tag_resource_register(dev, tag_be, &dev_flow, &flow_err)) return -rte_mtr_error_set(error, -@@ -15467,6 +15515,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, +@@ -15467,6 +15588,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, act_cnt->rix_mark = dev_flow.handle->dvh.rix_tag; action_flags |= MLX5_FLOW_ACTION_MARK; @@ -32770,7 +63102,7 @@ index 3da122cbb9..70031d3dc9 100644 break; } case RTE_FLOW_ACTION_TYPE_SET_TAG: -@@ -15750,6 +15799,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, +@@ -15750,6 +15872,8 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev, act_cnt->next_sub_policy = NULL; mtr_policy->is_hierarchy = 1; mtr_policy->dev = next_policy->dev; @@ -32779,7 +63111,83 @@ index 3da122cbb9..70031d3dc9 100644 action_flags |= MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY; break; -@@ -16880,7 +16931,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, +@@ -16165,7 +16289,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) + static int + __flow_dv_create_policy_flow(struct rte_eth_dev *dev, + uint32_t color_reg_c_idx, +- enum rte_color color, void *matcher_object, ++ enum rte_color color, struct mlx5_flow_dv_matcher *dv_matcher, + int actions_n, void *actions, + bool match_src_port, const struct rte_flow_item *item, + void **rule, const struct rte_flow_attr *attr) +@@ -16191,9 +16315,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, + flow_dv_match_meta_reg(matcher.buf, value.buf, + (enum modify_reg)color_reg_c_idx, + rte_col_2_mlx5_col(color), UINT32_MAX); +- misc_mask = flow_dv_matcher_enable(value.buf); ++ misc_mask = flow_dv_matcher_enable(dv_matcher->mask.buf); + __flow_dv_adjust_buf_size(&value.size, misc_mask); +- ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value, ++ ret = mlx5_flow_os_create_flow(dv_matcher->matcher_object, (void *)&value, + actions_n, actions, rule); + if (ret) { + DRV_LOG(ERR, "Failed to create meter policy%d flow.", color); +@@ -16341,7 +16465,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + /* Create flow, matching color. 
*/ + if (__flow_dv_create_policy_flow(dev, + color_reg_c_idx, (enum rte_color)i, +- color_rule->matcher->matcher_object, ++ color_rule->matcher, + acts[i].actions_n, acts[i].dv_actions, + svport_match, NULL, &color_rule->rule, + &attr)) { +@@ -16549,8 +16673,11 @@ flow_dv_create_policy_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter_policy *mtr_policy) + { + int i; ++ int ret = 0; + uint16_t sub_policy_num; ++ struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace(); + ++ RTE_SET_USED(wks); + for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) { + sub_policy_num = (mtr_policy->sub_policy_num >> + (MLX5_MTR_SUB_POLICY_NUM_SHIFT * i)) & +@@ -16562,10 +16689,13 @@ flow_dv_create_policy_rules(struct rte_eth_dev *dev, + mtr_policy->sub_policys[i][0], i)) { + DRV_LOG(ERR, "Failed to create policy action " + "list per domain."); +- return -1; ++ ret = -1; ++ goto exit; + } + } +- return 0; ++exit: ++ mlx5_flow_pop_thread_workspace(); ++ return ret; + } + + static int +@@ -16795,7 +16925,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, + actions[i++] = priv->sh->dr_drop_action; + flow_dv_match_meta_reg(matcher_para.buf, value.buf, + (enum modify_reg)mtr_id_reg_c, 0, 0); +- misc_mask = flow_dv_matcher_enable(value.buf); ++ misc_mask = flow_dv_matcher_enable(mtrmng->def_matcher[domain]->mask.buf); + __flow_dv_adjust_buf_size(&value.size, misc_mask); + ret = mlx5_flow_os_create_flow + (mtrmng->def_matcher[domain]->matcher_object, +@@ -16840,7 +16970,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, + fm->drop_cnt, NULL); + actions[i++] = cnt->action; + actions[i++] = priv->sh->dr_drop_action; +- misc_mask = flow_dv_matcher_enable(value.buf); ++ misc_mask = flow_dv_matcher_enable(drop_matcher->mask.buf); + __flow_dv_adjust_buf_size(&value.size, misc_mask); + ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object, + (void *)&value, i, actions, +@@ -16880,7 +17010,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, struct mlx5_meter_policy_action_container *act_cnt; uint32_t domain = MLX5_MTR_DOMAIN_INGRESS; uint16_t sub_policy_num; @@ -32789,7 +63197,7 @@ index 3da122cbb9..70031d3dc9 100644 rte_spinlock_lock(&mtr_policy->sl); for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) { if (!rss_desc[i]) -@@ -16914,7 +16967,8 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, +@@ -16914,7 +17046,8 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, } } /* Create sub policy. */ @@ -32799,7 +63207,7 @@ index 3da122cbb9..70031d3dc9 100644 /* Reuse the first pre-allocated sub_policy. 
*/ sub_policy = mtr_policy->sub_policys[domain][0]; sub_policy_idx = sub_policy->idx; -@@ -16954,7 +17008,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, +@@ -16954,7 +17087,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev, if (act_cnt->rix_mark || act_cnt->modify_hdr) { memset(&dh, 0, sizeof(struct mlx5_flow_handle)); if (act_cnt->rix_mark) @@ -32808,7 +63216,16 @@ index 3da122cbb9..70031d3dc9 100644 dh.fate_action = MLX5_FLOW_FATE_QUEUE; dh.rix_hrxq = hrxq_idx[i]; flow_drv_rxq_flags_set(dev, &dh); -@@ -17635,7 +17689,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev, +@@ -17212,7 +17345,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + } + if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, + (enum rte_color)i, +- color_rule->matcher->matcher_object, ++ color_rule->matcher, + acts.actions_n, acts.dv_actions, + true, item, + &color_rule->rule, &attr)) { +@@ -17635,7 +17768,7 @@ flow_dv_action_validate(struct rte_eth_dev *dev, "Indirect age action not supported"); return flow_dv_validate_action_age(0, action, dev, err); case RTE_FLOW_ACTION_TYPE_COUNT: @@ -32817,7 +63234,25 @@ index 3da122cbb9..70031d3dc9 100644 case RTE_FLOW_ACTION_TYPE_CONNTRACK: if (!priv->sh->ct_aso_en) return rte_flow_error_set(err, ENOTSUP, -@@ -18291,4 +18345,3 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { +@@ -17907,7 +18040,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, + break; + case RTE_FLOW_ACTION_TYPE_DROP: + ret = mlx5_flow_validate_action_drop +- (action_flags[i], attr, &flow_err); ++ (dev, false, attr, &flow_err); + if (ret < 0) + return -rte_mtr_error_set(error, + ENOTSUP, +@@ -18229,7 +18362,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, + break; + } + /* Try to apply the flow to HW. */ +- misc_mask = flow_dv_matcher_enable(flow.dv.value.buf); ++ misc_mask = flow_dv_matcher_enable(flow.handle->dvh.matcher->mask.buf); + __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask); + err = mlx5_flow_os_create_flow + (flow.handle->dvh.matcher->matcher_object, +@@ -18291,4 +18424,3 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = { }; #endif /* HAVE_IBV_FLOW_DV_SUPPORT */ @@ -32864,7 +63299,7 @@ index 64867dc9e2..3ef46db1f6 100644 node->header_length_field_offset = field->offset_base; } diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c -index f4a7b697e6..a58e30dc83 100644 +index f4a7b697e6..fd3f993892 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c @@ -15,6 +15,9 @@ @@ -32990,20 +63425,64 @@ index f4a7b697e6..a58e30dc83 100644 if (mtr_policy) __atomic_add_fetch(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED); return 0; -@@ -1615,7 +1632,7 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev, +@@ -1607,7 +1624,7 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev, + fm->profile = fmp; + /* Update meter params in HW (if not disabled). 
*/ + if (fm->active_state == MLX5_FLOW_METER_DISABLE) +- return 0; ++ goto dec_ref_cnt; + ret = mlx5_flow_meter_action_modify(priv, fm, &fm->profile->srtcm_prm, + modify_bits, fm->active_state, 1); + if (ret) { +@@ -1615,8 +1632,9 @@ mlx5_flow_meter_profile_update(struct rte_eth_dev *dev, return -rte_mtr_error_set(error, -ret, RTE_MTR_ERROR_TYPE_MTR_PARAMS, NULL, "Failed to update meter" - " parmeters in hardware."); + " parameters in hardware."); } ++dec_ref_cnt: old_fmp->ref_cnt--; fmp->ref_cnt++; + return 0; diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c -index 29cd694752..165786f864 100644 +index 29cd694752..0fe2c9af5a 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c -@@ -882,13 +882,48 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, +@@ -232,27 +232,14 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t id __rte_unused) + break; + } + if (!cnt) { +- struct mlx5_flow_counter_pool **pools; + uint32_t size; + +- if (n_valid == cmng->n) { +- /* Resize the container pool array. */ +- size = sizeof(struct mlx5_flow_counter_pool *) * +- (n_valid + MLX5_CNT_CONTAINER_RESIZE); +- pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0, +- SOCKET_ID_ANY); +- if (!pools) +- return 0; +- if (n_valid) { +- memcpy(pools, cmng->pools, +- sizeof(struct mlx5_flow_counter_pool *) * +- n_valid); +- mlx5_free(cmng->pools); +- } +- cmng->pools = pools; +- cmng->n += MLX5_CNT_CONTAINER_RESIZE; ++ if (n_valid == MLX5_COUNTER_POOLS_MAX_NUM) { ++ DRV_LOG(ERR, "All counter is in used, try again later."); ++ rte_errno = EAGAIN; ++ return 0; + } +- /* Allocate memory for new pool*/ ++ /* Allocate memory for new pool */ + size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL; + pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); + if (!pool) +@@ -882,13 +869,48 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, } } @@ -33054,7 +63533,7 @@ index 29cd694752..165786f864 100644 * @param[in] item * Item specification. 
* @param[in] item_flags -@@ -896,6 +931,7 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, +@@ -896,6 +918,7 @@ flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr, */ static void flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, @@ -33062,7 +63541,7 @@ index 29cd694752..165786f864 100644 const struct rte_flow_item *item __rte_unused, uint64_t item_flags) { -@@ -907,6 +943,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, +@@ -907,6 +930,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, .size = size, }; #else @@ -33070,7 +63549,7 @@ index 29cd694752..165786f864 100644 const struct rte_flow_item_gre *spec = item->spec; const struct rte_flow_item_gre *mask = item->mask; unsigned int size = sizeof(struct ibv_flow_spec_gre); -@@ -915,17 +952,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, +@@ -915,17 +939,29 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, .size = size, }; @@ -33110,7 +63589,7 @@ index 29cd694752..165786f864 100644 } #endif if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) -@@ -936,7 +985,8 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, +@@ -936,7 +972,8 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, flow_verbs_item_gre_ip_protocol_update(&verbs->attr, IBV_FLOW_SPEC_IPV6, IPPROTO_GRE); @@ -33120,7 +63599,26 @@ index 29cd694752..165786f864 100644 } /** -@@ -1666,6 +1716,8 @@ flow_verbs_translate(struct rte_eth_dev *dev, +@@ -1195,6 +1232,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, + uint16_t ether_type = 0; + bool is_empty_vlan = false; + uint16_t udp_dport = 0; ++ /* Verbs interface does not support groups higher than 0. */ ++ bool is_root = true; + + if (items == NULL) + return -1; +@@ -1397,7 +1436,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, + action_flags |= MLX5_FLOW_ACTION_MARK; + break; + case RTE_FLOW_ACTION_TYPE_DROP: +- ret = mlx5_flow_validate_action_drop(action_flags, ++ ret = mlx5_flow_validate_action_drop(dev, ++ is_root, + attr, + error); + if (ret < 0) +@@ -1666,6 +1706,8 @@ flow_verbs_translate(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); struct mlx5_flow_rss_desc *rss_desc; @@ -33129,7 +63627,7 @@ index 29cd694752..165786f864 100644 MLX5_ASSERT(wks); rss_desc = &wks->rss_desc; -@@ -1680,12 +1732,12 @@ flow_verbs_translate(struct rte_eth_dev *dev, +@@ -1680,12 +1722,12 @@ flow_verbs_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_FLAG: flow_verbs_translate_action_flag(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_FLAG; @@ -33144,7 +63642,7 @@ index 29cd694752..165786f864 100644 break; case RTE_FLOW_ACTION_TYPE_DROP: flow_verbs_translate_action_drop(dev_flow, actions); -@@ -1803,10 +1855,10 @@ flow_verbs_translate(struct rte_eth_dev *dev, +@@ -1803,10 +1845,10 @@ flow_verbs_translate(struct rte_eth_dev *dev, item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE; break; case RTE_FLOW_ITEM_TYPE_GRE: @@ -33157,7 +63655,7 @@ index 29cd694752..165786f864 100644 break; case RTE_FLOW_ITEM_TYPE_MPLS: flow_verbs_translate_item_mpls(dev_flow, items, -@@ -1820,6 +1872,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, +@@ -1820,6 +1862,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, NULL, "item not supported"); } } @@ -33168,10 +63666,20 @@ index 29cd694752..165786f864 100644 /* Other members of attr will be ignored. 
*/ dev_flow->verbs.attr.priority = diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c -index e8215f7381..9fcd039c22 100644 +index e8215f7381..ab88f2526f 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.c +++ b/dpdk/drivers/net/mlx5/mlx5_rx.c -@@ -73,7 +73,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq) +@@ -34,7 +34,8 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + + static __rte_always_inline int + mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +- uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe); ++ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe, ++ uint16_t *skip_cnt, bool mprq); + + static __rte_always_inline uint32_t + rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe); +@@ -73,7 +74,7 @@ rx_queue_count(struct mlx5_rxq_data *rxq) const unsigned int cqe_n = (1 << rxq->cqe_n); const unsigned int sges_n = (1 << rxq->sges_n); const unsigned int elts_n = (1 << rxq->elts_n); @@ -33180,7 +63688,7 @@ index e8215f7381..9fcd039c22 100644 const unsigned int cqe_cnt = cqe_n - 1; unsigned int cq_ci, used; -@@ -167,8 +167,8 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, +@@ -167,8 +168,8 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; qinfo->scattered_rx = dev->data->scattered_rx; qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ? @@ -33191,7 +63699,7 @@ index e8215f7381..9fcd039c22 100644 } /** -@@ -178,7 +178,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, +@@ -178,7 +179,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, * Pointer to the device structure. * * @param rx_queue_id @@ -33200,7 +63708,7 @@ index e8215f7381..9fcd039c22 100644 * * @param mode * Pointer to the burts mode information. -@@ -354,10 +354,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) +@@ -354,10 +355,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) scat = &((volatile struct mlx5_wqe_mprq *) rxq->wqes)[i].dseg; @@ -33215,7 +63723,7 @@ index e8215f7381..9fcd039c22 100644 lkey = mlx5_rx_addr2mr(rxq, addr); } else { struct rte_mbuf *buf = (*rxq->elts)[i]; -@@ -383,13 +383,18 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) +@@ -383,13 +384,22 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) .ai = 0, }; rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ? @@ -33227,24 +63735,37 @@ index e8215f7381..9fcd039c22 100644 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); } ++#define MLX5_ERROR_CQE_MASK 0x40000000 +/* Must be negative. */ -+#define MLX5_ERROR_CQE_RET (-1) ++#define MLX5_REGULAR_ERROR_CQE_RET (-5) ++#define MLX5_CRITICAL_ERROR_CQE_RET (-4) +/* Must not be negative. */ +#define MLX5_RECOVERY_ERROR_RET 0 ++#define MLX5_RECOVERY_IGNORE_RET 1 ++#define MLX5_RECOVERY_COMPLETED_RET 2 + /** * Handle a Rx error. * The function inserts the RQ state to reset when the first error CQE is -@@ -404,7 +409,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) +@@ -402,30 +412,62 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) + * @param[in] vec + * 1 when called from vectorized Rx burst, need to prepare mbufs for the RQ. * 0 when called from non-vectorized Rx burst. ++ * @param[in] err_n ++ * Number of CQEs to check for an error. * * @return - * -1 in case of recovery error, otherwise the CQE status. -+ * MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status. 
++ * MLX5_RECOVERY_ERROR_RET in case of recovery error, ++ * MLX5_RECOVERY_IGNORE_RET in case of non-critical error syndrome, ++ * MLX5_RECOVERY_COMPLETED_RET in case of recovery is completed, ++ * otherwise the CQE status after ignored error syndrome or queue reset. */ int - mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) -@@ -412,7 +417,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +-mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) ++mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, ++ uint16_t err_n, uint16_t *skip_cnt) + { const uint16_t cqe_n = 1 << rxq->cqe_n; const uint16_t cqe_mask = cqe_n - 1; const uint16_t wqe_n = 1 << rxq->elts_n; @@ -33253,7 +63774,48 @@ index e8215f7381..9fcd039c22 100644 struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); union { -@@ -433,7 +438,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + volatile struct mlx5_cqe *cqe; + volatile struct mlx5_err_cqe *err_cqe; + } u = { +- .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask], ++ .cqe = &(*rxq->cqes)[(rxq->cq_ci - vec) & cqe_mask], + }; + struct mlx5_mp_arg_queue_state_modify sm; +- int ret; ++ bool critical_syndrome = false; ++ int ret, i; + + switch (rxq->err_state) { ++ case MLX5_RXQ_ERR_STATE_IGNORE: ++ ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci - vec); ++ if (ret != MLX5_CQE_STATUS_ERR) { ++ rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; ++ return ret; ++ } ++ /* Fall-through */ + case MLX5_RXQ_ERR_STATE_NO_ERROR: ++ for (i = 0; i < (int)err_n; i++) { ++ u.cqe = &(*rxq->cqes)[(rxq->cq_ci - vec - i) & cqe_mask]; ++ if (MLX5_CQE_OPCODE(u.cqe->op_own) == MLX5_CQE_RESP_ERR) { ++ if (u.err_cqe->syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || ++ u.err_cqe->syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || ++ u.err_cqe->syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR) ++ critical_syndrome = true; ++ break; ++ } ++ } ++ if (!critical_syndrome) { ++ if (rxq->err_state == MLX5_RXQ_ERR_STATE_NO_ERROR) { ++ *skip_cnt = 0; ++ if (i == err_n) ++ rxq->err_state = MLX5_RXQ_ERR_STATE_IGNORE; ++ } ++ return MLX5_RECOVERY_IGNORE_RET; ++ } + rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET; + /* Fall-through */ + case MLX5_RXQ_ERR_STATE_NEED_RESET: +@@ -433,7 +475,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) sm.queue_id = rxq->idx; sm.state = IBV_WQS_RESET; if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm)) @@ -33262,7 +63824,7 @@ index e8215f7381..9fcd039c22 100644 if (rxq_ctrl->dump_file_n < RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) { MKSTR(err_str, "Unexpected CQE error syndrome " -@@ -473,7 +478,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -473,7 +515,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) sm.queue_id = rxq->idx; sm.state = IBV_WQS_RDY; if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm)) @@ -33271,7 +63833,15 @@ index e8215f7381..9fcd039c22 100644 if (vec) { const uint32_t elts_n = mlx5_rxq_mprq_enabled(rxq) ? 
-@@ -501,7 +506,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -484,7 +526,6 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + rxq->elts_ci : rxq->rq_ci; + uint32_t elt_idx; + struct rte_mbuf **elt; +- int i; + unsigned int n = elts_n - (elts_ci - + rxq->rq_pi); + +@@ -501,7 +542,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) rte_pktmbuf_free_seg (*elt); } @@ -33280,7 +63850,11 @@ index e8215f7381..9fcd039c22 100644 } } for (i = 0; i < (int)elts_n; ++i) { -@@ -520,7 +525,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -517,10 +558,11 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + } + mlx5_rxq_initialize(rxq); + rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; ++ return MLX5_RECOVERY_COMPLETED_RET; } return ret; default: @@ -33289,41 +63863,119 @@ index e8215f7381..9fcd039c22 100644 } } -@@ -538,7 +543,9 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -536,17 +578,24 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + * @param[out] mcqe + * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not * written. - * +- * ++ * @param[out] skip_cnt ++ * Number of packets skipped due to recoverable errors. ++ * @param mprq ++ * Indication if it is called from MPRQ. * @return - * 0 in case of empty CQE, otherwise the packet size in bytes. -+ * 0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE, -+ * otherwise the packet size in regular RxQ, and striding byte -+ * count format in mprq case. ++ * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, ++ * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, ++ * otherwise the packet size in regular RxQ, ++ * and striding byte count format in mprq case. */ static inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, -@@ -605,8 +612,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +- uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe) ++ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe, ++ uint16_t *skip_cnt, bool mprq) + { + struct rxq_zip *zip = &rxq->zip; + uint16_t cqe_n = cqe_cnt + 1; +- int len; ++ int len = 0, ret = 0; + uint16_t idx, end; + + do { +@@ -595,7 +644,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + * compressed. 
+ */ + } else { +- int ret; + int8_t op_own; + uint32_t cq_ci; + +@@ -603,10 +651,12 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { + if (unlikely(ret == MLX5_CQE_STATUS_ERR || rxq->err_state)) { - ret = mlx5_rx_err_handle(rxq, 0); - if (ret == MLX5_CQE_STATUS_HW_OWN || +- ret = mlx5_rx_err_handle(rxq, 0); +- if (ret == MLX5_CQE_STATUS_HW_OWN || - ret == -1) - return 0; -+ ret == MLX5_RECOVERY_ERROR_RET) -+ return MLX5_ERROR_CQE_RET; ++ ret = mlx5_rx_err_handle(rxq, 0, 1, skip_cnt); ++ if (ret == MLX5_CQE_STATUS_HW_OWN) ++ return MLX5_ERROR_CQE_MASK; ++ if (ret == MLX5_RECOVERY_ERROR_RET || ++ ret == MLX5_RECOVERY_COMPLETED_RET) ++ return MLX5_CRITICAL_ERROR_CQE_RET; } else { return 0; } -@@ -851,8 +858,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -659,8 +709,15 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + } + } + if (unlikely(rxq->err_state)) { ++ if (rxq->err_state == MLX5_RXQ_ERR_STATE_IGNORE && ++ ret == MLX5_CQE_STATUS_SW_OWN) { ++ rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; ++ return len & MLX5_ERROR_CQE_MASK; ++ } + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; + ++rxq->stats.idropped; ++ (*skip_cnt) += mprq ? (len & MLX5_MPRQ_STRIDE_NUM_MASK) >> ++ MLX5_MPRQ_STRIDE_NUM_SHIFT : 1; + } else { + return len; + } +@@ -812,6 +869,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + int len = 0; /* keep its value across iterations. */ + + while (pkts_n) { ++ uint16_t skip_cnt; + unsigned int idx = rq_ci & wqe_cnt; + volatile struct mlx5_wqe_data_seg *wqe = + &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; +@@ -850,8 +908,23 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + } if (!pkt) { cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; - len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); +- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); - if (!len) { -+ if (len <= 0) { - rte_mbuf_raw_free(rep); -+ if (unlikely(len == MLX5_ERROR_CQE_RET)) ++ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe, &skip_cnt, false); ++ if (unlikely(len & MLX5_ERROR_CQE_MASK)) { ++ if (len == MLX5_CRITICAL_ERROR_CQE_RET) { ++ rte_mbuf_raw_free(rep); + rq_ci = rxq->rq_ci << sges_n; ++ break; ++ } ++ rq_ci >>= sges_n; ++ rq_ci += skip_cnt; ++ rq_ci <<= sges_n; ++ idx = rq_ci & wqe_cnt; ++ wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; ++ seg = (*rxq->elts)[idx]; ++ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; ++ len = len & ~MLX5_ERROR_CQE_MASK; ++ } ++ if (len == 0) { + rte_mbuf_raw_free(rep); break; } - pkt = seg; -@@ -1045,8 +1054,8 @@ uint16_t +@@ -954,6 +1027,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, + tcp->cksum = 0; + csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4); + csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); ++ csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); + csum = (~csum) & 0xffff; + if (csum == 0) + csum = 0xffff; +@@ -1045,8 +1119,8 @@ uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) { struct mlx5_rxq_data *rxq = dpdk_rxq; @@ -33334,26 +63986,55 @@ index e8215f7381..9fcd039c22 100644 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1; const uint32_t wq_mask = (1 << rxq->elts_n) - 1; volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; -@@ -1075,8 +1084,13 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1062,6 +1136,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, 
struct rte_mbuf **pkts, uint16_t pkts_n) + uint16_t strd_cnt; + uint16_t strd_idx; + uint32_t byte_cnt; ++ uint16_t skip_cnt; + volatile struct mlx5_mini_cqe8 *mcqe = NULL; + enum mlx5_rqx_code rxq_code; + +@@ -1074,8 +1149,25 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; } cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; - ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); +- ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); - if (!ret) ++ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe, &skip_cnt, true); ++ if (unlikely(ret & MLX5_ERROR_CQE_MASK)) { ++ if (ret == MLX5_CRITICAL_ERROR_CQE_RET) { ++ rq_ci = rxq->rq_ci; ++ consumed_strd = rxq->consumed_strd; ++ break; ++ } ++ consumed_strd += skip_cnt; ++ while (consumed_strd >= strd_n) { ++ /* Replace WQE if the buffer is still in use. */ ++ mprq_buf_replace(rxq, rq_ci & wq_mask); ++ /* Advance to the next WQE. */ ++ consumed_strd -= strd_n; ++ ++rq_ci; ++ buf = (*rxq->mprq_bufs)[rq_ci & wq_mask]; ++ } ++ cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; ++ } + if (ret == 0) -+ break; -+ if (unlikely(ret == MLX5_ERROR_CQE_RET)) { -+ rq_ci = rxq->rq_ci; -+ consumed_strd = rxq->consumed_strd; break; -+ } byte_cnt = ret; len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; - MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h -index f808bf288f..423d80e4a7 100644 +index f808bf288f..5bcb6cb03a 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.h +++ b/dpdk/drivers/net/mlx5/mlx5_rx.h -@@ -88,8 +88,8 @@ struct mlx5_rxq_data { +@@ -61,6 +61,7 @@ enum mlx5_rxq_err_state { + MLX5_RXQ_ERR_STATE_NO_ERROR = 0, + MLX5_RXQ_ERR_STATE_NEED_RESET, + MLX5_RXQ_ERR_STATE_NEED_READY, ++ MLX5_RXQ_ERR_STATE_IGNORE, + }; + + enum mlx5_rqx_code { +@@ -88,8 +89,8 @@ struct mlx5_rxq_data { unsigned int elts_n:4; /* Log 2 of Mbufs. */ unsigned int rss_hash:1; /* RSS hash result is enabled. */ unsigned int mark:1; /* Marked flow available on the queue. */ @@ -33364,7 +64045,7 @@ index f808bf288f..423d80e4a7 100644 unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */ unsigned int err_state:2; /* enum mlx5_rxq_err_state. */ unsigned int strd_scatter_en:1; /* Scattered packets from a stride. */ -@@ -125,6 +125,7 @@ struct mlx5_rxq_data { +@@ -125,6 +126,7 @@ struct mlx5_rxq_data { struct mlx5_dev_ctx_shared *sh; /* Shared context. */ uint16_t idx; /* Queue index. */ struct mlx5_rxq_stats stats; @@ -33372,7 +64053,7 @@ index f808bf288f..423d80e4a7 100644 rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */ struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */ struct mlx5_uar_data uar_data; /* CQ doorbell. */ -@@ -161,7 +162,6 @@ struct mlx5_rxq_ctrl { +@@ -161,7 +163,6 @@ struct mlx5_rxq_ctrl { uint16_t share_qid; /* Shared RxQ ID in group. */ unsigned int started:1; /* Whether (shared) RXQ has been started. */ unsigned int irq:1; /* Whether IRQ is enabled. */ @@ -33380,7 +64061,17 @@ index f808bf288f..423d80e4a7 100644 uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */ uint32_t wqn; /* WQ number. */ uint32_t rxseg_n; /* Number of split segment descriptions. 
*/ -@@ -401,7 +401,7 @@ mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset, +@@ -272,7 +273,8 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx, + + uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); + void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq); +-__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec); ++__rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, ++ uint16_t err_n, uint16_t *skip_cnt); + void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf); + uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, + uint16_t pkts_n); +@@ -401,7 +403,7 @@ mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset, static __rte_always_inline void mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx) { @@ -33389,7 +64080,7 @@ index f808bf288f..423d80e4a7 100644 struct mlx5_mprq_buf *rep = rxq->mprq_repl; volatile struct mlx5_wqe_data_seg *wqe = &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg; -@@ -459,8 +459,8 @@ static __rte_always_inline enum mlx5_rqx_code +@@ -459,8 +461,8 @@ static __rte_always_inline enum mlx5_rqx_code mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len, struct mlx5_mprq_buf *buf, uint16_t strd_idx, uint16_t strd_cnt) { @@ -33400,7 +64091,7 @@ index f808bf288f..423d80e4a7 100644 const uint16_t strd_shift = MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en; const int32_t hdrm_overlap = -@@ -543,7 +543,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len, +@@ -543,7 +545,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len, buf_len, shinfo); /* Set mbuf head-room. */ SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM); @@ -33409,7 +64100,7 @@ index f808bf288f..423d80e4a7 100644 MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >= len - (hdrm_overlap > 0 ? hdrm_overlap : 0)); DATA_LEN(pkt) = len; -@@ -605,7 +605,7 @@ mlx5_check_mprq_support(struct rte_eth_dev *dev) +@@ -605,7 +607,7 @@ mlx5_check_mprq_support(struct rte_eth_dev *dev) static __rte_always_inline int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq) { @@ -33419,7 +64110,7 @@ index f808bf288f..423d80e4a7 100644 /** diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c -index f77d42dedf..807aaf2fc9 100644 +index f77d42dedf..da1b1f8bb9 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxq.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c @@ -67,7 +67,7 @@ mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data) @@ -33476,6 +64167,24 @@ index f77d42dedf..807aaf2fc9 100644 const uint16_t q_mask = q_n - 1; uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? rxq->elts_ci : rxq->rq_ci; +@@ -525,12 +528,12 @@ mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx) + * synchronized, that might be broken on RQ restart + * and cause Rx malfunction, so queue stopping is + * not supported if vectorized Rx burst is engaged. +- * The routine pointer depends on the process +- * type, should perform check there. ++ * The routine pointer depends on the process type, ++ * should perform check there. MPRQ is not supported as well. 
+ */ +- if (pkt_burst == mlx5_rx_burst_vec) { +- DRV_LOG(ERR, "Rx queue stop is not supported " +- "for vectorized Rx"); ++ if (pkt_burst != mlx5_rx_burst) { ++ DRV_LOG(ERR, "Rx queue stop is only supported " ++ "for non-vectorized single-packet Rx"); + rte_errno = EINVAL; + return -EINVAL; + } @@ -838,6 +841,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; @@ -33491,7 +64200,31 @@ index f77d42dedf..807aaf2fc9 100644 if (mp) { /* * The parameters should be checked on rte_eth_dev layer. -@@ -1378,8 +1389,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) +@@ -888,16 +899,20 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + /* Try to reuse shared RXQ. */ + rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group, + conf->share_qid); ++ res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl); ++ if (res) ++ return res; + if (rxq_ctrl != NULL && + !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket, + conf, mp)) { + rte_errno = EINVAL; + return -rte_errno; + } ++ } else { ++ res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl); ++ if (res) ++ return res; + } +- res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl); +- if (res) +- return res; + /* Allocate RXQ. */ + rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0, + SOCKET_ID_ANY); +@@ -1378,8 +1393,8 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) unsigned int buf_len; unsigned int obj_num; unsigned int obj_size; @@ -33502,7 +64235,7 @@ index f77d42dedf..807aaf2fc9 100644 unsigned int i; unsigned int n_ibv = 0; int ret; -@@ -1398,16 +1409,18 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) +@@ -1398,16 +1413,18 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) n_ibv++; desc += 1 << rxq->elts_n; /* Get the max number of strides. */ @@ -33530,7 +64263,7 @@ index f77d42dedf..807aaf2fc9 100644 /* * Received packets can be either memcpy'd or externally referenced. In * case that the packet is attached to an mbuf as an external buffer, as -@@ -1453,7 +1466,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) +@@ -1453,7 +1470,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id); mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ, 0, NULL, NULL, mlx5_mprq_buf_init, @@ -33539,10 +64272,24 @@ index f77d42dedf..807aaf2fc9 100644 dev->device->numa_node, 0); if (mp == NULL) { DRV_LOG(ERR, -@@ -1530,6 +1543,126 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx, - priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE); - } - +@@ -1517,8 +1534,6 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx, + MLX5_MAX_TCP_HDR_OFFSET) + max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET; + max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE); +- MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE); +- max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE; + if (priv->max_lro_msg_size) + priv->max_lro_msg_size = + RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size); +@@ -1526,8 +1541,145 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx, + priv->max_lro_msg_size = max_lro_size; + DRV_LOG(DEBUG, + "port %u Rx Queue %u max LRO message size adjusted to %u bytes", +- dev->data->port_id, idx, +- priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE); ++ dev->data->port_id, idx, priv->max_lro_msg_size); ++} ++ +/** + * Prepare both size and number of stride for Multi-Packet RQ. 
+ * @@ -33598,8 +64345,8 @@ index f77d42dedf..807aaf2fc9 100644 + } else { + *actual_log_stride_num = config->mprq.log_stride_num; + } -+ if (config->mprq.log_stride_size) { -+ /* Checks if chosen size of stride is in supported range. */ ++ /* Checks if chosen size of stride is in supported range. */ ++ if (config->mprq.log_stride_size != (uint32_t)MLX5_ARG_UNSET) { + if (config->mprq.log_stride_size > log_max_stride_size || + config->mprq.log_stride_size < log_min_stride_size) { + *actual_log_stride_size = log_def_stride_size; @@ -33611,10 +64358,26 @@ index f77d42dedf..807aaf2fc9 100644 + *actual_log_stride_size = config->mprq.log_stride_size; + } + } else { -+ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) ++ /* Make the stride fit the mbuf size by default. */ ++ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) { ++ DRV_LOG(WARNING, ++ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to match the mbuf size (%u)", ++ dev->data->port_id, idx, min_mbuf_size); + *actual_log_stride_size = log2above(min_mbuf_size); -+ else ++ } else { + goto unsupport; ++ } ++ } ++ /* Make sure the stride size is greater than the headroom. */ ++ if (RTE_BIT32(*actual_log_stride_size) < RTE_PKTMBUF_HEADROOM) { ++ if (RTE_BIT32(log_max_stride_size) > RTE_PKTMBUF_HEADROOM) { ++ DRV_LOG(WARNING, ++ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to accommodate the headroom (%u)", ++ dev->data->port_id, idx, RTE_PKTMBUF_HEADROOM); ++ *actual_log_stride_size = log2above(RTE_PKTMBUF_HEADROOM); ++ } else { ++ goto unsupport; ++ } + } + log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size; + /* Check if WQE buffer size is supported by hardware. */ @@ -33653,6 +64416,8 @@ index f77d42dedf..807aaf2fc9 100644 + " min_stride_sz = %u, max_stride_sz = %u).\n" + "Rx segment is %senable.", + dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n, ++ config->mprq.log_stride_size == (uint32_t)MLX5_ARG_UNSET ? ++ RTE_BIT32(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) : + RTE_BIT32(config->mprq.log_stride_size), + RTE_BIT32(config->mprq.log_stride_num), + config->mprq.min_rxqs_num, @@ -33661,12 +64426,10 @@ index f77d42dedf..807aaf2fc9 100644 + RTE_BIT32(config->mprq.log_max_stride_size), + rx_seg_en ? "" : "not "); + return -1; -+} -+ + } + /** - * Create a DPDK Rx queue. 
- * -@@ -1567,41 +1700,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, +@@ -1567,41 +1719,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, RTE_PKTMBUF_HEADROOM; unsigned int max_lro_size = 0; unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM; @@ -33722,7 +64485,7 @@ index f77d42dedf..807aaf2fc9 100644 rxq->ctrl = tmpl; LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry); MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG); -@@ -1695,43 +1820,19 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, +@@ -1695,43 +1839,19 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, tmpl->socket = socket; if (dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; @@ -33771,7 +64534,7 @@ index f77d42dedf..807aaf2fc9 100644 } else if (tmpl->rxq.rxseg_n == 1) { MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size); tmpl->rxq.sges_n = 0; -@@ -1765,24 +1866,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, +@@ -1765,24 +1885,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, tmpl->rxq.sges_n = sges_n; max_lro_size = max_rx_pktlen; } @@ -33796,7 +64559,7 @@ index f77d42dedf..807aaf2fc9 100644 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { -@@ -1840,20 +1923,24 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, +@@ -1840,20 +1942,24 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, dev->data->port_id, tmpl->rxq.crc_present ? "disabled" : "enabled", tmpl->rxq.crc_present << 2); @@ -33826,7 +64589,7 @@ index f77d42dedf..807aaf2fc9 100644 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: -@@ -1969,6 +2056,8 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) +@@ -1969,6 +2075,8 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { struct mlx5_priv *priv = dev->data->dev_private; @@ -33835,7 +64598,23 @@ index f77d42dedf..807aaf2fc9 100644 MLX5_ASSERT(priv->rxq_privs != NULL); return (*priv->rxq_privs)[idx]; } -@@ -2152,7 +2241,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx) +@@ -2053,6 +2161,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) + RTE_ETH_QUEUE_STATE_STOPPED; + } + } else { /* Refcnt zero, closing device. */ ++ LIST_REMOVE(rxq_ctrl, next); + LIST_REMOVE(rxq, owner_entry); + if (LIST_EMPTY(&rxq_ctrl->owners)) { + if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) +@@ -2060,7 +2169,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) + (&rxq_ctrl->rxq.mr_ctrl.cache_bh); + if (rxq_ctrl->rxq.shared) + LIST_REMOVE(rxq_ctrl, share_entry); +- LIST_REMOVE(rxq_ctrl, next); + mlx5_free(rxq_ctrl); + } + dev->data->rx_queues[idx] = NULL; +@@ -2152,7 +2260,7 @@ mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx) * Number of queues in the array. 
* * @return @@ -33844,7 +64623,7 @@ index f77d42dedf..807aaf2fc9 100644 */ static int mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl, -@@ -2586,7 +2675,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx, +@@ -2586,7 +2694,7 @@ mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx, if (hrxq->standalone) { /* * Replacement of indirection table unsupported for @@ -33853,7 +64632,7 @@ index f77d42dedf..807aaf2fc9 100644 */ rte_errno = ENOTSUP; return -rte_errno; -@@ -2828,7 +2917,7 @@ mlx5_drop_action_create(struct rte_eth_dev *dev) +@@ -2828,7 +2936,7 @@ mlx5_drop_action_create(struct rte_eth_dev *dev) if (priv->drop_queue.hrxq) return priv->drop_queue.hrxq; @@ -33863,10 +64642,27 @@ index f77d42dedf..807aaf2fc9 100644 DRV_LOG(WARNING, "Port %u cannot allocate memory for drop queue.", diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c -index 6212ce8247..0e2eab068a 100644 +index 6212ce8247..667475a93e 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c -@@ -148,7 +148,7 @@ static inline void +@@ -51,6 +51,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + uint16_t pkts_n) + { + uint16_t n = 0; ++ uint16_t skip_cnt; + unsigned int i; + #ifdef MLX5_PMD_SOFT_COUNTERS + uint32_t err_bytes = 0; +@@ -74,7 +75,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + rxq->stats.ipackets -= (pkts_n - n); + rxq->stats.ibytes -= err_bytes; + #endif +- mlx5_rx_err_handle(rxq, 1); ++ mlx5_rx_err_handle(rxq, 1, pkts_n, &skip_cnt); + return n; + } + +@@ -148,7 +149,7 @@ static inline void mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq) { const uint16_t wqe_n = 1 << rxq->elts_n; @@ -33875,7 +64671,7 @@ index 6212ce8247..0e2eab068a 100644 const uint32_t elts_n = wqe_n * strd_n; const uint32_t wqe_mask = elts_n - 1; uint32_t n = elts_n - (rxq->elts_ci - rxq->rq_pi); -@@ -197,8 +197,8 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq, +@@ -197,8 +198,8 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq, { const uint16_t wqe_n = 1 << rxq->elts_n; const uint16_t wqe_mask = wqe_n - 1; @@ -33886,6 +64682,41 @@ index 6212ce8247..0e2eab068a 100644 const uint32_t elts_n = wqe_n * strd_n; const uint32_t elts_mask = elts_n - 1; uint32_t elts_idx = rxq->rq_pi & elts_mask; +@@ -253,8 +254,6 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq, + } + rxq->rq_pi += i; + rxq->cq_ci += i; +- rte_io_wmb(); +- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + if (rq_ci != rxq->rq_ci) { + rxq->rq_ci = rq_ci; + rte_io_wmb(); +@@ -361,8 +360,6 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, + rxq->decompressed -= n; + } + } +- rte_io_wmb(); +- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + *no_cq = !rcvd_pkt; + return rcvd_pkt; + } +@@ -390,6 +387,7 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + bool no_cq = false; + + do { ++ err = 0; + nb_rx = rxq_burst_v(rxq, pkts + tn, pkts_n - tn, + &err, &no_cq); + if (unlikely(err | rxq->err_state)) +@@ -397,6 +395,8 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + tn += nb_rx; + if (unlikely(no_cq)) + break; ++ rte_io_wmb(); ++ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + } while (tn != pkts_n); + return tn; + } @@ -428,7 +428,7 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, const uint16_t q_n = 1 << rxq->cqe_n; const uint16_t q_mask = q_n - 1; @@ -33895,8 +64726,25 @@ index 6212ce8247..0e2eab068a 
100644 const uint32_t elts_n = wqe_n * strd_n; const uint32_t elts_mask = elts_n - 1; volatile struct mlx5_cqe *cq; +@@ -524,6 +524,7 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + bool no_cq = false; + + do { ++ err = 0; + nb_rx = rxq_burst_mprq_v(rxq, pkts + tn, pkts_n - tn, + &err, &no_cq); + if (unlikely(err | rxq->err_state)) +@@ -531,6 +532,8 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + tn += nb_rx; + if (unlikely(no_cq)) + break; ++ rte_io_wmb(); ++ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + } while (tn != pkts_n); + return tn; + } diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h -index 423e229508..683a8f9a6c 100644 +index 423e229508..204d17a8f2 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h @@ -47,11 +47,11 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n) @@ -34801,7 +65649,13 @@ index 423e229508..683a8f9a6c 100644 } /** -@@ -788,31 +788,31 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -783,36 +783,36 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +- unsigned int pos; ++ unsigned int pos, adj; + uint64_t n = 0; uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; uint16_t nocmp_n = 0; unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1)); @@ -34893,7 +65747,7 @@ index 423e229508..683a8f9a6c 100644 + __vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __vector unsigned char op_own, op_own_tmp1, op_own_tmp2; + __vector unsigned char opcode, owner_mask, invalid_mask; -+ __vector unsigned char comp_mask; ++ __vector unsigned char comp_mask, mini_mask; + __vector unsigned char mask; #ifdef MLX5_PMD_SOFT_COUNTERS - const vector unsigned char lower_half = { @@ -34925,7 +65779,7 @@ index 423e229508..683a8f9a6c 100644 0, 1, 2, 3, 0, 0, 0, 0}; unsigned int p1, p2, p3; -@@ -897,295 +897,295 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -897,295 +897,305 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, } /* A.0 do not cross the end of CQ. */ @@ -35375,6 +66229,16 @@ index 423e229508..683a8f9a6c 100644 - opcode = (vector unsigned char) - vec_andc((vector unsigned long)opcode, - (vector unsigned long)invalid_mask); ++ adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n); ++ mask = (__vector unsigned char)(__vector unsigned long){ ++ (adj * sizeof(uint16_t) * 8), 0}; ++ lshift = vec_splat((__vector unsigned long)mask, 0); ++ shmask = vec_cmpgt(shmax, lshift); ++ mini_mask = (__vector unsigned char) ++ vec_sl((__vector unsigned long)invalid_mask, lshift); ++ mini_mask = (__vector unsigned char) ++ vec_sel((__vector unsigned long)shmask, ++ (__vector unsigned long)mini_mask, shmask); + opcode = (__vector unsigned char) + vec_cmpeq((__vector unsigned int)resp_err_check, + (__vector unsigned int)opcode); @@ -35383,7 +66247,7 @@ index 423e229508..683a8f9a6c 100644 + (__vector unsigned int)zero); + opcode = (__vector unsigned char) + vec_andc((__vector unsigned long)opcode, -+ (__vector unsigned long)invalid_mask); ++ (__vector unsigned long)mini_mask); /* D.4 mark if any error is set */ - *err |= ((vector unsigned long)opcode)[0]; @@ -35391,7 +66255,7 @@ index 423e229508..683a8f9a6c 100644 /* D.5 fill in mbuf - rearm_data and packet_type. 
*/ rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); -@@ -1230,7 +1230,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -1230,7 +1240,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, uint32_t mask = rxq->flow_meta_port_mask; uint32_t metadata; @@ -35400,7 +66264,7 @@ index 423e229508..683a8f9a6c 100644 metadata = rte_be_to_cpu_32 (cq[pos].flow_table_metadata) & mask; *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = -@@ -1255,20 +1255,20 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -1255,20 +1265,20 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, #ifdef MLX5_PMD_SOFT_COUNTERS /* Add up received bytes count. */ byte_cnt = vec_perm(op_own, zero, len_shuf_mask); @@ -35436,10 +66300,57 @@ index 423e229508..683a8f9a6c 100644 /* diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h -index b1d16baa61..f7bbde4e0e 100644 +index b1d16baa61..6d3c594e56 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h -@@ -839,7 +839,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -524,7 +524,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +- unsigned int pos; ++ unsigned int pos, adj; + uint64_t n = 0; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; +@@ -616,7 +616,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + pos += MLX5_VPMD_DESCS_PER_LOOP) { + uint16x4_t op_own; + uint16x4_t opcode, owner_mask, invalid_mask; +- uint16x4_t comp_mask; ++ uint16x4_t comp_mask, mini_mask; + uint16x4_t mask; + uint16x4_t byte_cnt; + uint32x4_t ptype_info, flow_tag; +@@ -647,6 +647,14 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + c0 = vld1q_u64((uint64_t *)(p0 + 48)); + /* Synchronize for loading the rest of blocks. */ + rte_io_rmb(); ++ /* B.0 (CQE 3) reload lower half of the block. */ ++ c3 = vld1q_lane_u64((uint64_t *)(p3 + 48), c3, 0); ++ /* B.0 (CQE 2) reload lower half of the block. */ ++ c2 = vld1q_lane_u64((uint64_t *)(p2 + 48), c2, 0); ++ /* B.0 (CQE 1) reload lower half of the block. */ ++ c1 = vld1q_lane_u64((uint64_t *)(p1 + 48), c1, 0); ++ /* B.0 (CQE 0) reload lower half of the block. */ ++ c0 = vld1q_lane_u64((uint64_t *)(p0 + 48), c0, 0); + /* Prefetch next 4 CQEs. */ + if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { + unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP; +@@ -780,8 +788,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + -1UL >> (n * sizeof(uint16_t) * 8) : 0); + invalid_mask = vorr_u16(invalid_mask, mask); + /* D.3 check error in opcode. */ ++ adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n); ++ mask = vcreate_u16(adj ? ++ -1UL >> ((n + 1) * sizeof(uint16_t) * 8) : -1UL); ++ mini_mask = vand_u16(invalid_mask, mask); + opcode = vceq_u16(resp_err_check, opcode); +- opcode = vbic_u16(opcode, invalid_mask); ++ opcode = vbic_u16(opcode, mini_mask); + /* D.4 mark if any error is set */ + *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0); + /* C.4 fill in mbuf - rearm_data and packet_type. 
*/ +@@ -839,7 +851,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, } } if (rxq->dynf_meta) { @@ -35449,10 +66360,42 @@ index b1d16baa61..f7bbde4e0e 100644 uint32_t mask = rxq->flow_meta_port_mask; diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h -index f3d838389e..185d2695db 100644 +index f3d838389e..ab69af0c55 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h -@@ -772,7 +772,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -523,7 +523,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + { + const uint16_t q_n = 1 << rxq->cqe_n; + const uint16_t q_mask = q_n - 1; +- unsigned int pos; ++ unsigned int pos, adj; + uint64_t n = 0; + uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; + uint16_t nocmp_n = 0; +@@ -591,7 +591,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; + __m128i op_own, op_own_tmp1, op_own_tmp2; + __m128i opcode, owner_mask, invalid_mask; +- __m128i comp_mask; ++ __m128i comp_mask, mini_mask; + __m128i mask; + #ifdef MLX5_PMD_SOFT_COUNTERS + __m128i byte_cnt; +@@ -729,9 +729,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + mask = _mm_sll_epi64(ones, mask); + invalid_mask = _mm_or_si128(invalid_mask, mask); + /* D.3 check error in opcode. */ ++ adj = (comp_idx != MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n); ++ mask = _mm_set_epi64x(0, adj * sizeof(uint16_t) * 8); ++ mini_mask = _mm_sll_epi64(invalid_mask, mask); + opcode = _mm_cmpeq_epi32(resp_err_check, opcode); + opcode = _mm_packs_epi32(opcode, zero); +- opcode = _mm_andnot_si128(invalid_mask, opcode); ++ opcode = _mm_andnot_si128(mini_mask, opcode); + /* D.4 mark if any error is set */ + *err |= _mm_cvtsi128_si64(opcode); + /* D.5 fill in mbuf - rearm_data and packet_type. 
*/ +@@ -772,7 +775,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, } } if (rxq->dynf_meta) { @@ -35462,7 +66405,7 @@ index f3d838389e..185d2695db 100644 uint32_t mask = rxq->flow_meta_port_mask; diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c -index 732775954a..f64fa3587b 100644 +index 732775954a..615e1d073d 100644 --- a/dpdk/drivers/net/mlx5/mlx5_stats.c +++ b/dpdk/drivers/net/mlx5/mlx5_stats.c @@ -114,18 +114,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) @@ -35538,11 +66481,134 @@ index 732775954a..f64fa3587b 100644 } mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); stats_ctrl->imissed = 0; +@@ -280,10 +288,9 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev, + + if (n >= mlx5_xstats_n && xstats_names) { + for (i = 0; i != mlx5_xstats_n; ++i) { +- strncpy(xstats_names[i].name, ++ strlcpy(xstats_names[i].name, + xstats_ctrl->info[i].dpdk_name, + RTE_ETH_XSTATS_NAME_SIZE); +- xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; + } + } + mlx5_xstats_n = mlx5_txpp_xstats_get_names(dev, xstats_names, diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c -index 74c9c0a4ff..2ba456ad7a 100644 +index 74c9c0a4ff..587bed53af 100644 --- a/dpdk/drivers/net/mlx5/mlx5_trigger.c +++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c -@@ -1205,11 +1205,18 @@ mlx5_dev_start(struct rte_eth_dev *dev) +@@ -226,17 +226,17 @@ mlx5_rxq_start(struct rte_eth_dev *dev) + if (rxq == NULL) + continue; + rxq_ctrl = rxq->ctrl; +- if (!rxq_ctrl->started) { ++ if (!rxq_ctrl->started) + if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0) + goto error; +- LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next); +- } + ret = priv->obj_ops.rxq_obj_new(rxq); + if (ret) { + mlx5_free(rxq_ctrl->obj); + rxq_ctrl->obj = NULL; + goto error; + } ++ if (!rxq_ctrl->started) ++ LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next); + rxq_ctrl->started = true; + } + return 0; +@@ -345,8 +345,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev) + ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr); + if (ret) + goto error; +- rq_attr.state = MLX5_SQC_STATE_RDY; +- rq_attr.rq_state = MLX5_SQC_STATE_RST; ++ rq_attr.state = MLX5_RQC_STATE_RDY; ++ rq_attr.rq_state = MLX5_RQC_STATE_RST; + rq_attr.hairpin_peer_sq = sq->id; + rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id; + ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr); +@@ -599,8 +599,8 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue, + " mismatch", dev->data->port_id, cur_queue); + return -rte_errno; + } +- rq_attr.state = MLX5_SQC_STATE_RDY; +- rq_attr.rq_state = MLX5_SQC_STATE_RST; ++ rq_attr.state = MLX5_RQC_STATE_RDY; ++ rq_attr.rq_state = MLX5_RQC_STATE_RST; + rq_attr.hairpin_peer_sq = peer_info->qp_id; + rq_attr.hairpin_peer_vhca = peer_info->vhca_id; + ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); +@@ -664,7 +664,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue, + return -rte_errno; + } + sq_attr.state = MLX5_SQC_STATE_RST; +- sq_attr.sq_state = MLX5_SQC_STATE_RST; ++ sq_attr.sq_state = MLX5_SQC_STATE_RDY; + ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr); + if (ret == 0) + txq_ctrl->hairpin_status = 0; +@@ -698,8 +698,8 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue, + dev->data->port_id, cur_queue); + return -rte_errno; + } +- rq_attr.state = MLX5_SQC_STATE_RST; +- rq_attr.rq_state = MLX5_SQC_STATE_RST; ++ rq_attr.state = 
MLX5_RQC_STATE_RST; ++ rq_attr.rq_state = MLX5_RQC_STATE_RDY; + ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr); + if (ret == 0) + rxq->hairpin_status = 0; +@@ -843,6 +843,11 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) + txq_ctrl = mlx5_txq_get(dev, i); + if (txq_ctrl == NULL) + continue; ++ if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN || ++ txq_ctrl->hairpin_conf.peers[0].port != rx_port) { ++ mlx5_txq_release(dev, i); ++ continue; ++ } + rx_queue = txq_ctrl->hairpin_conf.peers[0].queue; + rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0); + mlx5_hairpin_queue_peer_unbind(dev, i, 1); +@@ -894,11 +899,11 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) + } + /* Indeed, only the first used queue needs to be checked. */ + if (txq_ctrl->hairpin_conf.manual_bind == 0) { ++ mlx5_txq_release(dev, i); + if (cur_port != rx_port) { + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u and port %u are in" + " auto-bind mode", cur_port, rx_port); +- mlx5_txq_release(dev, i); + return -rte_errno; + } else { + return 0; +@@ -1089,6 +1094,22 @@ mlx5_dev_start(struct rte_eth_dev *dev) + else + rte_net_mlx5_dynf_inline_mask = 0; + if (dev->data->nb_rx_queues > 0) { ++ uint32_t max_lro_msg_size = priv->max_lro_msg_size; ++ ++ if (max_lro_msg_size < MLX5_LRO_SEG_CHUNK_SIZE) { ++ uint32_t i; ++ struct mlx5_rxq_priv *rxq; ++ ++ for (i = 0; i != priv->rxqs_n; ++i) { ++ rxq = mlx5_rxq_get(dev, i); ++ if (rxq && rxq->ctrl && rxq->ctrl->rxq.lro) { ++ DRV_LOG(ERR, "port %u invalid max LRO size", ++ dev->data->port_id); ++ rte_errno = EINVAL; ++ return -rte_errno; ++ } ++ } ++ } + ret = mlx5_dev_configure_rss_reta(dev); + if (ret) { + DRV_LOG(ERR, "port %u reta config failed: %s", +@@ -1205,11 +1226,18 @@ mlx5_dev_start(struct rte_eth_dev *dev) priv->sh->port[priv->dev_port - 1].ih_port_id = (uint32_t)dev->data->port_id; } else { @@ -35563,7 +66629,7 @@ index 74c9c0a4ff..2ba456ad7a 100644 if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0) priv->sh->port[priv->dev_port - 1].devx_ih_port_id = (uint32_t)dev->data->port_id; -@@ -1261,6 +1268,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev) +@@ -1261,6 +1289,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev) mlx5_rx_intr_vec_disable(dev); priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS; priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS; @@ -35572,9 +66638,18 @@ index 74c9c0a4ff..2ba456ad7a 100644 mlx5_rxq_stop(dev); if (priv->obj_ops.lb_dummy_queue_release) diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c -index 5492d64cae..fd2cf20967 100644 +index 5492d64cae..8f03743986 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.c +++ b/dpdk/drivers/net/mlx5/mlx5_tx.c +@@ -107,7 +107,7 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq, + mlx5_dump_debug_information(name, "MLX5 Error CQ:", + (const void *)((uintptr_t) + txq->cqes), +- sizeof(*err_cqe) * ++ sizeof(struct mlx5_cqe) * + (1 << txq->cqe_n)); + mlx5_dump_debug_information(name, "MLX5 Error SQ:", + (const void *)((uintptr_t) @@ -728,7 +728,7 @@ mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, * Pointer to the device structure. * @@ -35585,7 +66660,7 @@ index 5492d64cae..fd2cf20967 100644 * @param mode * Pointer to the burts mode information. 
diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.h b/dpdk/drivers/net/mlx5/mlx5_tx.h -index 099e72935a..6ed00f722e 100644 +index 099e72935a..e2d0d6de0b 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.h +++ b/dpdk/drivers/net/mlx5/mlx5_tx.h @@ -161,6 +161,7 @@ struct mlx5_txq_data { @@ -35596,7 +66671,48 @@ index 099e72935a..6ed00f722e 100644 struct mlx5_uar_data uar_data; struct rte_mbuf *elts[0]; /* Storage for queued packets, must be the last field. */ -@@ -1710,7 +1711,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq, +@@ -1621,6 +1622,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq, + static __rte_always_inline enum mlx5_txcmp_code + mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq, + struct mlx5_txq_local *restrict loc, ++ uint16_t elts, + unsigned int olx) + { + if (MLX5_TXOFF_CONFIG(TXPP) && +@@ -1635,7 +1637,7 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq, + * to the queue and we won't get the orphan WAIT WQE. + */ + if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE || +- loc->elts_free < NB_SEGS(loc->mbuf)) ++ loc->elts_free < elts) + return MLX5_TXCMP_CODE_EXIT; + /* Convert the timestamp into completion to wait. */ + ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *); +@@ -1665,6 +1667,9 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq, + * Pointer to TX queue structure. + * @param loc + * Pointer to burst routine local context. ++ * @param elts ++ * Number of free elements in elts buffer to be checked, for zero ++ * value the check is optimized out by compiler. + * @param olx + * Configured Tx offloads mask. It is fully defined at + * compile time and may be used for optimization. +@@ -1682,11 +1687,12 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq, + struct mlx5_wqe *__rte_restrict wqe; + unsigned int ds, dlen, inlen, ntcp, vlan = 0; + ++ MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf)); + if (MLX5_TXOFF_CONFIG(TXPP)) { + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. */ +- wret = mlx5_tx_schedule_send(txq, loc, olx); ++ wret = mlx5_tx_schedule_send(txq, loc, 0, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) +@@ -1710,7 +1716,6 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq, inlen <= MLX5_ESEG_MIN_INLINE_SIZE || inlen > (dlen + vlan))) return MLX5_TXCMP_CODE_ERROR; @@ -35604,17 +66720,237 @@ index 099e72935a..6ed00f722e 100644 /* * Check whether there are enough free WQEBBs: * - Control Segment -@@ -2019,7 +2019,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, +@@ -1726,7 +1731,7 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq, + if (unlikely(loc->wqe_free < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + /* Check for maximal WQE size. */ +- if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) ++ if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) + return MLX5_TXCMP_CODE_ERROR; + #ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes/packets counters. */ +@@ -1781,11 +1786,12 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq, + unsigned int ds, nseg; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); ++ MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf)); + if (MLX5_TXOFF_CONFIG(TXPP)) { + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. 
*/ +- wret = mlx5_tx_schedule_send(txq, loc, olx); ++ wret = mlx5_tx_schedule_send(txq, loc, 0, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) +@@ -1800,7 +1806,7 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq, if (unlikely(loc->wqe_free < ((ds + 3) / 4))) return MLX5_TXCMP_CODE_EXIT; /* Check for maximal WQE size. */ - if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) return MLX5_TXCMP_CODE_ERROR; + /* + * Some Tx offloads may cause an error if packet is not long enough, +@@ -1896,16 +1902,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, + + MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); + MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); +- if (MLX5_TXOFF_CONFIG(TXPP)) { +- enum mlx5_txcmp_code wret; +- +- /* Generate WAIT for scheduling if requested. */ +- wret = mlx5_tx_schedule_send(txq, loc, olx); +- if (wret == MLX5_TXCMP_CODE_EXIT) +- return MLX5_TXCMP_CODE_EXIT; +- if (wret == MLX5_TXCMP_CODE_ERROR) +- return MLX5_TXCMP_CODE_ERROR; +- } ++ MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf)); + /* + * First calculate data length to be inlined + * to estimate the required space for WQE. +@@ -1925,7 +1922,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, + uintptr_t start; + + mbuf = loc->mbuf; +- nxlen = rte_pktmbuf_data_len(mbuf); ++ nxlen = rte_pktmbuf_data_len(mbuf) + vlan; + /* + * Packet length exceeds the allowed inline data length, + * check whether the minimal inlining is required. +@@ -1944,6 +1941,8 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, + } else if (mbuf->ol_flags & RTE_MBUF_F_TX_DYNF_NOINLINE || + nxlen > txq->inlen_send) { + return mlx5_tx_packet_multi_send(txq, loc, olx); ++ } else if (nxlen <= MLX5_ESEG_MIN_INLINE_SIZE) { ++ inlen = MLX5_ESEG_MIN_INLINE_SIZE; + } else { + goto do_first; + } +@@ -2011,6 +2010,16 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, + * supposing no any mbufs is being freed during inlining. + */ + do_build: ++ if (MLX5_TXOFF_CONFIG(TXPP)) { ++ enum mlx5_txcmp_code wret; ++ ++ /* Generate WAIT for scheduling if requested. */ ++ wret = mlx5_tx_schedule_send(txq, loc, 0, olx); ++ if (wret == MLX5_TXCMP_CODE_EXIT) ++ return MLX5_TXCMP_CODE_EXIT; ++ if (wret == MLX5_TXCMP_CODE_ERROR) ++ return MLX5_TXCMP_CODE_ERROR; ++ } + MLX5_ASSERT(inlen <= txq->inlen_send); + ds = NB_SEGS(loc->mbuf) + 2 + (inlen - + MLX5_ESEG_MIN_INLINE_SIZE + +@@ -2019,8 +2028,24 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, + if (unlikely(loc->wqe_free < ((ds + 3) / 4))) + return MLX5_TXCMP_CODE_EXIT; + /* Check for maximal WQE size. */ +- if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) +- return MLX5_TXCMP_CODE_ERROR; ++ if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) { ++ /* Check if we can adjust the inline length. */ ++ if (unlikely(txq->inlen_mode)) { ++ ds = NB_SEGS(loc->mbuf) + 2 + ++ (txq->inlen_mode - ++ MLX5_ESEG_MIN_INLINE_SIZE + ++ MLX5_WSEG_SIZE + ++ MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; ++ if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) ++ return MLX5_TXCMP_CODE_ERROR; ++ } ++ /* We have lucky opportunity to adjust. 
*/ ++ inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX - ++ MLX5_WSEG_SIZE * 2 - ++ MLX5_WSEG_SIZE * NB_SEGS(loc->mbuf) - ++ MLX5_WSEG_SIZE + ++ MLX5_ESEG_MIN_INLINE_SIZE); ++ } #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes/packets counters. */ + txq->stats.obytes += dlen + vlan; +@@ -2171,7 +2196,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq, + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. */ +- wret = mlx5_tx_schedule_send(txq, loc, olx); ++ wret = mlx5_tx_schedule_send(txq, loc, 1, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) +@@ -2549,16 +2574,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq, + + next_empw: + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); +- if (MLX5_TXOFF_CONFIG(TXPP)) { +- enum mlx5_txcmp_code wret; +- +- /* Generate WAIT for scheduling if requested. */ +- wret = mlx5_tx_schedule_send(txq, loc, olx); +- if (wret == MLX5_TXCMP_CODE_EXIT) +- return MLX5_TXCMP_CODE_EXIT; +- if (wret == MLX5_TXCMP_CODE_ERROR) +- return MLX5_TXCMP_CODE_ERROR; +- } + part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? + MLX5_MPW_MAX_PACKETS : + MLX5_EMPW_MAX_PACKETS); +@@ -2569,6 +2584,16 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq, + /* But we still able to send at least minimal eMPW. */ + part = loc->elts_free; + } ++ if (MLX5_TXOFF_CONFIG(TXPP)) { ++ enum mlx5_txcmp_code wret; ++ ++ /* Generate WAIT for scheduling if requested. */ ++ wret = mlx5_tx_schedule_send(txq, loc, 0, olx); ++ if (wret == MLX5_TXCMP_CODE_EXIT) ++ return MLX5_TXCMP_CODE_EXIT; ++ if (wret == MLX5_TXCMP_CODE_ERROR) ++ return MLX5_TXCMP_CODE_ERROR; ++ } + /* Check whether we have enough WQEs */ + if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) { + if (unlikely(loc->wqe_free < +@@ -2723,23 +2748,23 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq, + unsigned int slen = 0; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); ++ /* ++ * Limits the amount of packets in one WQE ++ * to improve CQE latency generation. ++ */ ++ nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? ++ MLX5_MPW_INLINE_MAX_PACKETS : ++ MLX5_EMPW_MAX_PACKETS); + if (MLX5_TXOFF_CONFIG(TXPP)) { + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. */ +- wret = mlx5_tx_schedule_send(txq, loc, olx); ++ wret = mlx5_tx_schedule_send(txq, loc, nlim, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) + return MLX5_TXCMP_CODE_ERROR; + } +- /* +- * Limits the amount of packets in one WQE +- * to improve CQE latency generation. +- */ +- nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? +- MLX5_MPW_INLINE_MAX_PACKETS : +- MLX5_EMPW_MAX_PACKETS); + /* Check whether we have minimal amount WQEs */ + if (unlikely(loc->wqe_free < + ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) +@@ -3022,11 +3047,12 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq, + enum mlx5_txcmp_code ret; + + MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); ++ MLX5_ASSERT(loc->elts_free); + if (MLX5_TXOFF_CONFIG(TXPP)) { + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. 
*/ +- wret = mlx5_tx_schedule_send(txq, loc, olx); ++ wret = mlx5_tx_schedule_send(txq, loc, 0, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) +@@ -3262,7 +3288,9 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq, + * if no inlining is configured, this is done + * by calling routine in a batch copy. + */ +- MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); ++ if (MLX5_TXOFF_CONFIG(INLINE)) ++ txq->elts[txq->elts_head++ & txq->elts_m] = ++ loc->mbuf; + --loc->elts_free; + #ifdef MLX5_PMD_SOFT_COUNTERS + /* Update sent data bytes counter. */ +diff --git a/dpdk/drivers/net/mlx5/mlx5_txpp.c b/dpdk/drivers/net/mlx5/mlx5_txpp.c +index af77e91e4c..83d17997d1 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txpp.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txpp.c +@@ -1064,11 +1064,9 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused, + + if (n >= n_used + n_txpp && xstats_names) { + for (i = 0; i < n_txpp; ++i) { +- strncpy(xstats_names[i + n_used].name, ++ strlcpy(xstats_names[i + n_used].name, + mlx5_txpp_stat_names[i], + RTE_ETH_XSTATS_NAME_SIZE); +- xstats_names[i + n_used].name +- [RTE_ETH_XSTATS_NAME_SIZE - 1] = 0; + } + } + return n_used + n_txpp; diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c -index e4e66ae4c5..4115a2ad77 100644 +index e4e66ae4c5..b295702fd4 100644 --- a/dpdk/drivers/net/mlx5/mlx5_utils.c +++ b/dpdk/drivers/net/mlx5/mlx5_utils.c @@ -340,6 +340,8 @@ mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx) @@ -35626,6 +66962,15 @@ index e4e66ae4c5..4115a2ad77 100644 for (i = 0; i < fetch_size; i++) lc->idx[i] = ts_idx + i; lc->len = fetch_size; +@@ -477,7 +479,7 @@ _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) + mlx5_ipool_lock(pool); + gc = pool->gc; + if (ilc->lc != gc) { +- if (!(--ilc->lc->ref_cnt)) ++ if (ilc->lc && !(--ilc->lc->ref_cnt)) + olc = ilc->lc; + gc->ref_cnt++; + ilc->lc = gc; @@ -1184,44 +1186,3 @@ mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, rte_spinlock_unlock(&tbl->sl); return ret; @@ -35720,9 +67065,18 @@ index cf3db89403..254c879d1a 100644 * This function decreases and clear index entry if reference * counter is 0 from Three-level table. diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c -index c4d5790726..f5e3893ed4 100644 +index c4d5790726..0afe74cea8 100644 --- a/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c +++ b/dpdk/drivers/net/mlx5/windows/mlx5_flow_os.c +@@ -262,7 +262,7 @@ mlx5_is_thread_alive(HANDLE thread_handle) + + if (result == WAIT_OBJECT_0) + return false; +- return false; ++ return true; + } + + static int @@ -372,7 +372,7 @@ mlx5_flow_os_init_workspace_once(void) if (err) { @@ -35860,7 +67214,7 @@ index dec4b923d0..ba99901c5c 100644 goto error; } diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -index 10fe6d828c..eef016aa0b 100644 +index 10fe6d828c..d79d069120 100644 --- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c +++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c @@ -247,7 +247,7 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) @@ -35872,8 +67226,57 @@ index 10fe6d828c..eef016aa0b 100644 " current mbuf size: %u. 
Set MTU to %u, MRU to %u", mbuf_data_size, mtu, mru); } +@@ -381,6 +381,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) + goto out; + } + ++ /* start rx queues */ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + /* start tx queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; +@@ -405,6 +409,7 @@ static int + mvneta_dev_stop(struct rte_eth_dev *dev) + { + struct mvneta_priv *priv = dev->data->dev_private; ++ uint16_t i; + + dev->data->dev_started = 0; + +@@ -417,6 +422,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) + + priv->ppio = NULL; + ++ /* stop rx queues */ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ ++ /* stop tx queues */ ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/mvneta/mvneta_rxtx.c b/dpdk/drivers/net/mvneta/mvneta_rxtx.c +index 6e4a7896b4..952e982275 100644 +--- a/dpdk/drivers/net/mvneta/mvneta_rxtx.c ++++ b/dpdk/drivers/net/mvneta/mvneta_rxtx.c +@@ -79,6 +79,10 @@ mvneta_buffs_refill(struct mvneta_priv *priv, struct mvneta_rxq *rxq, u16 *num) + int i, ret; + uint16_t nb_desc = *num; + ++ /* To prevent GCC-12 warning. */ ++ if (unlikely(nb_desc == 0)) ++ return -1; ++ + ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc); + if (ret) { + MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc); diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c -index 9c7fe13f7f..735efb6cfc 100644 +index 9c7fe13f7f..a1c800aaf8 100644 --- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c @@ -579,7 +579,7 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) @@ -35885,7 +67288,31 @@ index 9c7fe13f7f..735efb6cfc 100644 "by current mbuf size: %u. 
Set MTU to %u, MRU to %u", mbuf_data_size, mtu, mru); } -@@ -1626,13 +1626,14 @@ mrvl_xstats_get(struct rte_eth_dev *dev, +@@ -956,6 +956,9 @@ mrvl_dev_start(struct rte_eth_dev *dev) + goto out; + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + mrvl_flow_init(dev); + mrvl_mtr_init(dev); + mrvl_set_tx_function(dev); +@@ -1081,6 +1084,13 @@ mrvl_flush_bpool(struct rte_eth_dev *dev) + static int + mrvl_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return mrvl_dev_set_link_down(dev); + } + +@@ -1626,13 +1636,14 @@ mrvl_xstats_get(struct rte_eth_dev *dev, { struct mrvl_priv *priv = dev->data->dev_private; struct pp2_ppio_statistics ppio_stats; @@ -35904,7 +67331,7 @@ index 9c7fe13f7f..735efb6cfc 100644 uint64_t val; if (mrvl_xstats_tbl[i].size == sizeof(uint32_t)) -@@ -1648,7 +1649,7 @@ mrvl_xstats_get(struct rte_eth_dev *dev, +@@ -1648,7 +1659,7 @@ mrvl_xstats_get(struct rte_eth_dev *dev, stats[i].value = val; } @@ -36176,6 +67603,29 @@ index 89dbba6cd9..b90280c9ff 100644 error = hn_nvs_execute(hv, &chim, sizeof(chim), &resp, sizeof(resp), +diff --git a/dpdk/drivers/net/netvsc/hn_rndis.c b/dpdk/drivers/net/netvsc/hn_rndis.c +index 1b63b27e0c..1ad255507e 100644 +--- a/dpdk/drivers/net/netvsc/hn_rndis.c ++++ b/dpdk/drivers/net/netvsc/hn_rndis.c +@@ -35,7 +35,7 @@ + #include "hn_rndis.h" + #include "ndis.h" + +-#define RNDIS_TIMEOUT_SEC 5 ++#define RNDIS_TIMEOUT_SEC 60 + #define RNDIS_DELAY_MS 10 + + #define HN_RNDIS_XFER_SIZE 0x4000 +@@ -329,7 +329,8 @@ void hn_rndis_receive_response(struct hn_data *hv, + + hn_rndis_dump(data); + +- if (len < sizeof(3 * sizeof(uint32_t))) { ++ /* Check we can read first three data fields from RNDIS header */ ++ if (len < 3 * sizeof(uint32_t)) { + PMD_DRV_LOG(ERR, + "missing RNDIS header %u", len); + return; diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c index 028f176c7e..7a3bd523a5 100644 --- a/dpdk/drivers/net/netvsc/hn_rxtx.c @@ -36438,7 +67888,7 @@ index 2d0b613d21..ca6e4d5578 100644 return 0; diff --git a/dpdk/drivers/net/nfp/nfp_common.c b/dpdk/drivers/net/nfp/nfp_common.c -index f8978e803a..34e3a03edd 100644 +index f8978e803a..f73f7dd0be 100644 --- a/dpdk/drivers/net/nfp/nfp_common.c +++ b/dpdk/drivers/net/nfp/nfp_common.c @@ -176,6 +176,13 @@ nfp_net_configure(struct rte_eth_dev *dev) @@ -36446,16 +67896,26 @@ index f8978e803a..34e3a03edd 100644 } + /* Checking MTU set */ -+ if (rxmode->mtu > hw->flbufsz) { -+ PMD_INIT_LOG(INFO, "MTU (%u) larger then current mbufsize (%u) not supported", -+ rxmode->mtu, hw->flbufsz); ++ if (rxmode->mtu > NFP_FRAME_SIZE_MAX) { ++ PMD_INIT_LOG(ERR, "MTU (%u) larger than NFP_FRAME_SIZE_MAX (%u) not supported", ++ rxmode->mtu, NFP_FRAME_SIZE_MAX); + return -ERANGE; + } + return 0; } -@@ -280,10 +287,6 @@ nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) +@@ -270,7 +277,8 @@ int + nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) + { + struct nfp_net_hw *hw; +- uint32_t update, ctrl; ++ uint32_t update; ++ uint32_t new_ctrl; + + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && +@@ -280,23 +288,22 @@ nfp_set_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) return 
-EBUSY; } @@ -36466,7 +67926,26 @@ index f8978e803a..34e3a03edd 100644 /* Writing new MAC to the specific port BAR address */ nfp_net_write_mac(hw, (uint8_t *)mac_addr); -@@ -696,7 +699,17 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + /* Signal the NIC about the change */ + update = NFP_NET_CFG_UPDATE_MACADDR; +- ctrl = hw->ctrl; ++ new_ctrl = hw->ctrl; + if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) && + (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) +- ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; +- if (nfp_net_reconfig(hw, ctrl, update) < 0) { ++ new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR; ++ if (nfp_net_reconfig(hw, new_ctrl, update) < 0) { + PMD_INIT_LOG(INFO, "MAC address update failed"); + return -EIO; + } ++ ++ hw->ctrl = new_ctrl; ++ + return 0; + } + +@@ -696,7 +703,17 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU; @@ -36485,13 +67964,22 @@ index f8978e803a..34e3a03edd 100644 /* Next should change when PF support is implemented */ dev_info->max_mac_addrs = 1; -@@ -956,6 +969,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +@@ -864,7 +881,7 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev) + * If MSI-X auto-masking is enabled clear the mask bit, otherwise + * clear the ICR for the entry. + */ +-static void ++void + nfp_net_irq_unmask(struct rte_eth_dev *dev) + { + struct nfp_net_hw *hw; +@@ -956,6 +973,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EBUSY; } -+ /* MTU larger then current mbufsize not supported */ ++ /* MTU larger than current mbufsize not supported */ + if (mtu > hw->flbufsz) { -+ PMD_DRV_LOG(ERR, "MTU (%u) larger then current mbufsize (%u) not supported", ++ PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported", + mtu, hw->flbufsz); + return -ERANGE; + } @@ -36499,7 +67987,7 @@ index f8978e803a..34e3a03edd 100644 /* writing to configuration space */ nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu); -@@ -969,22 +989,25 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) +@@ -969,22 +993,25 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask) { uint32_t new_ctrl, update; struct nfp_net_hw *hw; @@ -36536,25 +68024,84 @@ index f8978e803a..34e3a03edd 100644 return 0; update = NFP_NET_CFG_UPDATE_GEN; +@@ -1225,7 +1252,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL); + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) +- rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP; ++ rss_hf |= RTE_ETH_RSS_IPV4; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; +@@ -1240,7 +1267,7 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev, + rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP; + + if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) +- rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP; ++ rss_hf |= RTE_ETH_RSS_IPV6; + + /* Propagate current RSS hash functions to caller */ + rss_conf->rss_hf = rss_hf; diff --git a/dpdk/drivers/net/nfp/nfp_common.h b/dpdk/drivers/net/nfp/nfp_common.h -index 8b35fa119c..8db5ec23f8 100644 +index 8b35fa119c..8c39b84c36 100644 --- a/dpdk/drivers/net/nfp/nfp_common.h +++ b/dpdk/drivers/net/nfp/nfp_common.h -@@ -98,6 +98,9 @@ struct nfp_net_adapter; +@@ -98,6 +98,10 @@ struct nfp_net_adapter; /* Number of supported physical ports */ #define 
NFP_MAX_PHYPORTS 12 +/* Maximum supported NFP frame size (MTU + layer 2 headers) */ +#define NFP_FRAME_SIZE_MAX 10048 ++#define DEFAULT_FLBUF_SIZE 9216 + #include <linux/types.h> #include <rte_io.h> +@@ -375,6 +379,7 @@ int nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); + void nfp_net_params_setup(struct nfp_net_hw *hw); + void nfp_net_cfg_queue_setup(struct nfp_net_hw *hw); + void nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src); ++void nfp_net_irq_unmask(struct rte_eth_dev *dev); + void nfp_net_dev_interrupt_handler(void *param); + void nfp_net_dev_interrupt_delayed_handler(void *param); + int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c -index 8e81cc498f..1a9f7581a7 100644 +index 8e81cc498f..e1da0bdebe 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev.c -@@ -302,11 +302,13 @@ nfp_net_close(struct rte_eth_dev *dev) +@@ -132,6 +132,13 @@ nfp_net_start(struct rte_eth_dev *dev) + update = NFP_NET_CFG_UPDATE_MSIX; + } + ++ /* Checking MTU set */ ++ if (dev->data->mtu > hw->flbufsz) { ++ PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)", ++ dev->data->mtu, hw->flbufsz); ++ return -ERANGE; ++ } ++ + rte_intr_enable(intr_handle); + + new_ctrl = nfp_check_offloads(dev); +@@ -160,6 +167,8 @@ nfp_net_start(struct rte_eth_dev *dev) + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + ++ hw->ctrl = new_ctrl; ++ + /* + * Allocating rte mbufs for configured rx queues. + * This requires queues being enabled before +@@ -176,8 +185,6 @@ nfp_net_start(struct rte_eth_dev *dev) + nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 1); + +- hw->ctrl = new_ctrl; +- + return 0; + + error: +@@ -302,11 +309,13 @@ nfp_net_close(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; nfp_net_reset_tx_queue(this_tx_q); @@ -36568,18 +68115,102 @@ index 8e81cc498f..1a9f7581a7 100644 } /* Cancel possible impending LSC work here before releasing the port*/ -@@ -500,6 +502,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -500,6 +509,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; -+ hw->flbufsz = RTE_ETHER_MTU; ++ hw->flbufsz = DEFAULT_FLBUF_SIZE; /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) +@@ -585,6 +595,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + (void *)eth_dev); + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); ++ /* Unmask the LSC interrupt */ ++ nfp_net_irq_unmask(eth_dev); + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + } +@@ -627,7 +639,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) + goto load_fw; + /* Then try the PCI name */ + snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, +- dev->device.name); ++ dev->name); + + PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); + if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) +@@ -662,7 +674,9 @@ nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp, + char card_desc[100]; + int err = 0; + +- nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); ++ nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno"); ++ if (nfp_fw_model == NULL) ++ 
nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno"); + + if (nfp_fw_model) { + PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model); +@@ -788,6 +802,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) + { + struct nfp_pf_dev *pf_dev = NULL; + struct nfp_cpp *cpp; ++ uint32_t cpp_id; + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; + struct nfp_eth_table *nfp_eth_table = NULL; +@@ -795,6 +810,7 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) + int total_ports; + int ret = -ENODEV; + int err; ++ uint32_t i; + + if (!pci_dev) + return ret; +@@ -831,6 +847,10 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) + goto hwinfo_cleanup; + } + ++ /* Force the physical port down to clear the possible DMA error */ ++ for (i = 0; i < nfp_eth_table->count; i++) ++ nfp_eth_set_configured(cpp, nfp_eth_table->ports[i].index, 0); ++ + if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { + PMD_INIT_LOG(ERR, "Error when uploading firmware"); + ret = -EIO; +@@ -892,7 +912,8 @@ static int nfp_pf_init(struct rte_pci_device *pci_dev) + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar); + + /* configure access to tx/rx vNIC BARs */ +- pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0, ++ cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); ++ pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, cpp_id, + NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, + &pf_dev->hwqueues_area); diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -index 303ef72b1b..0781f34764 100644 +index 303ef72b1b..0dda3961ce 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +@@ -122,6 +122,8 @@ nfp_netvf_start(struct rte_eth_dev *dev) + if (nfp_net_reconfig(hw, new_ctrl, update) < 0) + return -EIO; + ++ hw->ctrl = new_ctrl; ++ + /* + * Allocating rte mbufs for configured rx queues. 
+ * This requires queues being enabled before +@@ -131,8 +133,6 @@ nfp_netvf_start(struct rte_eth_dev *dev) + goto error; + } + +- hw->ctrl = new_ctrl; +- + return 0; + + error: @@ -219,11 +219,13 @@ nfp_netvf_close(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; @@ -36598,14 +68229,74 @@ index 303ef72b1b..0781f34764 100644 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; -+ hw->flbufsz = RTE_ETHER_MTU; ++ hw->flbufsz = DEFAULT_FLBUF_SIZE; /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) +@@ -451,6 +454,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + (void *)eth_dev); + /* Telling the firmware about the LSC interrupt entry */ + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); ++ /* Unmask the LSC interrupt */ ++ nfp_net_irq_unmask(eth_dev); + /* Recording current stats counters values */ + nfp_net_stats_reset(eth_dev); + } diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.c b/dpdk/drivers/net/nfp/nfp_rxtx.c -index 0fe1415596..335a90b2c9 100644 +index 0fe1415596..4fa608d417 100644 --- a/dpdk/drivers/net/nfp/nfp_rxtx.c +++ b/dpdk/drivers/net/nfp/nfp_rxtx.c +@@ -299,8 +299,9 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + struct rte_mbuf *new_mb; + uint16_t nb_hold; + uint64_t dma_addr; +- int avail; ++ uint16_t avail; + ++ avail = 0; + rxq = rx_queue; + if (unlikely(rxq == NULL)) { + /* +@@ -308,11 +309,10 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + * enabled. But the queue needs to be configured + */ + RTE_LOG_DP(ERR, PMD, "RX Bad queue\n"); +- return -EINVAL; ++ return avail; + } + + hw = rxq->hw; +- avail = 0; + nb_hold = 0; + + while (avail < nb_pkts) { +@@ -345,8 +345,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + break; + } + +- nb_hold++; +- + /* + * Grab the mbuf and refill the descriptor with the + * previously allocated mbuf +@@ -378,7 +376,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + hw->rx_offset, + rxq->mbuf_size - hw->rx_offset, + mb->data_len); +- return -EINVAL; ++ rte_pktmbuf_free(mb); ++ break; + } + + /* Filling the received mbuf with packet info */ +@@ -416,6 +415,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rxds->fld.dd = 0; + rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; + rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; ++ nb_hold++; + + rxq->rd_p++; + if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/ @@ -470,6 +470,7 @@ nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx) if (rxq) { @@ -36622,6 +68313,23 @@ index 0fe1415596..335a90b2c9 100644 rte_free(txq->txbufs); rte_free(txq); } +@@ -794,10 +796,14 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + static inline + uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq) + { ++ uint32_t free_desc; ++ + if (txq->wr_p >= txq->rd_p) +- return txq->tx_count - (txq->wr_p - txq->rd_p) - 8; ++ free_desc = txq->tx_count - (txq->wr_p - txq->rd_p); + else +- return txq->rd_p - txq->wr_p - 8; ++ free_desc = txq->rd_p - txq->wr_p - 8; ++ ++ return (free_desc > 8) ? 
(free_desc - 8) : 0; + } + + /* diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h index 0e03948ec7..394a7628e0 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h @@ -36653,8 +68361,38 @@ index 0e03948ec7..394a7628e0 100644 * is either an empty string or the first character * after the separating period. * @return The ME number on succes, -1 on error. +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +index 720d3989e6..61d36047e1 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp.h +@@ -34,6 +34,9 @@ struct nfp_cpp { + */ + uint32_t imb_cat_table[16]; + ++ /* MU access type bit offset */ ++ uint32_t mu_locality_lsb; ++ + int driver_lock_needed; + }; + +@@ -363,7 +366,7 @@ struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, + */ + void nfp_cpp_area_release_free(struct nfp_cpp_area *area); + +-uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, ++uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, + uint64_t addr, unsigned long size, + struct nfp_cpp_area **area); + /* +@@ -778,4 +781,6 @@ int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); + */ + int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); + ++uint32_t nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); ++ + #endif /* !__NFP_CPP_H__ */ diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c -index bad80a5a1c..08bc4e8ef2 100644 +index bad80a5a1c..978c91f6b0 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c +++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c @@ -16,9 +16,6 @@ @@ -36667,11 +68405,108 @@ index bad80a5a1c..08bc4e8ef2 100644 #include <stdlib.h> #include <unistd.h> #include <stdint.h> +@@ -69,8 +66,8 @@ + #define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) + #define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +-#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \ +- (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4) ++#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(id, bar, slot) \ ++ (NFP_PCIE_BAR(id) + ((bar) * 8 + (slot)) * 4) + + #define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \ + (((bar) * 8 + (slot)) * 4) +@@ -117,6 +114,7 @@ struct nfp_pcie_user { + int secondary_lock; + char busdev[BUSDEV_SZ]; + int barsz; ++ int dev_id; + char *cfg; + }; + +@@ -258,7 +256,7 @@ nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar, + return (-ENOMEM); + + bar->csr = nfp->cfg + +- NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot); ++ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, base, slot); + + *(uint32_t *)(bar->csr) = newcfg; + +@@ -328,10 +326,8 @@ nfp_enable_bars(struct nfp_pcie_user *nfp) + bar->base = 0; + bar->iomem = NULL; + bar->lock = 0; +- bar->csr = nfp->cfg + +- NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3, +- bar->index & 7); +- ++ bar->csr = nfp->cfg + NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(nfp->dev_id, ++ bar->index >> 3, bar->index & 7); + bar->iomem = nfp->cfg + (bar->index << bar->bitsize); + } + return 0; +@@ -846,6 +842,7 @@ nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev) + goto error; + + desc->cfg = (char *)dev->mem_resource[0].addr; ++ desc->dev_id = dev->addr.function & 0x7; + + nfp_enable_bars(desc); + diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c -index f91049383e..37799af558 100644 +index f91049383e..13d979633d 
100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c +++ b/dpdk/drivers/net/nfp/nfpcore/nfp_cppcore.c -@@ -202,7 +202,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest, +@@ -118,6 +118,36 @@ nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) + return cpp_area->name; + } + ++#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) ++#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE RTE_BIT32(12) ++ ++static int ++nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp) ++{ ++ int ret; ++ int mode; ++ int addr40; ++ uint32_t imbcppat; ++ ++ imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU]; ++ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); ++ addr40 = imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE; ++ ++ ret = nfp_cppat_mu_locality_lsb(mode, addr40); ++ if (ret < 0) ++ return ret; ++ ++ cpp->mu_locality_lsb = ret; ++ ++ return 0; ++} ++ ++uint32_t ++nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) ++{ ++ return cpp->mu_locality_lsb; ++} ++ + /* + * nfp_cpp_area_alloc - allocate a new CPP area + * @cpp: CPP handle +@@ -142,10 +172,6 @@ nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest, + if (!cpp) + return NULL; + +- /* CPP bus uses only a 40-bit address */ +- if ((address + size) > (1ULL << 40)) +- return NFP_ERRPTR(EFAULT); +- + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + if (err < 0) +@@ -202,7 +228,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest, * @address: start address on CPP target * @size: size of area * @@ -36680,6 +68515,62 @@ index f91049383e..37799af558 100644 * that it can be accessed directly. * * NOTE: @address and @size must be 32-bit aligned values. +@@ -588,6 +614,12 @@ nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed) + } + } + ++ err = nfp_cpp_set_mu_locality_lsb(cpp); ++ if (err < 0) { ++ free(cpp); ++ return NULL; ++ } ++ + return cpp; + } + +@@ -819,8 +851,7 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) + /* + * nfp_cpp_map_area() - Helper function to map an area + * @cpp: NFP CPP handler +- * @domain: CPP domain +- * @target: CPP target ++ * @cpp_id: CPP ID + * @addr: CPP address + * @size: Size of the area + * @area: Area handle (output) +@@ -831,16 +862,13 @@ __nfp_cpp_model_autodetect(struct nfp_cpp *cpp, uint32_t *model) + * Return: Pointer to memory mapped area or ERR_PTR + */ + uint8_t * +-nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr, ++nfp_cpp_map_area(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr, + unsigned long size, struct nfp_cpp_area **area) + { + uint8_t *res; +- uint32_t dest; +- +- dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain); + +- *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size); +- if (!*area) ++ *area = nfp_cpp_area_alloc_acquire(cpp, cpp_id, addr, size); ++ if (*area == NULL) + goto err_eio; + + res = nfp_cpp_area_iomem(*area); +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c +index c0516bf8e8..9f848bde79 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_hwinfo.c +@@ -108,7 +108,7 @@ nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) + goto exit_free; + + header = (void *)db; +- printf("NFP HWINFO header: %08x\n", *(uint32_t *)header); ++ printf("NFP HWINFO header: %#08x\n", *(uint32_t *)header); + if (nfp_hwinfo_is_updating(header)) + goto exit_free; + diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h b/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h 
index c9c7b0d0fb..e74cdeb191 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_nsp.h @@ -36707,10 +68598,110 @@ index dd41fa4de4..7b5630fd86 100644 void nfp_resource_release(struct nfp_resource *res) diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c -index cb7d83db51..2feca2ed81 100644 +index cb7d83db51..2c6c639367 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c +++ b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c -@@ -236,7 +236,7 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) +@@ -232,11 +232,107 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) + return NULL; + } + ++static uint64_t ++nfp_rtsym_size(const struct nfp_rtsym *sym) ++{ ++ switch (sym->type) { ++ case NFP_RTSYM_TYPE_NONE: ++ return 0; ++ case NFP_RTSYM_TYPE_OBJECT: /* Fall through */ ++ case NFP_RTSYM_TYPE_FUNCTION: ++ return sym->size; ++ case NFP_RTSYM_TYPE_ABS: ++ return sizeof(uint64_t); ++ default: ++ return 0; ++ } ++} ++ ++static int ++nfp_rtsym_to_dest(struct nfp_cpp *cpp, ++ const struct nfp_rtsym *sym, ++ uint8_t action, ++ uint8_t token, ++ uint64_t offset, ++ uint32_t *cpp_id, ++ uint64_t *addr) ++{ ++ if (sym->type != NFP_RTSYM_TYPE_OBJECT) ++ return -EINVAL; ++ ++ *addr = sym->addr + offset; ++ ++ if (sym->target >= 0) { ++ *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, sym->domain); ++ } else if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) { ++ int locality_off = nfp_cpp_mu_locality_lsb(cpp); ++ ++ *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); ++ *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; ++ ++ *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, ++ sym->domain); ++ } else { ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++nfp_rtsym_readl(struct nfp_cpp *cpp, ++ const struct nfp_rtsym *sym, ++ uint8_t action, ++ uint8_t token, ++ uint64_t offset, ++ uint32_t *value) ++{ ++ int ret; ++ uint64_t addr; ++ uint32_t cpp_id; ++ ++ if (offset + 4 > nfp_rtsym_size(sym)) ++ return -ENXIO; ++ ++ ret = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); ++ if (ret != 0) ++ return ret; ++ ++ return nfp_cpp_readl(cpp, cpp_id, addr, value); ++} ++ ++static int ++nfp_rtsym_readq(struct nfp_cpp *cpp, ++ const struct nfp_rtsym *sym, ++ uint8_t action, ++ uint8_t token, ++ uint64_t offset, ++ uint64_t *value) ++{ ++ int ret; ++ uint64_t addr; ++ uint32_t cpp_id; ++ ++ if (offset + 8 > nfp_rtsym_size(sym)) ++ return -ENXIO; ++ ++ if (sym->type == NFP_RTSYM_TYPE_ABS) { ++ *value = sym->addr; ++ return 0; ++ } ++ ++ ret = nfp_rtsym_to_dest(cpp, sym, action, token, offset, &cpp_id, &addr); ++ if (ret != 0) ++ return ret; ++ ++ return nfp_cpp_readq(cpp, cpp_id, addr, value); ++} ++ + /* * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol * @rtbl: NFP RTsym table * @name: Symbol name @@ -36719,6 +68710,71 @@ index cb7d83db51..2feca2ed81 100644 * * Lookup a symbol, map, read it and return it's value. Value of the symbol * will be interpreted as a simple little-endian unsigned value. 
Symbol can +@@ -248,7 +344,7 @@ uint64_t + nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) + { + const struct nfp_rtsym *sym; +- uint32_t val32, id; ++ uint32_t val32; + uint64_t val; + int err; + +@@ -258,19 +354,17 @@ nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error) + goto exit; + } + +- id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); +- + #ifdef DEBUG + printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n", + name, sym->size, sym->addr); + #endif + switch (sym->size) { + case 4: +- err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); ++ err = nfp_rtsym_readl(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val32); + val = val32; + break; + case 8: +- err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); ++ err = nfp_rtsym_readq(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, &val); + break; + default: + printf("rtsym '%s' unsupported size: %" PRId64 "\n", +@@ -295,8 +389,11 @@ uint8_t * + nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + unsigned int min_size, struct nfp_cpp_area **area) + { +- const struct nfp_rtsym *sym; ++ int ret; + uint8_t *mem; ++ uint64_t addr; ++ uint32_t cpp_id; ++ const struct nfp_rtsym *sym; + + #ifdef DEBUG + printf("mapping symbol %s\n", name); +@@ -307,14 +404,18 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, + return NULL; + } + ++ ret = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, ++ &cpp_id, &addr); ++ if (ret != 0) ++ return NULL; ++ + if (sym->size < min_size) { + printf("Symbol %s too small (%" PRIu64 " < %u)\n", name, + sym->size, min_size); + return NULL; + } + +- mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr, +- sym->size, area); ++ mem = nfp_cpp_map_area(rtbl->cpp, cpp_id, addr, sym->size, area); + if (!mem) { + printf("Failed to map symbol %s\n", name); + return NULL; diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_target.h b/dpdk/drivers/net/nfp/nfpcore/nfp_target.h index 2884a0034f..e8dcc9ad1e 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_target.h @@ -36839,7 +68895,7 @@ index 61b0d82bfb..d74c9f7b54 100644 hw->mac.led_off = ngbe_mac_led_off_dummy; hw->mac.set_rar = ngbe_mac_set_rar_dummy; diff --git a/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c b/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c -index f9a876e9bd..6375ee9b29 100644 +index f9a876e9bd..104501fa7a 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c +++ b/dpdk/drivers/net/ngbe/base/ngbe_eeprom.c @@ -20,8 +20,6 @@ s32 ngbe_init_eeprom_params(struct ngbe_hw *hw) @@ -36882,32 +68938,56 @@ index f9a876e9bd..6375ee9b29 100644 /* * this release is particularly important because our attempts * above to get the semaphore may have succeeded, and if there -@@ -134,13 +128,12 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { +@@ -111,38 +105,6 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) + status = 0; + } + +- /* Now get the semaphore between SW/FW through the SWESMBI bit */ +- if (status == 0) { +- for (i = 0; i < timeout; i++) { +- /* Set the SW EEPROM semaphore bit to request access */ +- wr32m(hw, NGBE_MNGSWSYNC, +- NGBE_MNGSWSYNC_REQ, NGBE_MNGSWSYNC_REQ); +- +- /* +- * If we set the bit successfully then we got the +- * semaphore. 
+- */ +- swsm = rd32(hw, NGBE_MNGSWSYNC); +- if (swsm & NGBE_MNGSWSYNC_REQ) +- break; +- +- usec_delay(50); +- } +- +- /* +- * Release semaphores and return error if SW EEPROM semaphore +- * was not granted because we don't have access to the EEPROM +- */ +- if (i >= timeout) { - DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.\n"); -+ DEBUGOUT("SWESMBI Software EEPROM semaphore not granted."); - ngbe_release_eeprom_semaphore(hw); - status = NGBE_ERR_EEPROM; - } - } else { +- ngbe_release_eeprom_semaphore(hw); +- status = NGBE_ERR_EEPROM; +- } +- } else { - DEBUGOUT("Software semaphore SMBI between device drivers " - "not granted.\n"); -+ DEBUGOUT("Software semaphore SMBI between device drivers not granted."); - } - +- } +- return status; -@@ -154,8 +147,6 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) + } + +@@ -154,9 +116,6 @@ s32 ngbe_get_eeprom_semaphore(struct ngbe_hw *hw) **/ void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw) { - DEBUGFUNC("ngbe_release_eeprom_semaphore"); - - wr32m(hw, NGBE_MNGSWSYNC, NGBE_MNGSWSYNC_REQ, 0); +- wr32m(hw, NGBE_MNGSWSYNC, NGBE_MNGSWSYNC_REQ, 0); wr32m(hw, NGBE_SWSEM, NGBE_SWSEM_PF, 0); ngbe_flush(hw); -@@ -276,7 +267,6 @@ s32 ngbe_validate_eeprom_checksum_em(struct ngbe_hw *hw, + } +@@ -276,7 +235,6 @@ s32 ngbe_validate_eeprom_checksum_em(struct ngbe_hw *hw, u32 eeprom_cksum_devcap = 0; int err = 0; @@ -36915,7 +68995,7 @@ index f9a876e9bd..6375ee9b29 100644 UNREFERENCED_PARAMETER(checksum_val); /* Check EEPROM only once */ -@@ -315,8 +305,6 @@ s32 ngbe_save_eeprom_version(struct ngbe_hw *hw) +@@ -315,8 +273,6 @@ s32 ngbe_save_eeprom_version(struct ngbe_hw *hw) u32 etrack_id = 0; u32 offset = (hw->rom.sw_addr + NGBE_EEPROM_VERSION_L) << 1; @@ -38284,9 +70364,18 @@ index 8db0f9ce48..40f1725f61 100644 *link_up = false; *speed = NGBE_LINK_SPEED_UNKNOWN; diff --git a/dpdk/drivers/net/ngbe/base/ngbe_regs.h b/dpdk/drivers/net/ngbe/base/ngbe_regs.h -index 872b008c46..640e385990 100644 +index 872b008c46..c0e79a2ba7 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_regs.h +++ b/dpdk/drivers/net/ngbe/base/ngbe_regs.h +@@ -525,7 +525,7 @@ enum ngbe_5tuple_protocol { + #define NGBE_PSRCTL_LBENA MS(18, 0x1) + #define NGBE_FRMSZ 0x015020 + #define NGBE_FRMSZ_MAX_MASK MS(0, 0xFFFF) +-#define NGBE_FRMSZ_MAX(v) LS(v, 0, 0xFFFF) ++#define NGBE_FRMSZ_MAX(v) LS((v) + 4, 0, 0xFFFF) + #define NGBE_VLANCTL 0x015088 + #define NGBE_VLANCTL_TPID_MASK MS(0, 0xFFFF) + #define NGBE_VLANCTL_TPID(v) LS(v, 0, 0xFFFF) @@ -785,30 +785,30 @@ enum ngbe_5tuple_protocol { #define NGBE_MACRXERRCRCH 0x01192C #define NGBE_MACRXERRLENL 0x011978 @@ -38369,12 +70458,15 @@ index 872b008c46..640e385990 100644 break; diff --git a/dpdk/drivers/net/ngbe/base/ngbe_type.h b/dpdk/drivers/net/ngbe/base/ngbe_type.h -index 12847b7272..4c995e7397 100644 +index 12847b7272..8f87398c17 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_type.h +++ b/dpdk/drivers/net/ngbe/base/ngbe_type.h -@@ -11,9 +11,15 @@ +@@ -9,11 +9,17 @@ + #define NGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ + #define NGBE_FRAME_SIZE_MAX (9728) /* Maximum frame size, +FCS */ - #define NGBE_FRAME_SIZE_DFT (1522) /* Default frame size, +FCS */ +-#define NGBE_FRAME_SIZE_DFT (1522) /* Default frame size, +FCS */ ++#define NGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */ #define NGBE_NUM_POOL (32) +#define NGBE_PBRXSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ +#define NGBE_PBTXSIZE_MAX 0x00005000 /* 20KB Packet Buffer */ @@ -38388,7 +70480,74 @@ index 12847b7272..4c995e7397 100644 #define NGBE_ALIGN 128 /* as intel did */ #define 
NGBE_ISB_SIZE 16 -@@ -269,6 +275,9 @@ struct ngbe_mac_info { +@@ -104,6 +110,46 @@ struct ngbe_fc_info { + enum ngbe_fc_mode requested_mode; /* FC mode requested by caller */ + }; + ++/* Flow Control Data Sheet defined values ++ * Calculation and defines taken from 802.1bb Annex O ++ */ ++/* BitTimes (BT) conversion */ ++#define NGBE_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) ++#define NGBE_B2BT(BT) ((BT) * 8) ++ ++/* Calculate Delay to respond to PFC */ ++#define NGBE_PFC_D 672 ++ ++/* Calculate Cable Delay */ ++#define NGBE_CABLE_DC 5556 /* Delay Copper */ ++ ++/* Calculate Interface Delay */ ++#define NGBE_PHY_D 12800 ++#define NGBE_MAC_D 4096 ++#define NGBE_XAUI_D (2 * 1024) ++ ++#define NGBE_ID (NGBE_MAC_D + NGBE_XAUI_D + NGBE_PHY_D) ++ ++/* Calculate Delay incurred from higher layer */ ++#define NGBE_HD 6144 ++ ++/* Calculate PCI Bus delay for low thresholds */ ++#define NGBE_PCI_DELAY 10000 ++ ++/* Calculate delay value in bit times */ ++#define NGBE_DV(_max_frame_link, _max_frame_tc) \ ++ ((36 * \ ++ (NGBE_B2BT(_max_frame_link) + \ ++ NGBE_PFC_D + \ ++ (2 * NGBE_CABLE_DC) + \ ++ (2 * NGBE_ID) + \ ++ NGBE_HD) / 25 + 1) + \ ++ 2 * NGBE_B2BT(_max_frame_tc)) ++ ++#define NGBE_LOW_DV(_max_frame_tc) \ ++ (2 * ((2 * NGBE_B2BT(_max_frame_tc) + \ ++ (36 * NGBE_PCI_DELAY / 25) + 1))) ++ + /* Statistics counters collected by the MAC */ + /* PB[] RxTx */ + struct ngbe_pb_stats { +@@ -130,9 +176,8 @@ struct ngbe_hw_stats { + u64 mng_bmc2host_packets; + u64 mng_host2bmc_packets; + /* Basix RxTx */ +- u64 rx_drop_packets; +- u64 tx_drop_packets; + u64 rx_dma_drop; ++ u64 tx_dma_drop; + u64 tx_secdrp_packets; + u64 rx_packets; + u64 tx_packets; +@@ -158,7 +203,7 @@ struct ngbe_hw_stats { + u64 rx_length_errors; + u64 rx_undersize_errors; + u64 rx_fragment_errors; +- u64 rx_oversize_errors; ++ u64 rx_oversize_cnt; + u64 rx_jabber_errors; + u64 rx_l3_l4_xsum_error; + u64 mac_local_errors; +@@ -269,6 +314,9 @@ struct ngbe_mac_info { s32 (*get_link_capabilities)(struct ngbe_hw *hw, u32 *speed, bool *autoneg); @@ -38398,7 +70557,7 @@ index 12847b7272..4c995e7397 100644 /* LED */ s32 (*led_on)(struct ngbe_hw *hw, u32 index); s32 (*led_off)(struct ngbe_hw *hw, u32 index); -@@ -311,6 +320,7 @@ struct ngbe_mac_info { +@@ -311,6 +359,7 @@ struct ngbe_mac_info { u32 mcft_size; u32 vft_size; u32 num_rar_entries; @@ -38407,27 +70566,43 @@ index 12847b7272..4c995e7397 100644 u32 max_rx_queues; bool get_link_status; diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -index 981592f7f4..b930326379 100644 +index 981592f7f4..3038694042 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -@@ -89,7 +89,6 @@ static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); +@@ -89,8 +89,8 @@ static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev); static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static void ngbe_dev_interrupt_handler(void *param); -static void ngbe_dev_interrupt_delayed_handler(void *param); static void ngbe_configure_msix(struct rte_eth_dev *dev); ++static void ngbe_pbthresh_set(struct rte_eth_dev *dev); #define NGBE_SET_HWSTRIP(h, q) do {\ -@@ -165,6 +164,8 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ +@@ -161,10 +161,15 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { + HW_XSTAT(tx_total_packets), + 
HW_XSTAT(rx_total_missed_packets), + HW_XSTAT(rx_broadcast_packets), ++ HW_XSTAT(tx_broadcast_packets), + HW_XSTAT(rx_multicast_packets), ++ HW_XSTAT(tx_multicast_packets), HW_XSTAT(rx_management_packets), HW_XSTAT(tx_management_packets), HW_XSTAT(rx_management_dropped), + HW_XSTAT(rx_dma_drop), ++ HW_XSTAT(tx_dma_drop), + HW_XSTAT(tx_secdrp_packets), /* Basic Error */ HW_XSTAT(rx_crc_errors), -@@ -180,6 +181,12 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { +@@ -174,12 +179,18 @@ static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = { + HW_XSTAT(rx_length_errors), + HW_XSTAT(rx_undersize_errors), + HW_XSTAT(rx_fragment_errors), +- HW_XSTAT(rx_oversize_errors), ++ HW_XSTAT(rx_oversize_cnt), + HW_XSTAT(rx_jabber_errors), + HW_XSTAT(rx_l3_l4_xsum_error), HW_XSTAT(mac_local_errors), HW_XSTAT(mac_remote_errors), @@ -38440,7 +70615,7 @@ index 981592f7f4..b930326379 100644 /* MACSEC */ HW_XSTAT(tx_macsec_pkts_untagged), HW_XSTAT(tx_macsec_pkts_encrypted), -@@ -356,6 +363,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -356,6 +367,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; /* Vendor and Device ID need to be set before init of shared code */ @@ -38448,7 +70623,7 @@ index 981592f7f4..b930326379 100644 hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; hw->sub_system_id = pci_dev->id.subsystem_device_id; -@@ -943,12 +951,14 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -943,12 +955,14 @@ ngbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -38464,7 +70639,7 @@ index 981592f7f4..b930326379 100644 /* reinitialize adapter, this calls reset and start */ hw->nb_rx_queues = dev->data->nb_rx_queues; -@@ -959,6 +969,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -959,6 +973,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) hw->mac.start_hw(hw); hw->mac.get_link_status = true; @@ -38473,7 +70648,7 @@ index 981592f7f4..b930326379 100644 /* configure PF module if SRIOV enabled */ ngbe_pf_host_configure(dev); -@@ -983,7 +995,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -983,7 +999,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) } } @@ -38482,15 +70657,16 @@ index 981592f7f4..b930326379 100644 ngbe_configure_msix(dev); /* initialize transmission unit */ -@@ -1004,6 +1016,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -1004,6 +1020,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) goto error; } + hw->mac.setup_pba(hw); ++ ngbe_pbthresh_set(dev); ngbe_configure_port(dev); err = ngbe_dev_rxtx_start(dev); -@@ -1037,7 +1050,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -1037,7 +1055,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL) allowed_speeds |= RTE_ETH_LINK_SPEED_10M; @@ -38499,7 +70675,12 @@ index 981592f7f4..b930326379 100644 PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } -@@ -1131,6 +1144,8 @@ ngbe_dev_stop(struct rte_eth_dev *dev) +@@ -1127,10 +1145,12 @@ ngbe_dev_stop(struct rte_eth_dev *dev) + int vf; + + if (hw->adapter_stopped) +- return 0; ++ goto out; PMD_INIT_FUNC_TRACE(); @@ -38508,7 +70689,7 @@ index 981592f7f4..b930326379 100644 if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP || (hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) { /* gpio0 is used to power on/off control*/ -@@ -1169,6 +1184,8 @@ ngbe_dev_stop(struct rte_eth_dev *dev) +@@ -1169,11 +1189,17 @@ ngbe_dev_stop(struct 
rte_eth_dev *dev) rte_intr_efd_disable(intr_handle); rte_intr_vec_list_free(intr_handle); @@ -38517,7 +70698,25 @@ index 981592f7f4..b930326379 100644 adapter->rss_reta_updated = 0; hw->adapter_stopped = true; -@@ -1197,6 +1214,8 @@ ngbe_dev_close(struct rte_eth_dev *dev) + dev->data->dev_started = 0; + ++out: ++ /* close phy to prevent reset in dev_close from restarting physical link */ ++ hw->phy.set_phy_power(hw, false); ++ + return 0; + } + +@@ -1191,12 +1217,17 @@ ngbe_dev_close(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + ngbe_pf_reset_hw(hw); + + ngbe_dev_stop(dev); ngbe_dev_free_queues(dev); @@ -38526,7 +70725,36 @@ index 981592f7f4..b930326379 100644 /* reprogram the RAR[0] in case user changed it. */ ngbe_set_rar(hw, 0, hw->mac.addr, 0, true); -@@ -1800,6 +1819,24 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -1331,9 +1362,8 @@ ngbe_read_stats_registers(struct ngbe_hw *hw, + hw_stats->rx_xoff_packets += rd32(hw, NGBE_PBRXLNKXOFF); + + /* DMA Stats */ +- hw_stats->rx_drop_packets += rd32(hw, NGBE_DMARXDROP); +- hw_stats->tx_drop_packets += rd32(hw, NGBE_DMATXDROP); + hw_stats->rx_dma_drop += rd32(hw, NGBE_DMARXDROP); ++ hw_stats->tx_dma_drop += rd32(hw, NGBE_DMATXDROP); + hw_stats->tx_secdrp_packets += rd32(hw, NGBE_DMATXSECDROP); + hw_stats->rx_packets += rd32(hw, NGBE_DMARXPKT); + hw_stats->tx_packets += rd32(hw, NGBE_DMATXPKT); +@@ -1370,7 +1400,7 @@ ngbe_read_stats_registers(struct ngbe_hw *hw, + rd64(hw, NGBE_MACTX1024TOMAXL); + + hw_stats->rx_undersize_errors += rd64(hw, NGBE_MACRXERRLENL); +- hw_stats->rx_oversize_errors += rd32(hw, NGBE_MACRXOVERSIZE); ++ hw_stats->rx_oversize_cnt += rd32(hw, NGBE_MACRXOVERSIZE); + hw_stats->rx_jabber_errors += rd32(hw, NGBE_MACRXJABBER); + + /* MNG Stats */ +@@ -1469,7 +1499,7 @@ ngbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + hw_stats->rx_mac_short_packet_dropped + + hw_stats->rx_length_errors + + hw_stats->rx_undersize_errors + +- hw_stats->rx_oversize_errors + ++ hw_stats->rdb_drp_cnt + + hw_stats->rx_illegal_byte_errors + + hw_stats->rx_error_bytes + + hw_stats->rx_fragment_errors; +@@ -1800,6 +1830,24 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } @@ -38551,7 +70779,7 @@ index 981592f7f4..b930326379 100644 /* return 0 means link status changed, -1 means not changed */ int ngbe_dev_link_update_share(struct rte_eth_dev *dev, -@@ -1837,8 +1874,16 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1837,8 +1885,16 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, return rte_eth_linkstatus_set(dev, &link); } @@ -38569,7 +70797,16 @@ index 981592f7f4..b930326379 100644 intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG; link.link_status = RTE_ETH_LINK_UP; -@@ -2061,9 +2106,6 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +@@ -1874,6 +1930,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, + NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); + } ++ wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, ++ NGBE_MACRXFLT_PROMISC); + } + + return rte_eth_linkstatus_set(dev, &link); +@@ -2061,9 +2119,6 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) struct ngbe_hw *hw = ngbe_dev_hw(dev); struct ngbe_interrupt *intr = ngbe_dev_intr(dev); @@ -38579,7 +70816,7 @@ index 981592f7f4..b930326379 100644 /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); -@@ -2083,6 
+2125,8 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +@@ -2083,6 +2138,8 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) if (eicr & NGBE_ICRMISC_GPIO) intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE; @@ -38588,7 +70825,7 @@ index 981592f7f4..b930326379 100644 return 0; } -@@ -2135,7 +2179,6 @@ static int +@@ -2135,7 +2192,6 @@ static int ngbe_dev_interrupt_action(struct rte_eth_dev *dev) { struct ngbe_interrupt *intr = ngbe_dev_intr(dev); @@ -38596,7 +70833,7 @@ index 981592f7f4..b930326379 100644 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); -@@ -2151,31 +2194,11 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -2151,31 +2207,11 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) rte_eth_linkstatus_get(dev, &link); ngbe_dev_link_update(dev, 0); @@ -38632,7 +70869,7 @@ index 981592f7f4..b930326379 100644 } PMD_DRV_LOG(DEBUG, "enable intr immediately"); -@@ -2184,53 +2207,6 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -2184,53 +2220,6 @@ ngbe_dev_interrupt_action(struct rte_eth_dev *dev) return 0; } @@ -38686,7 +70923,125 @@ index 981592f7f4..b930326379 100644 /** * Interrupt handler triggered by NIC for handling * specific interrupt. -@@ -2641,7 +2617,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, +@@ -2362,6 +2351,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + return -EIO; + } + ++/* Additional bittime to account for NGBE framing */ ++#define NGBE_ETH_FRAMING 20 ++ ++/* ++ * ngbe_fc_hpbthresh_set - calculate high water mark for flow control ++ * ++ * @dv_id: device interface delay ++ * @pb: packet buffer to calculate ++ */ ++static s32 ++ngbe_fc_hpbthresh_set(struct rte_eth_dev *dev) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); ++ u32 max_frame_size, tc, dv_id, rx_pb; ++ s32 kb, marker; ++ ++ /* Calculate max LAN frame size */ ++ max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); ++ tc = max_frame_size + NGBE_ETH_FRAMING; ++ ++ /* Calculate delay value for device */ ++ dv_id = NGBE_DV(tc, tc); ++ ++ /* Loopback switch introduces additional latency */ ++ if (pci_dev->max_vfs) ++ dv_id += NGBE_B2BT(tc); ++ ++ /* Delay value is calculated in bit times convert to KB */ ++ kb = NGBE_BT2KB(dv_id); ++ rx_pb = rd32(hw, NGBE_PBRXSIZE) >> 10; ++ ++ marker = rx_pb - kb; ++ ++ /* It is possible that the packet buffer is not large enough ++ * to provide required headroom. In this case throw an error ++ * to user and do the best we can. 
++ */ ++ if (marker < 0) { ++ PMD_DRV_LOG(WARNING, "Packet Buffer can not provide enough headroom to support flow control."); ++ marker = tc + 1; ++ } ++ ++ return marker; ++} ++ ++/* ++ * ngbe_fc_lpbthresh_set - calculate low water mark for flow control ++ * ++ * @dv_id: device interface delay ++ */ ++static s32 ++ngbe_fc_lpbthresh_set(struct rte_eth_dev *dev) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ u32 max_frame_size, tc, dv_id; ++ s32 kb; ++ ++ /* Calculate max LAN frame size */ ++ max_frame_size = rd32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK); ++ tc = max_frame_size + NGBE_ETH_FRAMING; ++ ++ /* Calculate delay value for device */ ++ dv_id = NGBE_LOW_DV(tc); ++ ++ /* Delay value is calculated in bit times convert to KB */ ++ kb = NGBE_BT2KB(dv_id); ++ ++ return kb; ++} ++ ++/* ++ * ngbe_pbthresh_setup - calculate and setup high low water marks ++ */ ++static void ++ngbe_pbthresh_set(struct rte_eth_dev *dev) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ ++ hw->fc.high_water = ngbe_fc_hpbthresh_set(dev); ++ hw->fc.low_water = ngbe_fc_lpbthresh_set(dev); ++ ++ /* Low water marks must not be larger than high water marks */ ++ if (hw->fc.low_water > hw->fc.high_water) ++ hw->fc.low_water = 0; ++} ++ + int + ngbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, +@@ -2481,7 +2557,7 @@ static int + ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + { + struct ngbe_hw *hw = ngbe_dev_hw(dev); +- uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4; ++ uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + struct rte_eth_dev_data *dev_data = dev->data; + + /* If device is started, refuse mtu that requires the support of +@@ -2494,12 +2570,8 @@ ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EINVAL; + } + +- if (hw->mode) +- wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, +- NGBE_FRAME_SIZE_MAX); +- else +- wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, +- NGBE_FRMSZ_MAX(frame_size)); ++ wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK, ++ NGBE_FRMSZ_MAX(frame_size)); + + return 0; + } +@@ -2641,7 +2713,7 @@ ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, wr32(hw, NGBE_IVARMISC, tmp); } else { /* rx or tx causes */ @@ -38695,7 +71050,7 @@ index 981592f7f4..b930326379 100644 idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, NGBE_IVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -2893,7 +2869,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev) +@@ -2893,7 +2965,7 @@ ngbe_timesync_disable(struct rte_eth_dev *dev) /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. 
*/ wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0); @@ -38746,6 +71101,249 @@ index 7f9c04fb0e..12a18de31d 100644 hw->mac.clear_vmdq(hw, 0, BIT_MASK32); /* clear VMDq map to scan rar 31 */ +diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c +index 86a5ef5486..5df4186c72 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c ++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c +@@ -24,15 +24,11 @@ + + /* Bit Mask to indicate what bits required for building Tx context */ + static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM | +- RTE_MBUF_F_TX_OUTER_IPV6 | +- RTE_MBUF_F_TX_OUTER_IPV4 | + RTE_MBUF_F_TX_IPV6 | + RTE_MBUF_F_TX_IPV4 | + RTE_MBUF_F_TX_VLAN | + RTE_MBUF_F_TX_L4_MASK | + RTE_MBUF_F_TX_TCP_SEG | +- RTE_MBUF_F_TX_TUNNEL_MASK | +- RTE_MBUF_F_TX_OUTER_IP_CKSUM | + NGBE_TX_IEEE1588_TMST); + + #define NGBE_TX_OFFLOAD_NOTSUP_MASK \ +@@ -333,34 +329,15 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq, + } + + vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1); +- +- if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { +- tx_offload_mask.outer_tun_len |= ~0; +- tx_offload_mask.outer_l2_len |= ~0; +- tx_offload_mask.outer_l3_len |= ~0; +- tx_offload_mask.l2_len |= ~0; +- tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1); +- tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2); +- +- switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { +- case RTE_MBUF_F_TX_TUNNEL_IPIP: +- /* for non UDP / GRE tunneling, set to 0b */ +- break; +- default: +- PMD_TX_LOG(ERR, "Tunnel type not supported"); +- return; +- } +- vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len); +- } else { +- tunnel_seed = 0; +- vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len); +- } ++ vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len); + + if (ol_flags & RTE_MBUF_F_TX_VLAN) { + tx_offload_mask.vlan_tci |= ~0; + vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci); + } + ++ tunnel_seed = 0; ++ + txq->ctx_cache[ctx_idx].flags = ol_flags; + txq->ctx_cache[ctx_idx].tx_offload.data[0] = + tx_offload_mask.data[0] & tx_offload.data[0]; +@@ -449,16 +426,10 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) + return cmdtype; + } + +-static inline uint8_t +-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) ++static inline uint32_t ++tx_desc_ol_flags_to_ptype(uint64_t oflags) + { +- bool tun; +- +- if (ptype) +- return ngbe_encode_ptype(ptype); +- +- /* Only support flags in NGBE_TX_OFFLOAD_MASK */ +- tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK); ++ uint32_t ptype; + + /* L2 level */ + ptype = RTE_PTYPE_L2_ETHER; +@@ -466,41 +437,36 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + ptype |= RTE_PTYPE_L2_ETHER_VLAN; + + /* L3 level */ +- if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM)) +- ptype |= RTE_PTYPE_L3_IPV4; +- else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6)) +- ptype |= RTE_PTYPE_L3_IPV6; +- + if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM)) +- ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4); ++ ptype |= RTE_PTYPE_L3_IPV4; + else if (oflags & (RTE_MBUF_F_TX_IPV6)) +- ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6); ++ ptype |= RTE_PTYPE_L3_IPV6; + + /* L4 level */ + switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) { + case RTE_MBUF_F_TX_TCP_CKSUM: +- ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP); ++ ptype |= RTE_PTYPE_L4_TCP; + break; + case RTE_MBUF_F_TX_UDP_CKSUM: +- ptype |= (tun ? 
RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP); ++ ptype |= RTE_PTYPE_L4_UDP; + break; + case RTE_MBUF_F_TX_SCTP_CKSUM: +- ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP); ++ ptype |= RTE_PTYPE_L4_SCTP; + break; + } + + if (oflags & RTE_MBUF_F_TX_TCP_SEG) +- ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP); +- +- /* Tunnel */ +- switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) { +- case RTE_MBUF_F_TX_TUNNEL_IPIP: +- case RTE_MBUF_F_TX_TUNNEL_IP: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_IP; +- break; +- } ++ ptype |= RTE_PTYPE_L4_TCP; ++ ++ return ptype; ++} ++ ++static inline uint8_t ++tx_desc_ol_flags_to_ptid(uint64_t oflags) ++{ ++ uint32_t ptype; ++ ++ ptype = tx_desc_ol_flags_to_ptype(oflags); + + return ngbe_encode_ptype(ptype); + } +@@ -622,16 +588,12 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* If hardware offload required */ + tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { +- tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, +- tx_pkt->packet_type); ++ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req); + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.vlan_tci = tx_pkt->vlan_tci; + tx_offload.tso_segsz = tx_pkt->tso_segsz; +- tx_offload.outer_l2_len = tx_pkt->outer_l2_len; +- tx_offload.outer_l3_len = tx_pkt->outer_l3_len; +- tx_offload.outer_tun_len = 0; + + /* If new context need be built or reuse the exist ctx*/ + ctx = what_ctx_update(txq, tx_ol_req, tx_offload); +@@ -752,10 +714,6 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + pkt_len -= (tx_offload.l2_len + + tx_offload.l3_len + tx_offload.l4_len); +- pkt_len -= +- (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) +- ? 
tx_offload.outer_l2_len + +- tx_offload.outer_l3_len : 0; + } + + /* +@@ -1939,12 +1897,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_UDP_TSO | +- RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IP_TNL_TSO | +- RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS; + + if (hw->is_pf) +@@ -2237,6 +2191,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | + RTE_ETH_RX_OFFLOAD_KEEP_CRC | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | ++ RTE_ETH_RX_OFFLOAD_RSS_HASH | + RTE_ETH_RX_OFFLOAD_SCATTER; + + if (hw->is_pf) +@@ -2460,6 +2415,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) + if (txq != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -2469,6 +2425,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) + if (rxq != NULL) { + ngbe_rx_queue_release_mbufs(rxq); + ngbe_reset_rx_queue(adapter, rxq); ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + } +diff --git a/dpdk/drivers/net/null/rte_eth_null.c b/dpdk/drivers/net/null/rte_eth_null.c +index ca03469d0e..bdaca02d86 100644 +--- a/dpdk/drivers/net/null/rte_eth_null.c ++++ b/dpdk/drivers/net/null/rte_eth_null.c +@@ -186,21 +186,36 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) + static int + eth_dev_start(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + if (dev == NULL) + return -EINVAL; + + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + + static int + eth_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + if (dev == NULL) + return 0; + + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/dpdk/drivers/net/octeontx/octeontx_ethdev.c index 4f1e368c61..f879a0c9fc 100644 --- a/dpdk/drivers/net/octeontx/octeontx_ethdev.c @@ -38883,6 +71481,38 @@ index 0716beb9b1..85e14a998f 100644 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); do { +diff --git a/dpdk/drivers/net/octeontx_ep/otx_ep_ethdev.c b/dpdk/drivers/net/octeontx_ep/otx_ep_ethdev.c +index c3cec6d833..132429998b 100644 +--- a/dpdk/drivers/net/octeontx_ep/otx_ep_ethdev.c ++++ b/dpdk/drivers/net/octeontx_ep/otx_ep_ethdev.c +@@ -75,6 +75,11 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev) + + otx_ep_info("dev started\n"); + ++ for (q = 0; q < eth_dev->data->nb_rx_queues; q++) ++ eth_dev->data->rx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (q = 0; q < eth_dev->data->nb_tx_queues; q++) ++ eth_dev->data->tx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -83,9 +88,15 @@ static int + otx_ep_dev_stop(struct rte_eth_dev *eth_dev) + { + struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev); ++ uint16_t i; + + otx_epvf->fn_list.disable_io_queues(otx_epvf); + ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = 
RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c b/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c index c9b91fef9e..96366b2a7f 100644 --- a/dpdk/drivers/net/octeontx_ep/otx_ep_vf.c @@ -38897,10 +71527,49 @@ index c9b91fef9e..96366b2a7f 100644 do { diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c -index 047010e15e..ebb5d1ae0e 100644 +index 047010e15e..c5158bbf31 100644 --- a/dpdk/drivers/net/pfe/pfe_ethdev.c +++ b/dpdk/drivers/net/pfe/pfe_ethdev.c -@@ -769,7 +769,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) +@@ -257,6 +257,7 @@ pfe_eth_open(struct rte_eth_dev *dev) + struct pfe_eth_priv_s *priv = dev->data->dev_private; + struct hif_client_s *client; + struct hif_shm *hif_shm; ++ uint16_t i; + int rc; + + /* Register client driver with HIF */ +@@ -334,6 +335,10 @@ pfe_eth_open(struct rte_eth_dev *dev) + PFE_PMD_INFO("PFE INTERRUPT Mode enabled"); + } + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + + err0: + return rc; +@@ -377,6 +382,7 @@ static int + pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) + { + struct pfe_eth_priv_s *priv = dev->data->dev_private; ++ uint16_t i; + + dev->data->dev_started = 0; + +@@ -386,6 +392,11 @@ pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/) + dev->rx_pkt_burst = &pfe_dummy_recv_pkts; + dev->tx_pkt_burst = &pfe_dummy_xmit_pkts; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +@@ -769,7 +780,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id) if (eth_dev == NULL) return -ENOMEM; @@ -39006,6 +71675,41 @@ index c5b5399282..9ea579bfc8 100644 #define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size) #define OSAL_MEMSET(dst, val, length) \ memset(dst, val, length) +diff --git a/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c b/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c +index 6a52f32cc9..4e4d1dc374 100644 +--- a/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c ++++ b/dpdk/drivers/net/qede/base/ecore_init_fw_funcs.c +@@ -1416,7 +1416,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, + u32 i; \ + for (i = 0; i < (arr_size); i++) \ + ecore_wr(dev, ptt, ((addr) + (4 * i)), \ +- ((u32 *)&(arr))[i]); \ ++ ((u32 *)(arr))[i]); \ + } while (0) + + #ifndef DWORDS_TO_BYTES +diff --git a/dpdk/drivers/net/qede/base/ecore_int.c b/dpdk/drivers/net/qede/base/ecore_int.c +index 2c4aac9418..d9faf6bfcd 100644 +--- a/dpdk/drivers/net/qede/base/ecore_int.c ++++ b/dpdk/drivers/net/qede/base/ecore_int.c +@@ -366,7 +366,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) +- DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp); ++ DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp); + + tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { +@@ -378,7 +378,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, + 
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); + + DP_NOTICE(p_hwfn, false, +- "ICPL erorr - %08x [Address %08x:%08x]\n", ++ "ICPL error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); + } + diff --git a/dpdk/drivers/net/qede/qede_debug.c b/dpdk/drivers/net/qede/qede_debug.c index 2297d245c4..18f2d988fb 100644 --- a/dpdk/drivers/net/qede/qede_debug.c @@ -39048,7 +71752,7 @@ index 2297d245c4..18f2d988fb 100644 */ OSAL_VFREE(p_hwfn, feature->dump_buf); diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c -index 3e9aaeecd3..2a3123f0c8 100644 +index 3e9aaeecd3..153a540dbe 100644 --- a/dpdk/drivers/net/qede/qede_ethdev.c +++ b/dpdk/drivers/net/qede/qede_ethdev.c @@ -358,7 +358,7 @@ qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) @@ -39060,7 +71764,15 @@ index 3e9aaeecd3..2a3123f0c8 100644 qdev->ops = qed_ops; } -@@ -2338,7 +2338,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +@@ -2142,6 +2142,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev, + rss_params.rss_enable = 1; + } + ++ rss_params.update_rss_ind_table = 1; + rss_params.update_rss_config = 1; + /* tbl_size has to be set with capabilities */ + rss_params.rss_table_size_log = 7; +@@ -2338,7 +2339,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) if (fp->rxq != NULL) { bufsz = (uint16_t)rte_pktmbuf_data_room_size( fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; @@ -39478,6 +72190,41 @@ index 0b99a8d6fe..937d339fb8 100644 sizeof(caps)); /* Update bulletin of all future possible VFs with link configuration */ +diff --git a/dpdk/drivers/net/ring/rte_eth_ring.c b/dpdk/drivers/net/ring/rte_eth_ring.c +index db10f035df..d32424aa53 100644 +--- a/dpdk/drivers/net/ring/rte_eth_ring.c ++++ b/dpdk/drivers/net/ring/rte_eth_ring.c +@@ -109,15 +109,30 @@ eth_dev_start(struct rte_eth_dev *dev) + static int + eth_dev_stop(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_started = 0; + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + } + + static int + eth_dev_set_link_down(struct rte_eth_dev *dev) + { ++ uint16_t i; ++ + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/sfc/sfc.c b/dpdk/drivers/net/sfc/sfc.c index ed714fe02f..2cead4e045 100644 --- a/dpdk/drivers/net/sfc/sfc.c @@ -39505,10 +72252,18 @@ index d4cd162541..da2d1603cf 100644 entry->type == SFC_DP_TX ? 
"Tx" : "unknown", diff --git a/dpdk/drivers/net/sfc/sfc_dp_rx.h b/dpdk/drivers/net/sfc/sfc_dp_rx.h -index 760540ba22..246adbd87c 100644 +index 760540ba22..8a504bdcf1 100644 --- a/dpdk/drivers/net/sfc/sfc_dp_rx.h +++ b/dpdk/drivers/net/sfc/sfc_dp_rx.h -@@ -158,7 +158,7 @@ typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id, +@@ -69,6 +69,7 @@ struct sfc_dp_rx_qcreate_info { + /** Receive queue flags initializer */ + unsigned int flags; + #define SFC_RXQ_FLAG_RSS_HASH 0x1 ++#define SFC_RXQ_FLAG_INGRESS_MPORT 0x2 + + /** Rx queue size */ + unsigned int rxq_entries; +@@ -158,7 +159,7 @@ typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id, struct sfc_dp_rxq **dp_rxqp); /** @@ -39517,7 +72272,7 @@ index 760540ba22..246adbd87c 100644 */ typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq); -@@ -191,7 +191,7 @@ typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq, +@@ -191,7 +192,7 @@ typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq, /** * Receive queue purge function called after queue flush. * @@ -39540,10 +72295,20 @@ index 5e2052d142..e81847e75a 100644 static inline void sfc_ef100_evq_prime(volatile void *evq_prime, unsigned int evq_hw_index, diff --git a/dpdk/drivers/net/sfc/sfc_ef100_rx.c b/dpdk/drivers/net/sfc/sfc_ef100_rx.c -index 5d16bf281d..45253ed7dc 100644 +index 5d16bf281d..969878bb28 100644 --- a/dpdk/drivers/net/sfc/sfc_ef100_rx.c +++ b/dpdk/drivers/net/sfc/sfc_ef100_rx.c -@@ -851,7 +851,7 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr, +@@ -810,6 +810,9 @@ sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id, + if (rxq->nic_dma_info->nb_regions > 0) + rxq->flags |= SFC_EF100_RXQ_NIC_DMA_MAP; + ++ if (info->flags & SFC_RXQ_FLAG_INGRESS_MPORT) ++ rxq->flags |= SFC_EF100_RXQ_INGRESS_MPORT; ++ + sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell); + + *dp_rxqp = &rxq->dp; +@@ -851,7 +854,7 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr, unsup_rx_prefix_fields = efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout); @@ -39552,6 +72317,42 @@ index 5d16bf281d..45253ed7dc 100644 if ((unsup_rx_prefix_fields & ((1U << EFX_RX_PREFIX_FIELD_LENGTH) | (1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0) +@@ -876,11 +879,18 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr, + else + rxq->flags &= ~SFC_EF100_RXQ_USER_MARK; + ++ ++ /* ++ * At the moment, this feature is used only ++ * by the representor proxy Rx queue and is ++ * essential for representor support, so if ++ * it has been requested but is unsupported, ++ * point this inconsistency out to the user. 
++ */ + if ((unsup_rx_prefix_fields & +- (1U << EFX_RX_PREFIX_FIELD_INGRESS_MPORT)) == 0) +- rxq->flags |= SFC_EF100_RXQ_INGRESS_MPORT; +- else +- rxq->flags &= ~SFC_EF100_RXQ_INGRESS_MPORT; ++ (1U << EFX_RX_PREFIX_FIELD_INGRESS_MPORT)) && ++ (rxq->flags & SFC_EF100_RXQ_INGRESS_MPORT)) ++ return ENOTSUP; + + rxq->prefix_size = pinfo->erpl_length; + rxq->rearm_data = sfc_ef100_mk_mbuf_rearm_data(rxq->dp.dpq.port_id, +diff --git a/dpdk/drivers/net/sfc/sfc_ef100_tx.c b/dpdk/drivers/net/sfc/sfc_ef100_tx.c +index 4c2205f7a4..1b6374775f 100644 +--- a/dpdk/drivers/net/sfc/sfc_ef100_tx.c ++++ b/dpdk/drivers/net/sfc/sfc_ef100_tx.c +@@ -405,7 +405,7 @@ sfc_ef100_tx_qdesc_send_create(const struct sfc_ef100_txq *txq, + m->l2_len + m->l3_len) >> 1; + } + +- rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova_default(m), ++ rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m), + rte_pktmbuf_data_len(m), &dma_addr); + if (unlikely(rc != 0)) + return rc; diff --git a/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c b/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c index 712c207617..78bd430363 100644 --- a/dpdk/drivers/net/sfc/sfc_ef10_essb_rx.c @@ -39579,7 +72380,7 @@ index 821e2227bb..412254e3d7 100644 } diff --git a/dpdk/drivers/net/sfc/sfc_ethdev.c b/dpdk/drivers/net/sfc/sfc_ethdev.c -index d4210b63dd..184f6e7c67 100644 +index d4210b63dd..5059422ed3 100644 --- a/dpdk/drivers/net/sfc/sfc_ethdev.c +++ b/dpdk/drivers/net/sfc/sfc_ethdev.c @@ -94,7 +94,6 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) @@ -39602,6 +72403,15 @@ index d4210b63dd..184f6e7c67 100644 if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) { uint64_t rte_hf = 0; unsigned int i; +@@ -2059,7 +2053,7 @@ sfc_process_mport_journal_cb(void *data, efx_mport_desc_t *mport, + struct sfc_mport_journal_ctx *ctx = data; + + if (ctx == NULL || ctx->sa == NULL) { +- sfc_err(ctx->sa, "received NULL context or SFC adapter"); ++ SFC_GENERIC_LOG(ERR, "received NULL context or SFC adapter"); + return EINVAL; + } + diff --git a/dpdk/drivers/net/sfc/sfc_flow.c b/dpdk/drivers/net/sfc/sfc_flow.c index fc74c8035e..509fde4a86 100644 --- a/dpdk/drivers/net/sfc/sfc_flow.c @@ -39663,6 +72473,189 @@ index ab67aa9237..ddddefad7b 100644 * interrupts: LSC (link status change) and RXQ (receive indication). * It allows to register interrupt callback for entire device which is * not intended to be used for receive indication (i.e. 
link status +diff --git a/dpdk/drivers/net/sfc/sfc_mae.c b/dpdk/drivers/net/sfc/sfc_mae.c +index b34c9afd5b..eb97524dc3 100644 +--- a/dpdk/drivers/net/sfc/sfc_mae.c ++++ b/dpdk/drivers/net/sfc/sfc_mae.c +@@ -281,8 +281,10 @@ sfc_mae_attach(struct sfc_adapter *sa) + bounce_eh->buf_size = limits.eml_encap_header_size_limit; + bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh", + bounce_eh->buf_size, 0); +- if (bounce_eh->buf == NULL) ++ if (bounce_eh->buf == NULL) { ++ rc = ENOMEM; + goto fail_mae_alloc_bounce_eh; ++ } + + mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios; + mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios; +@@ -1180,6 +1182,8 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, + } + + if (fw_rsrc->refcnt == 1) { ++ efx_mae_action_set_clear_fw_rsrc_ids(action_set->spec); ++ + rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id); + if (rc == 0) { + sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x", +@@ -4131,12 +4135,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, + break; + case SFC_FT_RULE_GROUP: + /* +- * Packets that go to the rule's AR have FT mark set (from the +- * JUMP rule OR's RECIRC_ID). Remove this mark in matching +- * packets. The user may have provided their own action +- * MARK above, so don't check the return value here. ++ * Packets that go to the rule's AR have FT mark set (from ++ * the JUMP rule OR's RECIRC_ID). Reset the mark to zero. + */ +- (void)efx_mae_action_set_populate_mark(ctx.spec, 0); ++ efx_mae_action_set_populate_mark_reset(ctx.spec); + + ctx.ft_group_hit_counter = &spec_mae->ft->group_hit_counter; + break; +@@ -4144,8 +4146,25 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, + SFC_ASSERT(B_FALSE); + } + ++ /* ++ * A DPDK flow entry must specify a fate action, which the parser ++ * converts into a DELIVER action in a libefx action set. An ++ * attempt to replace the action in the action set should ++ * fail. If it succeeds then report an error, as the ++ * parsed flow entry did not contain a fate action. 
++ */ ++ rc = efx_mae_action_set_populate_drop(ctx.spec); ++ if (rc == 0) { ++ rc = rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, NULL, ++ "no fate action found"); ++ goto fail_check_fate_action; ++ } ++ + spec_mae->action_set = sfc_mae_action_set_attach(sa, &ctx); + if (spec_mae->action_set != NULL) { ++ sfc_mae_mac_addr_del(sa, ctx.src_mac); ++ sfc_mae_mac_addr_del(sa, ctx.dst_mac); + sfc_mae_encap_header_del(sa, ctx.encap_header); + efx_mae_action_set_spec_fini(sa->nic, ctx.spec); + return 0; +@@ -4158,6 +4177,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, + return 0; + + fail_action_set_add: ++fail_check_fate_action: + fail_workaround_jump_delivery: + fail_nb_count: + sfc_mae_encap_header_del(sa, ctx.encap_header); +diff --git a/dpdk/drivers/net/sfc/sfc_repr.c b/dpdk/drivers/net/sfc/sfc_repr.c +index 9d88d554c1..2a60f1cc31 100644 +--- a/dpdk/drivers/net/sfc/sfc_repr.c ++++ b/dpdk/drivers/net/sfc/sfc_repr.c +@@ -9,6 +9,8 @@ + + #include <stdint.h> + ++#include <rte_flow_driver.h> ++#include <rte_flow.h> + #include <rte_mbuf.h> + #include <rte_ethdev.h> + #include <rte_malloc.h> +@@ -289,6 +291,7 @@ static int + sfc_repr_dev_start(struct rte_eth_dev *dev) + { + struct sfc_repr *sr = sfc_repr_by_eth_dev(dev); ++ uint16_t i; + int ret; + + sfcr_info(sr, "entry"); +@@ -300,6 +303,11 @@ sfc_repr_dev_start(struct rte_eth_dev *dev) + if (ret != 0) + goto fail_start; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + sfcr_info(sr, "done"); + + return 0; +@@ -364,6 +372,7 @@ static int + sfc_repr_dev_stop(struct rte_eth_dev *dev) + { + struct sfc_repr *sr = sfc_repr_by_eth_dev(dev); ++ uint16_t i; + int ret; + + sfcr_info(sr, "entry"); +@@ -378,6 +387,11 @@ sfc_repr_dev_stop(struct rte_eth_dev *dev) + + sfc_repr_unlock(sr); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + sfcr_info(sr, "done"); + + return 0; +@@ -533,6 +547,7 @@ sfc_repr_dev_infos_get(struct rte_eth_dev *dev, + + dev_info->device = dev->device; + ++ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX; + dev_info->max_rx_queues = SFC_REPR_RXQ_MAX; + dev_info->max_tx_queues = SFC_REPR_TXQ_MAX; + dev_info->default_rxconf.rx_drop_en = 1; +@@ -839,6 +854,8 @@ sfc_repr_dev_close(struct rte_eth_dev *dev) + + (void)sfc_repr_proxy_del_port(srs->pf_port_id, srs->repr_id); + ++ sfc_mae_clear_switch_port(srs->switch_domain_id, srs->switch_port_id); ++ + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + dev->dev_ops = NULL; +@@ -882,6 +899,29 @@ sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + return 0; + } + ++static int ++sfc_repr_flow_pick_transfer_proxy(struct rte_eth_dev *dev, ++ uint16_t *transfer_proxy_port, ++ struct rte_flow_error *error) ++{ ++ struct sfc_repr_shared *srs = sfc_repr_shared_by_eth_dev(dev); ++ ++ return rte_flow_pick_transfer_proxy(srs->pf_port_id, ++ transfer_proxy_port, error); ++} ++ ++const struct rte_flow_ops sfc_repr_flow_ops = { ++ .pick_transfer_proxy = sfc_repr_flow_pick_transfer_proxy, ++}; ++ ++static int ++sfc_repr_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused, ++ const struct rte_flow_ops **ops) ++{ ++ *ops = &sfc_repr_flow_ops; ++ return 0; ++} ++ + static const struct eth_dev_ops 
sfc_repr_dev_ops = { + .dev_configure = sfc_repr_dev_configure, + .dev_start = sfc_repr_dev_start, +@@ -894,6 +934,7 @@ static const struct eth_dev_ops sfc_repr_dev_ops = { + .rx_queue_release = sfc_repr_rx_queue_release, + .tx_queue_setup = sfc_repr_tx_queue_setup, + .tx_queue_release = sfc_repr_tx_queue_release, ++ .flow_ops_get = sfc_repr_dev_flow_ops_get, + }; + + diff --git a/dpdk/drivers/net/sfc/sfc_repr_proxy.c b/dpdk/drivers/net/sfc/sfc_repr_proxy.c index 535b07ea52..8660d419a3 100644 --- a/dpdk/drivers/net/sfc/sfc_repr_proxy.c @@ -39708,7 +72701,7 @@ index 535b07ea52..8660d419a3 100644 } diff --git a/dpdk/drivers/net/sfc/sfc_rx.c b/dpdk/drivers/net/sfc/sfc_rx.c -index 7104284106..cd58d60a36 100644 +index 7104284106..d9a57ed80f 100644 --- a/dpdk/drivers/net/sfc/sfc_rx.c +++ b/dpdk/drivers/net/sfc/sfc_rx.c @@ -1057,7 +1057,7 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool) @@ -39720,7 +72713,17 @@ index 7104284106..cd58d60a36 100644 * size is odd, lost space is (nic_align_end - 1). More * accurate formula is below. */ -@@ -1702,7 +1702,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues) +@@ -1221,6 +1221,9 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index, + else + rxq_info->rxq_flags = 0; + ++ if (rxq_info->type_flags & EFX_RXQ_FLAG_INGRESS_MPORT) ++ rxq_info->rxq_flags |= SFC_RXQ_FLAG_INGRESS_MPORT; ++ + rxq->buf_size = buf_size; + + rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_NIC_DMA_ADDR_RX_RING, +@@ -1702,7 +1705,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues) /* * Finalize only ethdev queues since other ones are finalized only @@ -39729,7 +72732,7 @@ index 7104284106..cd58d60a36 100644 */ ethdev_qid = sas->ethdev_rxq_count; while (--ethdev_qid >= (int)nb_rx_queues) { -@@ -1775,7 +1775,7 @@ sfc_rx_configure(struct sfc_adapter *sa) +@@ -1775,7 +1778,7 @@ sfc_rx_configure(struct sfc_adapter *sa) reconfigure = true; @@ -39751,6 +72754,36 @@ index 70259660c0..81f5aa3cc4 100644 if (*cache == NULL) { rc = ENOMEM; goto fail_cache; +diff --git a/dpdk/drivers/net/sfc/sfc_switch.c b/dpdk/drivers/net/sfc/sfc_switch.c +index 5c10e8fc74..8f1ee97fa8 100644 +--- a/dpdk/drivers/net/sfc/sfc_switch.c ++++ b/dpdk/drivers/net/sfc/sfc_switch.c +@@ -489,6 +489,7 @@ sfc_mae_clear_switch_port(uint16_t switch_domain_id, + uint16_t switch_port_id) + { + struct sfc_mae_switch_domain *domain; ++ struct sfc_mae_switch_port *port; + + rte_spinlock_lock(&sfc_mae_switch.lock); + +@@ -504,6 +505,17 @@ sfc_mae_clear_switch_port(uint16_t switch_domain_id, + domain->mae_admin_port = NULL; + } + ++ TAILQ_FOREACH(port, &domain->ports, switch_domain_ports) { ++ if (port->id == switch_port_id) { ++ /* ++ * Invalidate the field to prevent wrong ++ * look-ups from flow rule handling path. 
++ */ ++ port->ethdev_port_id = RTE_MAX_ETHPORTS; ++ break; ++ } ++ } ++ + rte_spinlock_unlock(&sfc_mae_switch.lock); + return 0; + } diff --git a/dpdk/drivers/net/sfc/sfc_tx.c b/dpdk/drivers/net/sfc/sfc_tx.c index 0dccf21f7c..f376f24f7b 100644 --- a/dpdk/drivers/net/sfc/sfc_tx.c @@ -39786,6 +72819,50 @@ index 0dccf21f7c..f376f24f7b 100644 */ ethdev_qid = sas->ethdev_txq_count; while (--ethdev_qid >= (int)nb_tx_queues) { +diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic.c b/dpdk/drivers/net/softnic/rte_eth_softnic.c +index 8c098cad5b..009cc911f4 100644 +--- a/dpdk/drivers/net/softnic/rte_eth_softnic.c ++++ b/dpdk/drivers/net/softnic/rte_eth_softnic.c +@@ -164,6 +164,7 @@ pmd_dev_start(struct rte_eth_dev *dev) + { + struct pmd_internals *p = dev->data->dev_private; + int status; ++ uint16_t i; + + /* Firmware */ + status = softnic_cli_script_process(p, +@@ -176,6 +177,11 @@ pmd_dev_start(struct rte_eth_dev *dev) + /* Link UP */ + dev->data->dev_link.link_status = RTE_ETH_LINK_UP; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return 0; + } + +@@ -183,6 +189,7 @@ static int + pmd_dev_stop(struct rte_eth_dev *dev) + { + struct pmd_internals *p = dev->data->dev_private; ++ uint16_t i; + + /* Link DOWN */ + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; +@@ -201,6 +208,11 @@ pmd_dev_stop(struct rte_eth_dev *dev) + tm_hierarchy_free(p); + softnic_mtr_free(p); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c b/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c index ca70eab678..ad96288e7e 100644 --- a/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c @@ -39800,7 +72877,7 @@ index ca70eab678..ad96288e7e 100644 * Question: are the two masks equivalent? 
* diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index f1b48cae82..e020a2417b 100644 +index f1b48cae82..29b4860656 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -67,6 +67,7 @@ @@ -39820,7 +72897,53 @@ index f1b48cae82..e020a2417b 100644 static void tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum, uint32_t *l4_raw_cksum) -@@ -880,11 +881,49 @@ tap_link_set_up(struct rte_eth_dev *dev) +@@ -544,7 +545,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len, + { + void *l3_hdr = packet + l2_len; + +- if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) { ++ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { + struct rte_ipv4_hdr *iph = l3_hdr; + uint16_t cksum; + +@@ -627,16 +628,25 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs, + + nb_segs = mbuf->nb_segs; + if (txq->csum && +- ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) || ++ ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM || + (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM || + (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) { ++ unsigned int l4_len = 0; ++ + is_cksum = 1; + ++ if ((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == ++ RTE_MBUF_F_TX_UDP_CKSUM) ++ l4_len = sizeof(struct rte_udp_hdr); ++ else if ((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == ++ RTE_MBUF_F_TX_TCP_CKSUM) ++ l4_len = sizeof(struct rte_tcp_hdr); ++ + /* Support only packets with at least layer 4 + * header included in the first segment + */ + seg_len = rte_pktmbuf_data_len(mbuf); +- l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len; ++ l234_hlen = mbuf->l2_len + mbuf->l3_len + l4_len; + if (seg_len < l234_hlen) + return -1; + +@@ -646,7 +656,7 @@ tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs, + rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *), + l234_hlen); + tap_tx_l3_cksum(m_copy, mbuf->ol_flags, +- mbuf->l2_len, mbuf->l3_len, mbuf->l4_len, ++ mbuf->l2_len, mbuf->l3_len, l4_len, + &l4_cksum, &l4_phdr_cksum, + &l4_raw_cksum); + iovecs[k].iov_base = m_copy; +@@ -880,11 +890,49 @@ tap_link_set_up(struct rte_eth_dev *dev) return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE); } @@ -39870,7 +72993,7 @@ index f1b48cae82..e020a2417b 100644 err = tap_intr_handle_set(dev, 1); if (err) return err; -@@ -901,6 +940,34 @@ tap_dev_start(struct rte_eth_dev *dev) +@@ -901,6 +949,34 @@ tap_dev_start(struct rte_eth_dev *dev) return err; } @@ -39905,7 +73028,7 @@ index f1b48cae82..e020a2417b 100644 /* This function gets called when the current port gets stopped. 
*/ static int -@@ -1084,6 +1151,9 @@ tap_dev_close(struct rte_eth_dev *dev) +@@ -1084,6 +1160,9 @@ tap_dev_close(struct rte_eth_dev *dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) { rte_free(dev->process_private); @@ -39915,7 +73038,7 @@ index f1b48cae82..e020a2417b 100644 return 0; } -@@ -1135,6 +1205,8 @@ tap_dev_close(struct rte_eth_dev *dev) +@@ -1135,6 +1214,8 @@ tap_dev_close(struct rte_eth_dev *dev) TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u", tuntap_types[internals->type], rte_socket_id()); @@ -39924,7 +73047,7 @@ index f1b48cae82..e020a2417b 100644 if (internals->ioctl_sock != -1) { close(internals->ioctl_sock); internals->ioctl_sock = -1; -@@ -2099,8 +2171,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, +@@ -2099,8 +2180,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name, close(pmd->ioctl_sock); /* mac_addrs must not be freed alone because part of dev_private */ dev->data->mac_addrs = NULL; @@ -39934,7 +73057,56 @@ index f1b48cae82..e020a2417b 100644 error_exit_nodev: TAP_LOG(ERR, "%s Unable to initialize %s", -@@ -2445,6 +2517,16 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) +@@ -2167,29 +2248,6 @@ set_remote_iface(const char *key __rte_unused, + return 0; + } + +-static int parse_user_mac(struct rte_ether_addr *user_mac, +- const char *value) +-{ +- unsigned int index = 0; +- char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL; +- +- if (user_mac == NULL || value == NULL) +- return 0; +- +- strlcpy(mac_temp, value, sizeof(mac_temp)); +- mac_byte = strtok(mac_temp, ":"); +- +- while ((mac_byte != NULL) && +- (strlen(mac_byte) <= 2) && +- (strlen(mac_byte) == strspn(mac_byte, +- ETH_TAP_CMP_MAC_FMT))) { +- user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16); +- mac_byte = strtok(NULL, ":"); +- } +- +- return index; +-} +- + static int + set_mac_type(const char *key __rte_unused, + const char *value, +@@ -2203,15 +2261,15 @@ set_mac_type(const char *key __rte_unused, + if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) { + static int iface_idx; + +- /* fixed mac = 00:64:74:61:70:<iface_idx> */ +- memcpy((char *)user_mac->addr_bytes, "\0dtap", ++ /* fixed mac = 02:64:74:61:70:<iface_idx> */ ++ memcpy((char *)user_mac->addr_bytes, "\002dtap", + RTE_ETHER_ADDR_LEN); + user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] = + iface_idx++ + '0'; + goto success; + } + +- if (parse_user_mac(user_mac, value) != 6) ++ if (rte_ether_unformat_addr(value, user_mac) < 0) + goto error; + success: + TAP_LOG(DEBUG, "TAP user MAC param (%s)", value); +@@ -2445,6 +2503,16 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev) ret = tap_mp_attach_queues(name, eth_dev); if (ret != 0) return -1; @@ -39973,8 +73145,3269 @@ index 98f6a76011..15283f8917 100644 * License string that must be acknowledged by the kernel * * @return +diff --git a/dpdk/drivers/net/tap/tap_bpf_insns.h b/dpdk/drivers/net/tap/tap_bpf_insns.h +index 1a91bbad13..53fa76c4e6 100644 +--- a/dpdk/drivers/net/tap/tap_bpf_insns.h ++++ b/dpdk/drivers/net/tap/tap_bpf_insns.h +@@ -1,10 +1,10 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2017 Mellanox Technologies, Ltd ++ * Auto-generated from tap_bpf_program.c ++ * This not the original source file. Do NOT edit it. + */ + + #include <tap_bpf.h> + +-/* bpf_insn array matching cls_q section. 
See tap_bpf_program.c file */ + static struct bpf_insn cls_q_insns[] = { + {0x61, 2, 1, 52, 0x00000000}, + {0x18, 3, 0, 0, 0xdeadbeef}, +@@ -23,18 +23,17 @@ static struct bpf_insn cls_q_insns[] = { + {0x95, 0, 0, 0, 0x00000000}, + }; + +-/* bpf_insn array matching l3_l4 section. see tap_bpf_program.c file */ + static struct bpf_insn l3_l4_hash_insns[] = { + {0xbf, 7, 1, 0, 0x00000000}, +- {0x61, 8, 7, 16, 0x00000000}, +- {0x61, 6, 7, 76, 0x00000000}, ++ {0x61, 6, 7, 16, 0x00000000}, ++ {0x61, 8, 7, 76, 0x00000000}, + {0x61, 9, 7, 80, 0x00000000}, + {0x18, 1, 0, 0, 0xdeadbeef}, + {0x00, 0, 0, 0, 0x00000000}, + {0x63, 10, 1, -4, 0x00000000}, + {0xbf, 2, 10, 0, 0x00000000}, + {0x07, 2, 0, 0, 0xfffffffc}, +- {0x18, 1, 1, 0, 0x0000cafe}, ++ {0x18, 1, 0, 0, 0x00000000}, + {0x00, 0, 0, 0, 0x00000000}, + {0x85, 0, 0, 0, 0x00000001}, + {0x55, 0, 0, 21, 0x00000000}, +@@ -58,7 +57,7 @@ static struct bpf_insn l3_l4_hash_insns[] = { + {0x07, 1, 0, 0, 0xffffffd0}, + {0xb7, 2, 0, 0, 0x00000023}, + {0x85, 0, 0, 0, 0x00000006}, +- {0x05, 0, 0, 1632, 0x00000000}, ++ {0x05, 0, 0, 1680, 0x00000000}, + {0xb7, 1, 0, 0, 0x0000000e}, + {0x61, 2, 7, 20, 0x00000000}, + {0x15, 2, 0, 10, 0x00000000}, +@@ -66,1630 +65,1678 @@ static struct bpf_insn l3_l4_hash_insns[] = { + {0x55, 2, 0, 8, 0x0000a888}, + {0xbf, 2, 7, 0, 0x00000000}, + {0xb7, 7, 0, 0, 0x00000000}, +- {0xbf, 1, 6, 0, 0x00000000}, ++ {0xbf, 1, 8, 0, 0x00000000}, + {0x07, 1, 0, 0, 0x00000012}, +- {0x2d, 1, 9, 1622, 0x00000000}, ++ {0x2d, 1, 9, 1670, 0x00000000}, + {0xb7, 1, 0, 0, 0x00000012}, +- {0x69, 8, 6, 16, 0x00000000}, ++ {0x69, 6, 8, 16, 0x00000000}, + {0xbf, 7, 2, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x0000ffff}, + {0x7b, 10, 7, -56, 0x00000000}, +- {0x57, 8, 0, 0, 0x0000ffff}, +- {0x15, 8, 0, 409, 0x0000dd86}, ++ {0x15, 6, 0, 443, 0x0000dd86}, + {0xb7, 7, 0, 0, 0x00000003}, +- {0x55, 8, 0, 1614, 0x00000008}, +- {0x0f, 6, 1, 0, 0x00000000}, ++ {0x55, 6, 0, 1662, 0x00000008}, ++ {0x0f, 8, 1, 0, 0x00000000}, + {0xb7, 7, 0, 0, 0x00000000}, +- {0xbf, 1, 6, 0, 0x00000000}, ++ {0xbf, 1, 8, 0, 0x00000000}, + {0x07, 1, 0, 0, 0x00000018}, +- {0x2d, 1, 9, 1609, 0x00000000}, +- {0x71, 3, 6, 12, 0x00000000}, +- {0xbf, 1, 3, 0, 0x00000000}, +- {0x67, 1, 0, 0, 0x00000038}, +- {0xc7, 1, 0, 0, 0x00000020}, +- {0x77, 1, 0, 0, 0x0000001f}, +- {0x57, 1, 0, 0, 0x2cc681d1}, +- {0x67, 3, 0, 0, 0x00000018}, ++ {0x2d, 1, 9, 1657, 0x00000000}, ++ {0xb7, 1, 0, 0, 0x00000000}, ++ {0x71, 3, 8, 12, 0x00000000}, ++ {0x71, 2, 8, 9, 0x00000000}, ++ {0x15, 2, 0, 1, 0x00000011}, ++ {0x55, 2, 0, 21, 0x00000006}, ++ {0x71, 2, 8, 7, 0x00000000}, ++ {0x71, 4, 8, 6, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x67, 5, 0, 0, 0x00000008}, ++ {0x57, 5, 0, 0, 0x00001f00}, ++ {0x4f, 5, 2, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x4f, 4, 5, 0, 0x00000000}, ++ {0x55, 4, 0, 12, 0x00000000}, ++ {0xbf, 2, 8, 0, 0x00000000}, ++ {0x07, 2, 0, 0, 0x00000014}, ++ {0x71, 4, 2, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000018}, ++ {0x71, 1, 2, 1, 0x00000000}, ++ {0x67, 1, 0, 0, 0x00000010}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 4, 2, 3, 0x00000000}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 2, 2, 2, 0x00000000}, ++ {0x67, 2, 0, 0, 0x00000008}, ++ {0x4f, 1, 2, 0, 0x00000000}, + {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x40000000}, ++ {0x67, 4, 0, 0, 0x00000038}, ++ {0xc7, 4, 0, 0, 0x00000038}, ++ {0xb7, 2, 0, 0, 0x00000000}, ++ {0x65, 4, 0, 1, 0xffffffff}, ++ {0xb7, 7, 0, 0, 0x2cc681d1}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, 
++ {0xa7, 7, 0, 0, 0x598d03a2}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb31a0745}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x66340e8a}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xcc681d15}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000004}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x98d03a2b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000002}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x31a07456}, ++ {0x71, 4, 8, 13, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6340e8ad}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc681d15b}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d03a2b7}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1a07456f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x340e8ade}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x681d15bd}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd03a2b7b}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa07456f6}, ++ {0x71, 3, 8, 14, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x40e8aded}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000038}, ++ {0xc7, 4, 0, 0, 0x00000038}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x81d15bdb}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x03a2b7b7}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x07456f6f}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x0e8adedf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x1d15bdbf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000004}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3a2b7b7e}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000002}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7456f6fd}, ++ {0x71, 4, 8, 15, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe8adedfa}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd15bdbf4}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa2b7b7e9}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x456f6fd3}, ++ {0xbf, 3, 4, 0, 0x00000000}, 
++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8adedfa7}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x15bdbf4f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x2b7b7e9e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x56f6fd3d}, ++ {0x71, 3, 8, 16, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xadedfa7b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000038}, ++ {0xc7, 4, 0, 0, 0x00000038}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5bdbf4f7}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb7b7e9ef}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6f6fd3df}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xdedfa7bf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xbdbf4f7f}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000004}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7b7e9eff}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000002}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf6fd3dff}, ++ {0x71, 4, 8, 17, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xedfa7bfe}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdbf4f7fc}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb7e9eff9}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6fd3dff2}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdfa7bfe5}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbf4f7fca}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7e9eff94}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfd3dff28}, ++ {0x71, 3, 8, 18, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfa7bfe51}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x67, 6, 0, 0, 0x00000038}, ++ {0xc7, 6, 0, 0, 0x00000038}, ++ {0xbf, 4, 5, 0, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xf4f7fca2}, ++ {0x6d, 2, 6, 1, 0x00000000}, ++ {0xbf, 4, 5, 0, 0x00000000}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000040}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xe9eff945}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000020}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd3dff28a}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000010}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xa7bfe514}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000008}, ++ {0x15, 5, 0, 1, 0x00000000}, 
++ {0xa7, 4, 0, 0, 0x4f7fca28}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x9eff9450}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x3dff28a0}, ++ {0x71, 5, 8, 19, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x7bfe5141}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000038}, ++ {0xc7, 3, 0, 0, 0x00000038}, ++ {0xbf, 7, 4, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf7fca283}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 7, 4, 0, 0x00000000}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xeff94506}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xdff28a0c}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xbfe51418}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7fca2831}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000004}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xff945063}, ++ {0xbf, 3, 5, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000002}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xff28a0c6}, ++ {0x57, 5, 0, 0, 0x00000001}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xfe51418c}, ++ {0xbf, 4, 1, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000020}, ++ {0xc7, 4, 0, 0, 0x00000020}, ++ {0xbf, 3, 7, 0, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xfca28319}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 3, 7, 0, 0x00000000}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x40000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf9450633}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x20000000}, ++ {0x79, 6, 10, -56, 0x00000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf28a0c67}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x10000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xe51418ce}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x08000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xca28319d}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x04000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x9450633b}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x02000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x28a0c676}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x01000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x51418ced}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00800000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xa28319db}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00400000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x450633b6}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00200000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8a0c676c}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00100000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x1418ced8}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00080000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x28319db1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00040000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x50633b63}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00020000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 
0xa0c676c6}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00010000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x418ced8d}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00008000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8319db1a}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00004000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x0633b634}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00002000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x0c676c68}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00001000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x18ced8d1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000800}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x319db1a3}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000400}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x633b6347}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000200}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xc676c68f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000100}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8ced8d1f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000080}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x19db1a3e}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000040}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x33b6347d}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000020}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x676c68fa}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000010}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xced8d1f4}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000008}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x9db1a3e9}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000004}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x3b6347d2}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000002}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x76c68fa5}, ++ {0x57, 1, 0, 0, 0x00000001}, ++ {0x15, 1, 0, 1194, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xed8d1f4a}, ++ {0x05, 0, 0, 1192, 0x00000000}, ++ {0x0f, 8, 1, 0, 0x00000000}, ++ {0xb7, 7, 0, 0, 0x00000000}, ++ {0xbf, 1, 8, 0, 0x00000000}, ++ {0x07, 1, 0, 0, 0x0000002c}, ++ {0x2d, 1, 9, 1216, 0x00000000}, ++ {0x61, 2, 8, 8, 0x00000000}, ++ {0xdc, 2, 0, 0, 0x00000040}, ++ {0xc7, 2, 0, 0, 0x00000020}, ++ {0x71, 3, 8, 6, 0x00000000}, ++ {0x15, 3, 0, 2, 0x00000011}, ++ {0xb7, 1, 0, 0, 0x00000000}, ++ {0x55, 3, 0, 12, 0x00000006}, ++ {0xbf, 3, 8, 0, 0x00000000}, ++ {0x07, 3, 0, 0, 0x00000028}, ++ {0x71, 4, 3, 0, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000018}, ++ {0x71, 1, 3, 1, 0x00000000}, ++ {0x67, 1, 0, 0, 0x00000010}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 4, 3, 3, 0x00000000}, ++ {0x4f, 1, 4, 0, 0x00000000}, ++ {0x71, 3, 3, 2, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000008}, ++ {0x4f, 1, 3, 0, 0x00000000}, ++ {0xbf, 4, 2, 0, 0x00000000}, ++ {0x77, 4, 0, 0, 0x0000001f}, ++ {0x57, 4, 0, 0, 0x2cc681d1}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x40000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x598d03a2}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x20000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb31a0745}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x10000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x66340e8a}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x08000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 
4, 0, 0, 0xcc681d15}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x04000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x98d03a2b}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x02000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x31a07456}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x01000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x6340e8ad}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00800000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xc681d15b}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00400000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x8d03a2b7}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00200000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x1a07456f}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00100000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x340e8ade}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00080000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x681d15bd}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00040000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd03a2b7b}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00020000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xa07456f6}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00010000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x40e8aded}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00008000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x81d15bdb}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00004000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x03a2b7b7}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00002000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x07456f6f}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00001000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x0e8adedf}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000800}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x1d15bdbf}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000400}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x3a2b7b7e}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000200}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x7456f6fd}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000100}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xe8adedfa}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd15bdbf4}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xa2b7b7e9}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x456f6fd3}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x8adedfa7}, ++ {0xbf, 3, 2, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x15bdbf4f}, ++ {0x61, 3, 8, 12, 0x00000000}, ++ {0xbf, 5, 2, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x2b7b7e9e}, ++ {0xdc, 3, 0, 0, 0x00000040}, ++ {0xbf, 5, 2, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x56f6fd3d}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 2, 0, 0, 0x00000001}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xadedfa7b}, + {0xb7, 2, 
0, 0, 0x00000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x598d03a2}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x5bdbf4f7}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x40000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb7b7e9ef}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x20000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb31a0745}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6f6fd3df}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x10000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x66340e8a}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdedfa7bf}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x08000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcc681d15}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbdbf4f7f}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x04000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x98d03a2b}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7b7e9eff}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x02000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x31a07456}, +- {0x57, 3, 0, 0, 0x01000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6340e8ad}, +- {0x71, 3, 6, 13, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf6fd3dff}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x01000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xedfa7bfe}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00800000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc681d15b}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdbf4f7fc}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00400000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d03a2b7}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb7e9eff9}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00200000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1a07456f}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6fd3dff2}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00100000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x340e8ade}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdfa7bfe5}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00080000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x681d15bd}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbf4f7fca}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00040000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd03a2b7b}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7e9eff94}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00020000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa07456f6}, +- {0x57, 3, 0, 0, 0x00010000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x40e8aded}, +- {0x71, 3, 6, 14, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfd3dff28}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00010000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfa7bfe51}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00008000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x81d15bdb}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf4f7fca2}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00004000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x03a2b7b7}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 
5, 0, 0, 0xe9eff945}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00002000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x07456f6f}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd3dff28a}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00001000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0e8adedf}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa7bfe514}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000800}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1d15bdbf}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x4f7fca28}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000400}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3a2b7b7e}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x9eff9450}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000200}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7456f6fd}, +- {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe8adedfa}, +- {0x71, 3, 6, 15, 0x00000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3dff28a0}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000100}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7bfe5141}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000080}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd15bdbf4}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf7fca283}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000040}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa2b7b7e9}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xeff94506}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000020}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x456f6fd3}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdff28a0c}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000010}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8adedfa7}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xbfe51418}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000008}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x15bdbf4f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000004}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2b7b7e9e}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000002}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x56f6fd3d}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7fca2831}, ++ {0x61, 4, 8, 16, 0x00000000}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000004}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xff945063}, ++ {0xdc, 4, 0, 0, 0x00000040}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000002}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xff28a0c6}, ++ {0xc7, 4, 0, 0, 0x00000020}, + {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xadedfa7b}, +- {0x71, 4, 6, 16, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000038}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0xb7, 3, 0, 0, 0xffffffff}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5bdbf4f7}, +- {0x67, 4, 0, 0, 0x00000018}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7b7e9ef}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6f6fd3df}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdedfa7bf}, +- {0xbf, 5, 4, 0, 
0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbdbf4f7f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7b7e9eff}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf6fd3dff}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xedfa7bfe}, +- {0x71, 4, 6, 17, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000010}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdbf4f7fc}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7e9eff9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6fd3dff2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdfa7bfe5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbf4f7fca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7e9eff94}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfd3dff28}, +- {0x57, 4, 0, 0, 0x00010000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfa7bfe51}, +- {0x71, 4, 6, 18, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000008}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf4f7fca2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe9eff945}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd3dff28a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa7bfe514}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4f7fca28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9eff9450}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3dff28a0}, +- {0x57, 4, 0, 0, 0x00000100}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7bfe5141}, +- {0x71, 4, 6, 19, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf7fca283}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xeff94506}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdff28a0c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbfe51418}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7fca2831}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff945063}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff28a0c6}, +- {0x57, 4, 0, 0, 0x00000001}, +- {0x1d, 4, 2, 1, 
0x00000000}, +- {0xa7, 1, 0, 0, 0xfe51418c}, +- {0x71, 4, 6, 20, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000008}, +- {0x71, 5, 6, 21, 0x00000000}, +- {0x4f, 4, 5, 0, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000030}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfca28319}, +- {0x67, 4, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfe51418c}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xfca28319}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x40000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf9450633}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf9450633}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x20000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf28a0c67}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf28a0c67}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x10000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe51418ce}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe51418ce}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x08000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xca28319d}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xca28319d}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x04000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9450633b}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9450633b}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x02000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28a0c676}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x28a0c676}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x01000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x51418ced}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x51418ced}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00800000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa28319db}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa28319db}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00400000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x450633b6}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x450633b6}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00200000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8a0c676c}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8a0c676c}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00100000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1418ced8}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x1418ced8}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00080000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28319db1}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x28319db1}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00040000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x50633b63}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x50633b63}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00020000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa0c676c6}, +- {0x57, 4, 0, 0, 0x00010000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x418ced8d}, +- {0x71, 3, 6, 22, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00008000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8319db1a}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00004000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 
0, 0, 0x0633b634}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa0c676c6}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00010000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x418ced8d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00008000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8319db1a}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00004000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x0633b634}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00002000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x0c676c68}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00001000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x18ced8d1}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000800}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x319db1a3}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000400}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x633b6347}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000200}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc676c68f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000100}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8ced8d1f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x19db1a3e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x33b6347d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x676c68fa}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xced8d1f4}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9db1a3e9}, ++ {0x61, 3, 8, 20, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3b6347d2}, ++ {0xdc, 3, 0, 0, 0x00000040}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x76c68fa5}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xed8d1f4a}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xdb1a3e94}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x40000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb6347d28}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x20000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6c68fa51}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x10000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd8d1f4a3}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x08000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb1a3e946}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x04000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6347d28d}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x02000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc68fa51a}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x01000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d1f4a35}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00800000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1a3e946b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00400000}, ++ {0x15, 4, 
0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x347d28d7}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00200000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x68fa51ae}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00100000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd1f4a35c}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00080000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa3e946b9}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00040000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x47d28d73}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00020000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8fa51ae7}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00010000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1f4a35cf}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00008000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3e946b9e}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00004000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7d28d73c}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00002000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0c676c68}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xfa51ae78}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00001000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x18ced8d1}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf4a35cf1}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000800}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x319db1a3}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe946b9e3}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000400}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x633b6347}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd28d73c7}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000200}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc676c68f}, +- {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8ced8d1f}, +- {0x71, 3, 6, 23, 0x00000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa51ae78e}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000100}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x4a35cf1c}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000080}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x19db1a3e}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x946b9e38}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000040}, +- {0x79, 5, 10, -56, 0x00000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x33b6347d}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x28d73c71}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000020}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x676c68fa}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x51ae78e3}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000010}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xced8d1f4}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa35cf1c6}, + {0xbf, 4, 3, 0, 0x00000000}, + {0x57, 4, 0, 0, 0x00000008}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9db1a3e9}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000004}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3b6347d2}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000002}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x76c68fa5}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x46b9e38d}, ++ {0x61, 4, 8, 24, 
0x00000000}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000004}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d73c71b}, ++ {0xdc, 4, 0, 0, 0x00000040}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000002}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1ae78e36}, ++ {0xc7, 4, 0, 0, 0x00000020}, + {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1177, 0x00000000}, +- {0xa7, 1, 0, 0, 0xed8d1f4a}, +- {0x05, 0, 0, 1175, 0x00000000}, +- {0x0f, 6, 1, 0, 0x00000000}, +- {0xb7, 7, 0, 0, 0x00000000}, +- {0xbf, 1, 6, 0, 0x00000000}, +- {0x07, 1, 0, 0, 0x0000002c}, +- {0x2d, 1, 9, 1202, 0x00000000}, +- {0x61, 4, 6, 8, 0x00000000}, +- {0xbf, 1, 4, 0, 0x00000000}, +- {0x67, 1, 0, 0, 0x00000038}, +- {0xc7, 1, 0, 0, 0x00000020}, +- {0x77, 1, 0, 0, 0x0000001f}, +- {0x57, 1, 0, 0, 0x2cc681d1}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x35cf1c6c}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6b9e38d9}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000040}, +- {0xb7, 2, 0, 0, 0x00000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x598d03a2}, ++ {0x57, 3, 0, 0, 0x40000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xd73c71b2}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000020}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb31a0745}, ++ {0x57, 3, 0, 0, 0x20000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xae78e364}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000010}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x66340e8a}, ++ {0x57, 3, 0, 0, 0x10000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5cf1c6c9}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000008}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcc681d15}, ++ {0x57, 3, 0, 0, 0x08000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb9e38d92}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000004}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x98d03a2b}, ++ {0x57, 3, 0, 0, 0x04000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x73c71b25}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000002}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x31a07456}, ++ {0x57, 3, 0, 0, 0x02000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe78e364b}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6340e8ad}, ++ {0x57, 3, 0, 0, 0x01000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xcf1c6c96}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00800000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9e38d92c}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00400000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3c71b259}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00200000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x78e364b2}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00100000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf1c6c964}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00080000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe38d92c9}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00040000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc71b2593}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00020000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8e364b27}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 
0x00010000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x1c6c964e}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00008000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc681d15b}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x38d92c9c}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00004000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d03a2b7}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x71b25938}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00002000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1a07456f}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xe364b270}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00001000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x340e8ade}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc6c964e0}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000800}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x681d15bd}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x8d92c9c0}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000400}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd03a2b7b}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x1b259380}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000200}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa07456f6}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x364b2700}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x40e8aded}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6c964e01}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xd92c9c03}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb2593807}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x64b2700f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xc964e01e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x92c9c03d}, ++ {0x61, 3, 8, 28, 0x00000000}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x2593807a}, ++ {0xdc, 3, 0, 0, 0x00000040}, ++ {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x4b2700f4}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x964e01e8}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x2c9c03d1}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 5, 7, 0, 0x00000000}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x40000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x593807a3}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x20000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xb2700f46}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x10000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x64e01e8d}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x08000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc9c03d1a}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x04000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x93807a35}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x02000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ 
{0xa7, 5, 0, 0, 0x2700f46b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x01000000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x4e01e8d6}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00800000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x9c03d1ad}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00400000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3807a35b}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00200000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x700f46b6}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00100000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe01e8d6c}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00080000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xc03d1ad9}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00040000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x807a35b3}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00020000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x00f46b66}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00010000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x01e8d6cc}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00008000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x03d1ad99}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00004000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x07a35b32}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00002000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x0f46b665}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00001000}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1e8d6cca}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000800}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x3d1ad994}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000400}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x7a35b328}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000200}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xf46b6651}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000100}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xe8d6cca2}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000080}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd1ad9944}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000040}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xa35b3289}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000020}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x46b66512}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000010}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x8d6cca25}, ++ {0xbf, 4, 3, 0, 0x00000000}, ++ {0x57, 4, 0, 0, 0x00000008}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x1ad9944a}, ++ {0x61, 4, 8, 32, 0x00000000}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000004}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x35b32894}, ++ {0xdc, 4, 0, 0, 0x00000040}, ++ {0xbf, 6, 3, 0, 0x00000000}, ++ {0x57, 6, 0, 0, 0x00000002}, ++ {0x15, 6, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0x6b665129}, ++ {0xc7, 4, 0, 0, 0x00000020}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 5, 0, 0, 0xd6cca253}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xad9944a7}, ++ {0x6d, 2, 4, 1, 0x00000000}, ++ {0xbf, 7, 5, 0, 0x00000000}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x40000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5b32894f}, ++ 
{0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x20000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb665129f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x10000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x6cca253e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x08000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xd9944a7d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x04000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xb32894fb}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x02000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x665129f6}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x01000000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xcca253ec}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00800000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x81d15bdb}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9944a7d9}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00400000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x03a2b7b7}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x32894fb2}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00200000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x07456f6f}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x65129f65}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00100000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0e8adedf}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xca253eca}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00080000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1d15bdbf}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x944a7d95}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00040000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3a2b7b7e}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x2894fb2a}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00020000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7456f6fd}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x5129f655}, + {0xbf, 3, 4, 0, 0x00000000}, + {0x57, 3, 0, 0, 0x00010000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe8adedfa}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa253ecab}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00008000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x44a7d956}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00004000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x894fb2ac}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00002000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x129f6558}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00001000}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x253ecab1}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000800}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x4a7d9563}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000400}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x94fb2ac7}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000200}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x29f6558f}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000100}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x53ecab1e}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000080}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xa7d9563d}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000040}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 
0x4fb2ac7a}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000020}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x9f6558f5}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000010}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x3ecab1ea}, ++ {0xbf, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x00000008}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0x7d9563d5}, ++ {0x61, 3, 8, 36, 0x00000000}, + {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0xb7, 3, 0, 0, 0xffffffff}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd15bdbf4}, ++ {0x57, 5, 0, 0, 0x00000004}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xfb2ac7ab}, ++ {0xdc, 3, 0, 0, 0x00000040}, + {0xbf, 5, 4, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000002}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xf6558f56}, ++ {0xc7, 3, 0, 0, 0x00000020}, ++ {0x57, 4, 0, 0, 0x00000001}, ++ {0x15, 4, 0, 1, 0x00000000}, ++ {0xa7, 7, 0, 0, 0xecab1eac}, ++ {0xbf, 4, 7, 0, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd9563d59}, ++ {0x6d, 2, 3, 1, 0x00000000}, ++ {0xbf, 4, 7, 0, 0x00000000}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa2b7b7e9}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb2ac7ab2}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x456f6fd3}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x79, 6, 10, -56, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x6558f564}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8adedfa7}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xcab1eac8}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x15bdbf4f}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x9563d590}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2b7b7e9e}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x2ac7ab20}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x56f6fd3d}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xadedfa7b}, +- {0x61, 4, 6, 12, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x558f5641}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x01000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xab1eac83}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00800000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x563d5906}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00400000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xac7ab20c}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00200000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x58f56418}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00100000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb1eac831}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00080000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x63d59063}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00040000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xc7ab20c7}, ++ {0xbf, 5, 3, 0, 
0x00000000}, ++ {0x57, 5, 0, 0, 0x00020000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x8f56418f}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00010000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x1eac831e}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00008000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x3d59063c}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00004000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x7ab20c78}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00002000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xf56418f0}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00001000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xeac831e1}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000800}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xd59063c2}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000400}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xab20c784}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000200}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x56418f09}, ++ {0xbf, 5, 3, 0, 0x00000000}, ++ {0x57, 5, 0, 0, 0x00000100}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xac831e12}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5bdbf4f7}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x59063c25}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7b7e9ef}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xb20c784b}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6f6fd3df}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x6418f097}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdedfa7bf}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0xc831e12f}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbdbf4f7f}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x9063c25f}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7b7e9eff}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x20c784be}, ++ {0xbf, 5, 3, 0, 0x00000000}, + {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf6fd3dff}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xedfa7bfe}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdbf4f7fc}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb7e9eff9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6fd3dff2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdfa7bfe5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbf4f7fca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 
0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7e9eff94}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfd3dff28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfa7bfe51}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf4f7fca2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe9eff945}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd3dff28a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa7bfe514}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4f7fca28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9eff9450}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3dff28a0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7bfe5141}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf7fca283}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xeff94506}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdff28a0c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbfe51418}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7fca2831}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff945063}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff28a0c6}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfe51418c}, +- {0x61, 4, 6, 16, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfca28319}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf9450633}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf28a0c67}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe51418ce}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xca28319d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9450633b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28a0c676}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x51418ced}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 
0xa28319db}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x450633b6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8a0c676c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1418ced8}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28319db1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x50633b63}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa0c676c6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x418ced8d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8319db1a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0633b634}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0c676c68}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x18ced8d1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x319db1a3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x633b6347}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc676c68f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8ced8d1f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x19db1a3e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x33b6347d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x676c68fa}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xced8d1f4}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9db1a3e9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3b6347d2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x76c68fa5}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xed8d1f4a}, +- {0x61, 4, 6, 20, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xdb1a3e94}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb6347d28}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6c68fa51}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd8d1f4a3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 
0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb1a3e946}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6347d28d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc68fa51a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d1f4a35}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1a3e946b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x347d28d7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x68fa51ae}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd1f4a35c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa3e946b9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x47d28d73}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8fa51ae7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1f4a35cf}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3e946b9e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7d28d73c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfa51ae78}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf4a35cf1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe946b9e3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd28d73c7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa51ae78e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4a35cf1c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x946b9e38}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x28d73c71}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x51ae78e3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa35cf1c6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x46b9e38d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d73c71b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1ae78e36}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x35cf1c6c}, +- {0x61, 4, 6, 24, 
0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6b9e38d9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd73c71b2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xae78e364}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5cf1c6c9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb9e38d92}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x73c71b25}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe78e364b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcf1c6c96}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9e38d92c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3c71b259}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x78e364b2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf1c6c964}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe38d92c9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc71b2593}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8e364b27}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1c6c964e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x38d92c9c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x71b25938}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe364b270}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc6c964e0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d92c9c0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1b259380}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x364b2700}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6c964e01}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd92c9c03}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb2593807}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x64b2700f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 
0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc964e01e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x92c9c03d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2593807a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4b2700f4}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x964e01e8}, +- {0x61, 4, 6, 28, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2c9c03d1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x593807a3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb2700f46}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x64e01e8d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc9c03d1a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x93807a35}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2700f46b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4e01e8d6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9c03d1ad}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3807a35b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x700f46b6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe01e8d6c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc03d1ad9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x807a35b3}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x00f46b66}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x01e8d6cc}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x03d1ad99}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x07a35b32}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0f46b665}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1e8d6cca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3d1ad994}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7a35b328}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf46b6651}, +- {0xbf, 5, 4, 0, 
0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe8d6cca2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd1ad9944}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa35b3289}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x46b66512}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8d6cca25}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1ad9944a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x35b32894}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6b665129}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd6cca253}, +- {0x61, 4, 6, 32, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xad9944a7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5b32894f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb665129f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6cca253e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd9944a7d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb32894fb}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x665129f6}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcca253ec}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9944a7d9}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x32894fb2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x65129f65}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xca253eca}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x944a7d95}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2894fb2a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5129f655}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa253ecab}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x44a7d956}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x894fb2ac}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 
0x00000000}, +- {0xa7, 1, 0, 0, 0x129f6558}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x253ecab1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4a7d9563}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x94fb2ac7}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x29f6558f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x53ecab1e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000020}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xa7d9563d}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4fb2ac7a}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9f6558f5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3ecab1ea}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7d9563d5}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfb2ac7ab}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf6558f56}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xecab1eac}, +- {0x61, 4, 6, 36, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000080}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd9563d59}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000040}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb2ac7ab2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000020}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6558f564}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000010}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcab1eac8}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000008}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9563d590}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000004}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2ac7ab20}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000002}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x558f5641}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000001}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xab1eac83}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00008000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x563d5906}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00004000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xac7ab20c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00002000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x58f56418}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00001000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb1eac831}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000800}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x63d59063}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000400}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc7ab20c7}, +- {0xbf, 5, 4, 0, 
0x00000000}, +- {0x57, 5, 0, 0, 0x00000200}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x8f56418f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00000100}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1eac831e}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00800000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3d59063c}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00400000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7ab20c78}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00200000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf56418f0}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00100000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xeac831e1}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00080000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xd59063c2}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00040000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xab20c784}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00020000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x56418f09}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x00010000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xac831e12}, +- {0xbf, 5, 4, 0, 0x00000000}, ++ {0x15, 5, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x418f097c}, ++ {0x57, 3, 0, 0, 0x00000001}, ++ {0x15, 3, 0, 1, 0x00000000}, ++ {0xa7, 4, 0, 0, 0x831e12f9}, ++ {0xbf, 5, 1, 0, 0x00000000}, + {0x67, 5, 0, 0, 0x00000020}, + {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x59063c25}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x40000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xb20c784b}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x20000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x6418f097}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x10000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc831e12f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x08000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9063c25f}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x04000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x20c784be}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x57, 5, 0, 0, 0x02000000}, +- {0x1d, 5, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x418f097c}, +- {0x57, 4, 0, 0, 0x01000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x831e12f9}, +- {0x71, 4, 6, 40, 0x00000000}, +- {0x67, 4, 0, 0, 0x00000008}, +- {0x71, 5, 6, 41, 0x00000000}, +- {0x4f, 4, 5, 0, 0x00000000}, +- {0xbf, 5, 4, 0, 0x00000000}, +- {0x67, 5, 0, 0, 0x00000030}, +- {0xc7, 5, 0, 0, 0x00000020}, +- {0x6d, 5, 3, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x063c25f3}, +- {0x67, 4, 0, 0, 0x00000010}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x40000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x0c784be7}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x20000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x18f097cf}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x10000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x31e12f9f}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x08000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x63c25f3f}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x04000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc784be7f}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x02000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 
0x8f097cff}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x01000000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x1e12f9fe}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00800000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3c25f3fc}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00400000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x784be7f8}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00200000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf097cff0}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00100000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe12f9fe0}, +- {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00080000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xc25f3fc1}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00040000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x84be7f83}, ++ {0xa7, 3, 0, 0, 0x063c25f3}, ++ {0x6d, 2, 5, 1, 0x00000000}, + {0xbf, 3, 4, 0, 0x00000000}, +- {0x57, 3, 0, 0, 0x00020000}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x097cff07}, +- {0x57, 4, 0, 0, 0x00010000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x12f9fe0f}, +- {0x71, 3, 6, 42, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00008000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x25f3fc1f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00004000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x4be7f83f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00002000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x97cff07f}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00001000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x2f9fe0fe}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000800}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x5f3fc1fd}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000400}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xbe7f83fb}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000200}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7cff07f7}, +- {0x57, 3, 0, 0, 0x00000100}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf9fe0fee}, +- {0x71, 3, 6, 43, 0x00000000}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000080}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xf3fc1fdc}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000040}, +- {0x79, 5, 10, -56, 0x00000000}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xe7f83fb8}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000020}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xcff07f70}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000010}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x9fe0fee1}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000008}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x3fc1fdc2}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000004}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0x7f83fb85}, +- {0xbf, 4, 3, 0, 0x00000000}, +- {0x57, 4, 0, 0, 0x00000002}, +- {0x1d, 4, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xff07f70a}, +- {0x57, 3, 0, 0, 0x00000001}, +- {0x1d, 3, 2, 1, 0x00000000}, +- {0xa7, 1, 0, 0, 0xfe0fee15}, +- {0x71, 2, 0, 201, 0x00000000}, +- {0x67, 2, 0, 0, 0x00000008}, +- {0x71, 3, 0, 200, 0x00000000}, +- {0x4f, 2, 3, 0, 0x00000000}, +- {0x71, 3, 0, 203, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, +- {0x71, 4, 0, 202, 0x00000000}, +- {0x4f, 
3, 4, 0, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000010}, +- {0x4f, 3, 2, 0, 0x00000000}, +- {0x67, 1, 0, 0, 0x00000020}, +- {0x77, 1, 0, 0, 0x00000020}, + {0xbf, 2, 1, 0, 0x00000000}, +- {0x3f, 2, 3, 0, 0x00000000}, +- {0x2f, 2, 3, 0, 0x00000000}, +- {0x1f, 1, 2, 0, 0x00000000}, +- {0x57, 1, 0, 0, 0x0000000f}, +- {0x67, 1, 0, 0, 0x00000002}, +- {0x0f, 0, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x40000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x0c784be7}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x20000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x18f097cf}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x10000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x31e12f9f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x08000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x63c25f3f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x04000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xc784be7f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x02000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x8f097cff}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x01000000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x1e12f9fe}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00800000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x3c25f3fc}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00400000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x784be7f8}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00200000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf097cff0}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00100000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xe12f9fe0}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00080000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xc25f3fc1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00040000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x84be7f83}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00020000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x097cff07}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00010000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x12f9fe0f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00008000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x25f3fc1f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00004000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x4be7f83f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00002000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x97cff07f}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00001000}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x2f9fe0fe}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000800}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x5f3fc1fd}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000400}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xbe7f83fb}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000200}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x7cff07f7}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000100}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf9fe0fee}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000080}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xf3fc1fdc}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000040}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xe7f83fb8}, ++ {0xbf, 2, 
1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000020}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xcff07f70}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000010}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x9fe0fee1}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000008}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x3fc1fdc2}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000004}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0x7f83fb85}, ++ {0xbf, 2, 1, 0, 0x00000000}, ++ {0x57, 2, 0, 0, 0x00000002}, ++ {0x15, 2, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xff07f70a}, ++ {0x57, 1, 0, 0, 0x00000001}, ++ {0x15, 1, 0, 1, 0x00000000}, ++ {0xa7, 3, 0, 0, 0xfe0fee15}, ++ {0x71, 1, 0, 201, 0x00000000}, ++ {0x67, 1, 0, 0, 0x00000008}, ++ {0x71, 2, 0, 200, 0x00000000}, ++ {0x4f, 1, 2, 0, 0x00000000}, ++ {0x71, 2, 0, 202, 0x00000000}, ++ {0x67, 2, 0, 0, 0x00000010}, ++ {0x71, 4, 0, 203, 0x00000000}, ++ {0x67, 4, 0, 0, 0x00000018}, ++ {0x4f, 4, 2, 0, 0x00000000}, ++ {0x4f, 4, 1, 0, 0x00000000}, ++ {0x67, 3, 0, 0, 0x00000020}, ++ {0x77, 3, 0, 0, 0x00000020}, ++ {0x9f, 3, 4, 0, 0x00000000}, ++ {0x57, 3, 0, 0, 0x0000000f}, ++ {0x67, 3, 0, 0, 0x00000002}, ++ {0x0f, 0, 3, 0, 0x00000000}, + {0x71, 1, 0, 137, 0x00000000}, + {0x67, 1, 0, 0, 0x00000008}, + {0x71, 2, 0, 136, 0x00000000}, + {0x4f, 1, 2, 0, 0x00000000}, + {0x71, 2, 0, 138, 0x00000000}, ++ {0x67, 2, 0, 0, 0x00000010}, + {0x71, 3, 0, 139, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000008}, ++ {0x67, 3, 0, 0, 0x00000018}, + {0x4f, 3, 2, 0, 0x00000000}, +- {0x67, 3, 0, 0, 0x00000010}, + {0x4f, 3, 1, 0, 0x00000000}, + {0x07, 3, 0, 0, 0x7cafe800}, +- {0x63, 5, 3, 52, 0x00000000}, ++ {0x63, 6, 3, 52, 0x00000000}, + {0xb7, 7, 0, 0, 0x00000001}, + {0xbf, 0, 7, 0, 0x00000000}, + {0x95, 0, 0, 0, 0x00000000}, +diff --git a/dpdk/drivers/net/tap/tap_bpf_program.c b/dpdk/drivers/net/tap/tap_bpf_program.c +index 20c310e5e7..d9bb65831a 100644 +--- a/dpdk/drivers/net/tap/tap_bpf_program.c ++++ b/dpdk/drivers/net/tap/tap_bpf_program.c +@@ -131,6 +131,8 @@ rss_l3_l4(struct __sk_buff *skb) + __u8 *key = 0; + __u32 len; + __u32 queue = 0; ++ bool mf = 0; ++ __u16 frag_off = 0; + + rsskey = map_lookup_elem(&map_keys, &key_idx); + if (!rsskey) { +@@ -155,6 +157,8 @@ rss_l3_l4(struct __sk_buff *skb) + return TC_ACT_OK; + + __u8 *src_dst_addr = data + off + offsetof(struct iphdr, saddr); ++ __u8 *frag_off_addr = data + off + offsetof(struct iphdr, frag_off); ++ __u8 *prot_addr = data + off + offsetof(struct iphdr, protocol); + __u8 *src_dst_port = data + off + sizeof(struct iphdr); + struct ipv4_l3_l4_tuple v4_tuple = { + .src_addr = IPv4(*(src_dst_addr + 0), +@@ -165,11 +169,25 @@ rss_l3_l4(struct __sk_buff *skb) + *(src_dst_addr + 5), + *(src_dst_addr + 6), + *(src_dst_addr + 7)), +- .sport = PORT(*(src_dst_port + 0), +- *(src_dst_port + 1)), +- .dport = PORT(*(src_dst_port + 2), +- *(src_dst_port + 3)), ++ .sport = 0, ++ .dport = 0, + }; ++ /** Fetch the L4-layer port numbers only in case of TCP/UDP, ++ ** and only if the packet is not fragmented, since fragmented ++ ** chunks do not carry the L4 TCP/UDP header. 
++ **/ ++ if (*prot_addr == IPPROTO_UDP || *prot_addr == IPPROTO_TCP) { ++ frag_off = PORT(*(frag_off_addr + 0), ++ *(frag_off_addr + 1)); ++ mf = frag_off & 0x2000; ++ frag_off = frag_off & 0x1fff; ++ if (mf == 0 && frag_off == 0) { ++ v4_tuple.sport = PORT(*(src_dst_port + 0), ++ *(src_dst_port + 1)); ++ v4_tuple.dport = PORT(*(src_dst_port + 2), ++ *(src_dst_port + 3)); ++ } ++ } + __u8 input_len = sizeof(v4_tuple) / sizeof(__u32); + if (rsskey->hash_fields & (1 << HASH_FIELD_IPV4_L3)) + input_len--; +@@ -182,6 +200,9 @@ rss_l3_l4(struct __sk_buff *skb) + offsetof(struct ipv6hdr, saddr); + __u8 *src_dst_port = data + off + + sizeof(struct ipv6hdr); ++ __u8 *next_hdr = data + off + ++ offsetof(struct ipv6hdr, nexthdr); ++ + struct ipv6_l3_l4_tuple v6_tuple; + for (j = 0; j < 4; j++) + *((uint32_t *)&v6_tuple.src_addr + j) = +@@ -191,10 +212,18 @@ rss_l3_l4(struct __sk_buff *skb) + *((uint32_t *)&v6_tuple.dst_addr + j) = + __builtin_bswap32(*((uint32_t *) + src_dst_addr + 4 + j)); +- v6_tuple.sport = PORT(*(src_dst_port + 0), +- *(src_dst_port + 1)); +- v6_tuple.dport = PORT(*(src_dst_port + 2), +- *(src_dst_port + 3)); ++ ++ /** Fetch the L4 header port-numbers only if next-header ++ * is TCP/UDP **/ ++ if (*next_hdr == IPPROTO_UDP || *next_hdr == IPPROTO_TCP) { ++ v6_tuple.sport = PORT(*(src_dst_port + 0), ++ *(src_dst_port + 1)); ++ v6_tuple.dport = PORT(*(src_dst_port + 2), ++ *(src_dst_port + 3)); ++ } else { ++ v6_tuple.sport = 0; ++ v6_tuple.dport = 0; ++ } + + __u8 input_len = sizeof(v6_tuple) / sizeof(__u32); + if (rsskey->hash_fields & (1 << HASH_FIELD_IPV6_L3)) diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c -index c4f60ce98e..7673823945 100644 +index c4f60ce98e..f53bc297f8 100644 --- a/dpdk/drivers/net/tap/tap_flow.c +++ b/dpdk/drivers/net/tap/tap_flow.c @@ -961,7 +961,7 @@ add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata) @@ -39986,6 +76419,15 @@ index c4f60ce98e..7673823945 100644 * * @param[in] flow * Pointer to rte flow containing the netlink message +@@ -1684,7 +1684,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd, + struct rte_flow_item *items = implicit_rte_flows[idx].items; + struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr; + struct rte_flow_item_eth eth_local = { .type = 0 }; +- uint16_t if_index = pmd->remote_if_index; ++ unsigned int if_index = pmd->remote_if_index; + struct rte_flow *remote_flow = NULL; + struct nlmsg *msg = NULL; + int err = 0; @@ -2017,7 +2017,7 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx) break; @@ -40008,6 +76450,118 @@ index 56c343acea..a9097def1a 100644 } /** +diff --git a/dpdk/drivers/net/tap/tap_tcmsgs.c b/dpdk/drivers/net/tap/tap_tcmsgs.c +index b478b5951e..a3aae3c814 100644 +--- a/dpdk/drivers/net/tap/tap_tcmsgs.c ++++ b/dpdk/drivers/net/tap/tap_tcmsgs.c +@@ -19,7 +19,7 @@ struct qdisc { + + struct list_args { + int nlsk_fd; +- uint16_t ifindex; ++ unsigned int ifindex; + void *custom_arg; + }; + +@@ -42,7 +42,7 @@ struct qdisc_custom_arg { + * Overrides the default netlink flags for this msg with those specified. + */ + void +-tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, uint16_t flags) ++tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t flags) + { + struct nlmsghdr *n = &msg->nh; + +@@ -70,7 +70,7 @@ tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, uint16_t flags) + * 0 on success, -1 otherwise with errno set. 
+ */ + static int +-qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo) ++qdisc_del(int nlsk_fd, unsigned int ifindex, struct qdisc *qinfo) + { + struct nlmsg msg; + int fd = 0; +@@ -114,7 +114,7 @@ qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo) + * 0 on success, -1 otherwise with errno set. + */ + int +-qdisc_add_multiq(int nlsk_fd, uint16_t ifindex) ++qdisc_add_multiq(int nlsk_fd, unsigned int ifindex) + { + struct tc_multiq_qopt opt = {0}; + struct nlmsg msg; +@@ -144,7 +144,7 @@ qdisc_add_multiq(int nlsk_fd, uint16_t ifindex) + * 0 on success, -1 otherwise with errno set. + */ + int +-qdisc_add_ingress(int nlsk_fd, uint16_t ifindex) ++qdisc_add_ingress(int nlsk_fd, unsigned int ifindex) + { + struct nlmsg msg; + +@@ -208,7 +208,7 @@ qdisc_del_cb(struct nlmsghdr *nh, void *arg) + * 0 on success, -1 otherwise with errno set. + */ + static int +-qdisc_iterate(int nlsk_fd, uint16_t ifindex, ++qdisc_iterate(int nlsk_fd, unsigned int ifindex, + int (*callback)(struct nlmsghdr *, void *), void *arg) + { + struct nlmsg msg; +@@ -238,7 +238,7 @@ qdisc_iterate(int nlsk_fd, uint16_t ifindex, + * 0 on success, -1 otherwise with errno set. + */ + int +-qdisc_flush(int nlsk_fd, uint16_t ifindex) ++qdisc_flush(int nlsk_fd, unsigned int ifindex) + { + return qdisc_iterate(nlsk_fd, ifindex, qdisc_del_cb, NULL); + } +@@ -256,7 +256,7 @@ qdisc_flush(int nlsk_fd, uint16_t ifindex) + * Return -1 otherwise. + */ + int +-qdisc_create_multiq(int nlsk_fd, uint16_t ifindex) ++qdisc_create_multiq(int nlsk_fd, unsigned int ifindex) + { + int err = 0; + +@@ -282,7 +282,7 @@ qdisc_create_multiq(int nlsk_fd, uint16_t ifindex) + * Return -1 otherwise. + */ + int +-qdisc_create_ingress(int nlsk_fd, uint16_t ifindex) ++qdisc_create_ingress(int nlsk_fd, unsigned int ifindex) + { + int err = 0; + +diff --git a/dpdk/drivers/net/tap/tap_tcmsgs.h b/dpdk/drivers/net/tap/tap_tcmsgs.h +index 8cedea8462..a64cb29d6f 100644 +--- a/dpdk/drivers/net/tap/tap_tcmsgs.h ++++ b/dpdk/drivers/net/tap/tap_tcmsgs.h +@@ -24,14 +24,14 @@ + + #define MULTIQ_MAJOR_HANDLE (1 << 16) + +-void tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, ++void tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, + uint16_t flags); +-int qdisc_list(int nlsk_fd, uint16_t ifindex); +-int qdisc_flush(int nlsk_fd, uint16_t ifindex); +-int qdisc_create_ingress(int nlsk_fd, uint16_t ifindex); +-int qdisc_create_multiq(int nlsk_fd, uint16_t ifindex); +-int qdisc_add_ingress(int nlsk_fd, uint16_t ifindex); +-int qdisc_add_multiq(int nlsk_fd, uint16_t ifindex); +-int filter_list_ingress(int nlsk_fd, uint16_t ifindex); ++int qdisc_list(int nlsk_fd, unsigned int ifindex); ++int qdisc_flush(int nlsk_fd, unsigned int ifindex); ++int qdisc_create_ingress(int nlsk_fd, unsigned int ifindex); ++int qdisc_create_multiq(int nlsk_fd, unsigned int ifindex); ++int qdisc_add_ingress(int nlsk_fd, unsigned int ifindex); ++int qdisc_add_multiq(int nlsk_fd, unsigned int ifindex); ++int filter_list_ingress(int nlsk_fd, unsigned int ifindex); + + #endif /* _TAP_TCMSGS_H_ */ diff --git a/dpdk/drivers/net/thunderx/nicvf_svf.c b/dpdk/drivers/net/thunderx/nicvf_svf.c index bccf290599..1bcf73d9fc 100644 --- a/dpdk/drivers/net/thunderx/nicvf_svf.c @@ -40034,7 +76588,7 @@ index 7a30191472..a81d6890fe 100644 c_args: c_args) base_objs = base_lib.extract_all_objects(recursive: true) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c b/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c -index 72901cd0b0..4ed6bd6728 100644 +index 
72901cd0b0..aeeae06dfc 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_eeprom.c @@ -21,8 +21,6 @@ s32 txgbe_init_eeprom_params(struct txgbe_hw *hw) @@ -40082,32 +76636,56 @@ index 72901cd0b0..4ed6bd6728 100644 /* * this release is particularly important because our attempts * above to get the semaphore may have succeeded, and if there -@@ -140,13 +134,12 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { +@@ -117,38 +111,6 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) + status = 0; + } + +- /* Now get the semaphore between SW/FW through the SWESMBI bit */ +- if (status == 0) { +- for (i = 0; i < timeout; i++) { +- /* Set the SW EEPROM semaphore bit to request access */ +- wr32m(hw, TXGBE_MNGSWSYNC, +- TXGBE_MNGSWSYNC_REQ, TXGBE_MNGSWSYNC_REQ); +- +- /* +- * If we set the bit successfully then we got the +- * semaphore. +- */ +- swsm = rd32(hw, TXGBE_MNGSWSYNC); +- if (swsm & TXGBE_MNGSWSYNC_REQ) +- break; +- +- usec_delay(50); +- } +- +- /* +- * Release semaphores and return error if SW EEPROM semaphore +- * was not granted because we don't have access to the EEPROM +- */ +- if (i >= timeout) { - DEBUGOUT("SWESMBI Software EEPROM semaphore not granted.\n"); -+ DEBUGOUT("SWESMBI Software EEPROM semaphore not granted."); - txgbe_release_eeprom_semaphore(hw); - status = TXGBE_ERR_EEPROM; - } - } else { +- txgbe_release_eeprom_semaphore(hw); +- status = TXGBE_ERR_EEPROM; +- } +- } else { - DEBUGOUT("Software semaphore SMBI between device drivers " - "not granted.\n"); -+ DEBUGOUT("Software semaphore SMBI between device drivers not granted."); - } - +- } +- return status; -@@ -160,8 +153,6 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) + } + +@@ -160,9 +122,6 @@ s32 txgbe_get_eeprom_semaphore(struct txgbe_hw *hw) **/ void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw) { - DEBUGFUNC("txgbe_release_eeprom_semaphore"); - - wr32m(hw, TXGBE_MNGSWSYNC, TXGBE_MNGSWSYNC_REQ, 0); +- wr32m(hw, TXGBE_MNGSWSYNC, TXGBE_MNGSWSYNC_REQ, 0); wr32m(hw, TXGBE_SWSEM, TXGBE_SWSEM_PF, 0); txgbe_flush(hw); -@@ -290,8 +281,6 @@ s32 txgbe_ee_write16(struct txgbe_hw *hw, u32 offset, + } +@@ -290,8 +249,6 @@ s32 txgbe_ee_write16(struct txgbe_hw *hw, u32 offset, u32 addr = (offset << 1); int err; @@ -40116,7 +76694,7 @@ index 72901cd0b0..4ed6bd6728 100644 err = hw->mac.acquire_swfw_sync(hw, mask); if (err) return err; -@@ -348,8 +337,6 @@ s32 txgbe_ee_writew_sw(struct txgbe_hw *hw, u32 offset, +@@ -348,8 +305,6 @@ s32 txgbe_ee_writew_sw(struct txgbe_hw *hw, u32 offset, u32 addr = hw->rom.sw_addr + (offset << 1); int err; @@ -40125,7 +76703,7 @@ index 72901cd0b0..4ed6bd6728 100644 err = hw->mac.acquire_swfw_sync(hw, mask); if (err) return err; -@@ -399,11 +386,9 @@ s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw) +@@ -399,11 +354,9 @@ s32 txgbe_calc_eeprom_checksum(struct txgbe_hw *hw) int err; u16 buffer[BUFF_SIZE]; @@ -40138,7 +76716,7 @@ index 72901cd0b0..4ed6bd6728 100644 return err; } -@@ -437,15 +422,13 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, +@@ -437,15 +390,13 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 read_checksum = 0; int err; @@ -40155,7 +76733,7 @@ index 72901cd0b0..4ed6bd6728 100644 return err; } -@@ -457,7 +440,7 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, +@@ -457,7 +408,7 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, err = hw->rom.readw_sw(hw, TXGBE_EEPROM_CHECKSUM, 
&read_checksum); if (err) { @@ -40164,7 +76742,7 @@ index 72901cd0b0..4ed6bd6728 100644 return err; } -@@ -466,7 +449,7 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, +@@ -466,7 +417,7 @@ s32 txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, */ if (read_checksum != checksum) { err = TXGBE_ERR_EEPROM_CHECKSUM; @@ -40173,7 +76751,7 @@ index 72901cd0b0..4ed6bd6728 100644 } /* If the user cares, return the calculated checksum */ -@@ -485,15 +468,13 @@ s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw) +@@ -485,15 +436,13 @@ s32 txgbe_update_eeprom_checksum(struct txgbe_hw *hw) s32 status; u16 checksum; @@ -40191,7 +76769,7 @@ index 72901cd0b0..4ed6bd6728 100644 } diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c -index 00a8db78bf..776891ee7e 100644 +index 00a8db78bf..1083431055 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.c @@ -42,8 +42,6 @@ bool txgbe_device_supports_autoneg_fc(struct txgbe_hw *hw) @@ -40779,7 +77357,32 @@ index 00a8db78bf..776891ee7e 100644 break; } -@@ -2371,7 +2293,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, +@@ -2357,10 +2279,24 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, + } + + if (speed & TXGBE_LINK_SPEED_1GB_FULL) { ++ u32 curr_autoneg; ++ + speedcnt++; + if (highest_link_speed == TXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = TXGBE_LINK_SPEED_1GB_FULL; + ++ status = hw->mac.check_link(hw, &link_speed, &link_up, false); ++ if (status != 0) ++ return status; ++ ++ /* If we already have link at this speed, just jump out */ ++ if (link_speed == TXGBE_LINK_SPEED_1GB_FULL) { ++ curr_autoneg = rd32_epcs(hw, SR_MII_MMD_CTL); ++ if (link_up && (hw->autoneg == ++ !!(curr_autoneg & SR_MII_MMD_CTL_AN_EN))) ++ goto out; ++ } ++ + /* Set the module link speed */ + switch (hw->phy.media_type) { + case txgbe_media_type_fiber: +@@ -2371,7 +2307,7 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, /* QSFP module automatically detects link speed */ break; default: @@ -40788,7 +77391,7 @@ index 00a8db78bf..776891ee7e 100644 break; } -@@ -2437,8 +2359,6 @@ s32 txgbe_init_shared_code(struct txgbe_hw *hw) +@@ -2437,8 +2373,6 @@ s32 txgbe_init_shared_code(struct txgbe_hw *hw) { s32 status; @@ -40797,7 +77400,7 @@ index 00a8db78bf..776891ee7e 100644 /* * Set the mac type */ -@@ -2474,8 +2394,6 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) +@@ -2474,8 +2408,6 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) { s32 err = 0; @@ -40806,7 +77409,7 @@ index 00a8db78bf..776891ee7e 100644 if (hw->vendor_id != PCI_VENDOR_ID_WANGXUN) { DEBUGOUT("Unsupported vendor id: %x", hw->vendor_id); return TXGBE_ERR_DEVICE_NOT_SUPPORTED; -@@ -2497,7 +2415,7 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) +@@ -2497,7 +2429,7 @@ s32 txgbe_set_mac_type(struct txgbe_hw *hw) break; } @@ -40815,7 +77418,7 @@ index 00a8db78bf..776891ee7e 100644 hw->mac.type, err); return err; } -@@ -2506,8 +2424,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw) +@@ -2506,8 +2438,6 @@ void txgbe_init_mac_link_ops(struct txgbe_hw *hw) { struct txgbe_mac_info *mac = &hw->mac; @@ -40824,7 +77427,7 @@ index 00a8db78bf..776891ee7e 100644 /* * enable the laser control functions for SFP+ fiber * and MNG not enabled -@@ -2550,8 +2466,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw) +@@ -2550,8 +2480,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw) struct txgbe_phy_info *phy = &hw->phy; s32 err = 0; @@ -40833,7 +77436,7 @@ index 00a8db78bf..776891ee7e 100644 if 
((hw->device_id & 0xFF) == TXGBE_DEV_ID_QSFP) { /* Store flag indicating I2C bus access control unit. */ hw->phy.qsfp_shared_i2c_bus = TRUE; -@@ -2598,8 +2512,6 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) +@@ -2598,8 +2526,6 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) { s32 err = 0; @@ -40842,7 +77445,7 @@ index 00a8db78bf..776891ee7e 100644 if (hw->phy.sfp_type == txgbe_sfp_type_unknown) return 0; -@@ -2619,7 +2531,7 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) +@@ -2619,7 +2545,7 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw) msec_delay(hw->rom.semaphore_delay); if (err) { @@ -40851,7 +77454,7 @@ index 00a8db78bf..776891ee7e 100644 return TXGBE_ERR_SFP_SETUP_NOT_COMPLETE; } -@@ -2717,8 +2629,6 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw) +@@ -2717,8 +2643,6 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw) struct txgbe_rom_info *rom = &hw->rom; struct txgbe_mbx_info *mbx = &hw->mbx; @@ -40860,7 +77463,7 @@ index 00a8db78bf..776891ee7e 100644 /* BUS */ bus->set_lan_id = txgbe_set_lan_id_multi_port; -@@ -2845,8 +2755,6 @@ s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw, +@@ -2845,8 +2769,6 @@ s32 txgbe_get_link_capabilities_raptor(struct txgbe_hw *hw, s32 status = 0; u32 autoc = 0; @@ -40869,7 +77472,7 @@ index 00a8db78bf..776891ee7e 100644 /* Check if 1G SFP module. */ if (hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == txgbe_sfp_type_1g_cu_core1 || -@@ -2950,8 +2858,6 @@ u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw) +@@ -2950,8 +2872,6 @@ u32 txgbe_get_media_type_raptor(struct txgbe_hw *hw) { u32 media_type; @@ -40878,7 +77481,7 @@ index 00a8db78bf..776891ee7e 100644 if (hw->phy.ffe_set) txgbe_bp_mode_set(hw); -@@ -3010,8 +2916,6 @@ s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw, +@@ -3010,8 +2930,6 @@ s32 txgbe_start_mac_link_raptor(struct txgbe_hw *hw, s32 status = 0; bool got_lock = false; @@ -40887,7 +77490,7 @@ index 00a8db78bf..776891ee7e 100644 UNREFERENCED_PARAMETER(autoneg_wait_to_complete); /* reset_pipeline requires us to hold this lock as it writes to -@@ -3094,8 +2998,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +@@ -3094,8 +3012,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) **/ void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { @@ -40896,7 +77499,7 @@ index 00a8db78bf..776891ee7e 100644 /* Blocked by MNG FW so bail */ if (txgbe_check_reset_blocked(hw)) return; -@@ -3127,7 +3029,7 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, +@@ -3127,7 +3043,7 @@ void txgbe_set_hard_rate_select_speed(struct txgbe_hw *hw, esdp_reg &= ~(TXGBE_GPIOBIT_4 | TXGBE_GPIOBIT_5); break; default: @@ -40905,7 +77508,7 @@ index 00a8db78bf..776891ee7e 100644 return; } -@@ -3153,8 +3055,6 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, +@@ -3153,8 +3069,6 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, bool link_up = false; u32 autoc_reg = rd32_epcs(hw, SR_AN_MMD_ADV_REG1); @@ -40914,7 +77517,7 @@ index 00a8db78bf..776891ee7e 100644 /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; -@@ -3243,8 +3143,7 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, +@@ -3243,8 +3157,7 @@ s32 txgbe_setup_mac_link_smartspeed(struct txgbe_hw *hw, out: if (link_up && link_speed == TXGBE_LINK_SPEED_1GB_FULL) @@ -40924,7 +77527,7 @@ index 00a8db78bf..776891ee7e 100644 return status; } -@@ -3270,7 +3169,6 @@ s32 txgbe_setup_mac_link(struct txgbe_hw *hw, +@@ -3270,7 +3183,6 @@ s32 
txgbe_setup_mac_link(struct txgbe_hw *hw, u64 orig_autoc = 0; u32 link_capabilities = TXGBE_LINK_SPEED_UNKNOWN; @@ -40932,7 +77535,7 @@ index 00a8db78bf..776891ee7e 100644 UNREFERENCED_PARAMETER(autoneg_wait_to_complete); /* Check to see if speed passed in is supported. */ -@@ -3357,8 +3255,6 @@ static s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw, +@@ -3357,8 +3269,6 @@ static s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw, { s32 status; @@ -40941,7 +77544,7 @@ index 00a8db78bf..776891ee7e 100644 /* Setup the PHY according to input speed */ status = hw->phy.setup_link_speed(hw, speed, autoneg_wait_to_complete); -@@ -3467,8 +3363,6 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) +@@ -3467,8 +3377,6 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) s32 status; u32 autoc; @@ -40950,7 +77553,7 @@ index 00a8db78bf..776891ee7e 100644 /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.stop_hw(hw); if (status != 0) -@@ -3624,15 +3518,13 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +@@ -3624,15 +3532,13 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) u32 fdircmd; fdirctrl &= ~TXGBE_FDIRCTL_INITDONE; @@ -40967,7 +77570,7 @@ index 00a8db78bf..776891ee7e 100644 return err; } -@@ -3666,7 +3558,7 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) +@@ -3666,7 +3572,7 @@ s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw) msec_delay(1); } if (i >= TXGBE_FDIR_INIT_DONE_POLL) { @@ -40976,7 +77579,7 @@ index 00a8db78bf..776891ee7e 100644 return TXGBE_ERR_FDIR_REINIT_FAILED; } -@@ -3692,8 +3584,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) +@@ -3692,8 +3598,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) { s32 err = 0; @@ -40985,7 +77588,7 @@ index 00a8db78bf..776891ee7e 100644 err = txgbe_start_hw(hw); if (err != 0) goto out; -@@ -3718,8 +3608,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) +@@ -3718,8 +3622,6 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw) **/ s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval) { @@ -40994,7 +77597,7 @@ index 00a8db78bf..776891ee7e 100644 /* * Workaround silicon errata when enabling the Rx datapath. 
* If traffic is incoming before we enable the Rx unit, it could hang -@@ -3752,8 +3640,6 @@ bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw) +@@ -3752,8 +3654,6 @@ bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw) u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; @@ -41193,7 +77796,7 @@ index 4d64c6c3e9..7f2489a13f 100644 ret_val = txgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mng.c b/dpdk/drivers/net/txgbe/base/txgbe_mng.c -index dbe512122c..045a2f5de0 100644 +index dbe512122c..6255718ff7 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_mng.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_mng.c @@ -45,10 +45,8 @@ txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) @@ -41229,7 +77832,27 @@ index dbe512122c..045a2f5de0 100644 return TXGBE_ERR_HOST_INTERFACE_COMMAND; } -@@ -159,7 +155,7 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, +@@ -140,26 +136,12 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, + for (bi = 0; bi < dword_len; bi++) + buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi); + +- /* +- * If there is any thing in data position pull it in +- * Read Flash command requires reading buffer length from +- * two byes instead of one byte +- */ +- if (resp->cmd == 0x30) { +- for (; bi < dword_len + 2; bi++) +- buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi); +- +- buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3) +- & 0xF00) | resp->buf_len; +- hdr_size += (2 << 2); +- } else { +- buf_len = resp->buf_len; +- } ++ buf_len = resp->buf_len; + if (!buf_len) goto rel_out; if (length < buf_len + hdr_size) { @@ -41238,7 +77861,7 @@ index dbe512122c..045a2f5de0 100644 err = TXGBE_ERR_HOST_INTERFACE_COMMAND; goto rel_out; } -@@ -285,7 +281,6 @@ s32 txgbe_hic_set_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, +@@ -285,7 +267,6 @@ s32 txgbe_hic_set_drv_ver(struct txgbe_hw *hw, u8 maj, u8 min, int i; s32 ret_val = 0; @@ -41246,7 +77869,7 @@ index dbe512122c..045a2f5de0 100644 UNREFERENCED_PARAMETER(len, driver_ver); fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; -@@ -338,8 +333,6 @@ txgbe_hic_reset(struct txgbe_hw *hw) +@@ -338,8 +319,6 @@ txgbe_hic_reset(struct txgbe_hw *hw) int i; s32 err = 0; @@ -41268,7 +77891,7 @@ index 11fcf7e8fe..b62c0b0824 100644 #include "../txgbe_logs.h" diff --git a/dpdk/drivers/net/txgbe/base/txgbe_phy.c b/dpdk/drivers/net/txgbe/base/txgbe_phy.c -index 3f5229ecc2..9f46d5bdb0 100644 +index 3f5229ecc2..a7c11c50df 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_phy.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_phy.c @@ -35,7 +35,7 @@ static bool txgbe_identify_extphy(struct txgbe_hw *hw) @@ -41554,7 +78177,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 } wr32(hw, TXGBE_I2CENA, 0); -@@ -1411,9 +1367,17 @@ static void +@@ -1411,12 +1367,22 @@ static void txgbe_set_sgmii_an37_ability(struct txgbe_hw *hw) { u32 value; @@ -41572,8 +78195,14 @@ index 3f5229ecc2..9f46d5bdb0 100644 + wr32_epcs(hw, SR_MII_MMD_AN_CTL, 0x0105); wr32_epcs(hw, SR_MII_MMD_DIGI_CTL, 0x0200); value = rd32_epcs(hw, SR_MII_MMD_CTL); - value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9); -@@ -1455,6 +1419,10 @@ txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) +- value = (value & ~0x1200) | (0x1 << 12) | (0x1 << 9); ++ value = (value & ~0x1200) | (0x1 << 9); ++ if (hw->autoneg) ++ value |= SR_MII_MMD_CTL_AN_EN; + wr32_epcs(hw, SR_MII_MMD_CTL, value); + } + +@@ -1455,6 +1421,10 @@ txgbe_set_link_to_kr(struct txgbe_hw *hw, bool autoneg) if (!(hw->devarg.auto_neg == 1)) { wr32_epcs(hw, 
SR_AN_CTRL, 0); wr32_epcs(hw, VR_AN_KR_MODE_CL, 0); @@ -41584,7 +78213,68 @@ index 3f5229ecc2..9f46d5bdb0 100644 } if (hw->devarg.present == 1) { value = rd32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1); -@@ -2320,6 +2288,8 @@ void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc) +@@ -1551,8 +1521,9 @@ txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) + goto out; + } + +- wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, +- ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA); ++ hw->mac.disable_sec_tx_path(hw); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) +@@ -1725,9 +1696,10 @@ txgbe_set_link_to_kx4(struct txgbe_hw *hw, bool autoneg) + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else if (hw->fw_version <= TXGBE_FW_N_TXEQ) { + value = (0x1804 & ~0x3F3F); ++ value |= 40 << 8; + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + +- value = (0x50 & ~0x7F) | 40 | (1 << 6); ++ value = (0x50 & ~0x7F) | (1 << 6); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + out: +@@ -1765,8 +1737,9 @@ txgbe_set_link_to_kx(struct txgbe_hw *hw, + goto out; + } + +- wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, +- ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA); ++ hw->mac.disable_sec_tx_path(hw); + + /* 2. Disable xpcs AN-73 */ + if (!autoneg) +@@ -1939,10 +1912,10 @@ txgbe_set_link_to_kx(struct txgbe_hw *hw, + value |= hw->phy.ffe_post | (1 << 6); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } else if (hw->fw_version <= TXGBE_FW_N_TXEQ) { +- value = (0x1804 & ~0x3F3F) | (24 << 8) | 4; ++ value = (0x1804 & ~0x3F3F) | (40 << 8); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL0, value); + +- value = (0x50 & ~0x7F) | 16 | (1 << 6); ++ value = (0x50 & ~0x7F) | (1 << 6); + wr32_epcs(hw, TXGBE_PHY_TX_EQ_CTL1, value); + } + out: +@@ -1972,8 +1945,9 @@ txgbe_set_link_to_sfi(struct txgbe_hw *hw, + goto out; + } + +- wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, +- ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TXE, ~TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, ~TXGBE_MACRXCFG_ENA); ++ hw->mac.disable_sec_tx_path(hw); + + /* 2. Disable xpcs AN-73 */ + wr32_epcs(hw, SR_AN_CTRL, 0x0); +@@ -2320,8 +2294,12 @@ void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc) } } else if (hw->phy.media_type == txgbe_media_type_fiber) { txgbe_set_link_to_sfi(hw, speed); @@ -41592,8 +78282,20 @@ index 3f5229ecc2..9f46d5bdb0 100644 + txgbe_set_sgmii_an37_ability(hw); } ++ hw->mac.enable_sec_tx_path(hw); ++ if (speed == TXGBE_LINK_SPEED_10GB_FULL) -@@ -2416,8 +2386,6 @@ s32 txgbe_kr_handle(struct txgbe_hw *hw) + mactxcfg = TXGBE_MACTXCFG_SPEED_10G; + else if (speed == TXGBE_LINK_SPEED_1GB_FULL) +@@ -2331,6 +2309,7 @@ void txgbe_autoc_write(struct txgbe_hw *hw, u64 autoc) + wr32m(hw, TXGBE_MACTXCFG, + TXGBE_MACTXCFG_SPEED_MASK | TXGBE_MACTXCFG_TXE, + mactxcfg | TXGBE_MACTXCFG_TXE); ++ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, TXGBE_MACRXCFG_ENA); + } + + void txgbe_bp_down_event(struct txgbe_hw *hw) +@@ -2416,8 +2395,6 @@ s32 txgbe_kr_handle(struct txgbe_hw *hw) u32 value; s32 status = 0; @@ -41602,7 +78304,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 value = rd32_epcs(hw, VR_AN_INTR); BP_LOG("AN INTERRUPT!! 
value: 0x%x\n", value); if (!(value & VR_AN_INTR_PG_RCV)) { -@@ -2441,8 +2409,6 @@ static s32 txgbe_handle_bp_flow(u32 link_mode, struct txgbe_hw *hw) +@@ -2441,8 +2418,6 @@ static s32 txgbe_handle_bp_flow(u32 link_mode, struct txgbe_hw *hw) s32 status = 0; struct txgbe_backplane_ability local_ability, lp_ability; @@ -41611,7 +78313,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 local_ability.current_link_mode = link_mode; /* 1. Get the local AN73 Base Page Ability */ -@@ -2544,8 +2510,6 @@ static void txgbe_get_bp_ability(struct txgbe_backplane_ability *ability, +@@ -2544,8 +2519,6 @@ static void txgbe_get_bp_ability(struct txgbe_backplane_ability *ability, { u32 value = 0; @@ -41620,7 +78322,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 /* Link Partner Base Page */ if (link_partner == 1) { /* Read the link partner AN73 Base Page Ability Registers */ -@@ -2617,8 +2581,6 @@ static s32 txgbe_check_bp_ability(struct txgbe_backplane_ability *local_ability, +@@ -2617,8 +2590,6 @@ static s32 txgbe_check_bp_ability(struct txgbe_backplane_ability *local_ability, u32 com_link_abi; s32 ret = 0; @@ -41629,7 +78331,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 com_link_abi = local_ability->link_ability & lp_ability->link_ability; BP_LOG("com_link_abi = 0x%x, local_ability = 0x%x, lp_ability = 0x%x\n", com_link_abi, local_ability->link_ability, -@@ -2674,8 +2636,6 @@ static void txgbe_clear_bp_intr(u32 bit, u32 bit_high, struct txgbe_hw *hw) +@@ -2674,8 +2645,6 @@ static void txgbe_clear_bp_intr(u32 bit, u32 bit_high, struct txgbe_hw *hw) { u32 rdata = 0, wdata, i; @@ -41638,7 +78340,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 rdata = rd32_epcs(hw, VR_AN_INTR); BP_LOG("[Before clear]Read VR AN MMD Interrupt Register: 0x%x\n", rdata); -@@ -2700,8 +2660,6 @@ static s32 txgbe_enable_kr_training(struct txgbe_hw *hw) +@@ -2700,8 +2669,6 @@ static s32 txgbe_enable_kr_training(struct txgbe_hw *hw) s32 status = 0; u32 value = 0; @@ -41647,7 +78349,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 BP_LOG("Enable Clause 72 KR Training ...\n"); if (CL72_KRTR_PRBS_MODE_EN != 0xFFFF) { -@@ -2745,8 +2703,6 @@ static s32 txgbe_disable_kr_training(struct txgbe_hw *hw, s32 post, s32 mode) +@@ -2745,8 +2712,6 @@ static s32 txgbe_disable_kr_training(struct txgbe_hw *hw, s32 post, s32 mode) { s32 status = 0; @@ -41656,7 +78358,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 BP_LOG("Disable Clause 72 KR Training ...\n"); /* Read PHY Lane0 TX EQ before Clause 72 KR Training. */ txgbe_read_phy_lane_tx_eq(0, hw, post, mode); -@@ -2763,8 +2719,6 @@ static s32 txgbe_check_kr_training(struct txgbe_hw *hw) +@@ -2763,8 +2728,6 @@ static s32 txgbe_check_kr_training(struct txgbe_hw *hw) int i; int times = hw->devarg.poll ? 
35 : 20; @@ -41665,7 +78367,7 @@ index 3f5229ecc2..9f46d5bdb0 100644 for (i = 0; i < times; i++) { value = rd32_epcs(hw, SR_PMA_KR_LP_CEU); BP_LOG("SR PMA MMD 10GBASE-KR LP Coefficient Update Register: 0x%x\n", -@@ -2822,8 +2776,6 @@ static void txgbe_read_phy_lane_tx_eq(u16 lane, struct txgbe_hw *hw, +@@ -2822,8 +2785,6 @@ static void txgbe_read_phy_lane_tx_eq(u16 lane, struct txgbe_hw *hw, u32 addr; u32 tx_main_cursor, tx_pre_cursor, tx_post_cursor, lmain; @@ -41703,6 +78405,38 @@ index 144047ba62..dc22ef53e3 100644 } while (0) #define TXGBE_XPCS_IDAADDR 0x13000 +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_type.h b/dpdk/drivers/net/txgbe/base/txgbe_type.h +index d95467f9f8..e7971ccf1d 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_type.h ++++ b/dpdk/drivers/net/txgbe/base/txgbe_type.h +@@ -355,9 +355,9 @@ struct txgbe_hw_stats { + u64 tx_management_packets; + u64 rx_management_dropped; + u64 rx_dma_drop; +- u64 rx_drop_packets; + + /* Basic Error */ ++ u64 rx_rdb_drop; + u64 rx_crc_errors; + u64 rx_illegal_byte_errors; + u64 rx_error_bytes; +@@ -365,7 +365,7 @@ struct txgbe_hw_stats { + u64 rx_length_errors; + u64 rx_undersize_errors; + u64 rx_fragment_errors; +- u64 rx_oversize_errors; ++ u64 rx_oversize_cnt; + u64 rx_jabber_errors; + u64 rx_l3_l4_xsum_error; + u64 mac_local_errors; +@@ -782,6 +782,7 @@ struct txgbe_hw { + bool allow_unsupported_sfp; + bool need_crosstalk_fix; + bool dev_start; ++ bool autoneg; + struct txgbe_devargs devarg; + + uint64_t isb_dma; diff --git a/dpdk/drivers/net/txgbe/base/txgbe_vf.c b/dpdk/drivers/net/txgbe/base/txgbe_vf.c index fb6d6d90ea..a73502351e 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_vf.c @@ -41762,10 +78496,36 @@ index fb6d6d90ea..a73502351e 100644 } diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -index 47d0e6ea40..49948e62bc 100644 +index 47d0e6ea40..bd587b4f71 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -@@ -376,7 +376,7 @@ txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, +@@ -179,12 +179,16 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { + HW_XSTAT(tx_total_packets), + HW_XSTAT(rx_total_missed_packets), + HW_XSTAT(rx_broadcast_packets), ++ HW_XSTAT(tx_broadcast_packets), + HW_XSTAT(rx_multicast_packets), ++ HW_XSTAT(tx_multicast_packets), + HW_XSTAT(rx_management_packets), + HW_XSTAT(tx_management_packets), + HW_XSTAT(rx_management_dropped), ++ HW_XSTAT(rx_dma_drop), + + /* Basic Error */ ++ HW_XSTAT(rx_rdb_drop), + HW_XSTAT(rx_crc_errors), + HW_XSTAT(rx_illegal_byte_errors), + HW_XSTAT(rx_error_bytes), +@@ -192,7 +196,7 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { + HW_XSTAT(rx_length_errors), + HW_XSTAT(rx_undersize_errors), + HW_XSTAT(rx_fragment_errors), +- HW_XSTAT(rx_oversize_errors), ++ HW_XSTAT(rx_oversize_cnt), + HW_XSTAT(rx_jabber_errors), + HW_XSTAT(rx_l3_l4_xsum_error), + HW_XSTAT(mac_local_errors), +@@ -376,7 +380,7 @@ txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, if (hw->mac.type != txgbe_mac_raptor) return -ENOSYS; @@ -41774,7 +78534,27 @@ index 47d0e6ea40..49948e62bc 100644 return -EIO; PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", -@@ -1678,7 +1678,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1482,6 +1486,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) + return -EINVAL; + } + } ++ ++ /* ++ * When DCB/VT is off, maximum number of queues changes ++ */ ++ if (dev_conf->txmode.mq_mode == 
RTE_ETH_MQ_TX_NONE) { ++ if (nb_tx_q > TXGBE_NONE_MODE_TX_NB_QUEUES) { ++ PMD_INIT_LOG(ERR, ++ "Neither VT nor DCB are enabled, " ++ "nb_tx_q > %d.", ++ TXGBE_NONE_MODE_TX_NB_QUEUES); ++ return -EINVAL; ++ } ++ } + } + return 0; + } +@@ -1678,7 +1695,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) return -ENOMEM; } } @@ -41783,7 +78563,64 @@ index 47d0e6ea40..49948e62bc 100644 txgbe_configure_msix(dev); /* initialize transmission unit */ -@@ -1937,6 +1937,7 @@ txgbe_dev_set_link_up(struct rte_eth_dev *dev) +@@ -1774,6 +1791,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) + speed = (TXGBE_LINK_SPEED_100M_FULL | + TXGBE_LINK_SPEED_1GB_FULL | + TXGBE_LINK_SPEED_10GB_FULL); ++ hw->autoneg = true; + } else { + if (*link_speeds & RTE_ETH_LINK_SPEED_10G) + speed |= TXGBE_LINK_SPEED_10GB_FULL; +@@ -1785,6 +1803,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) + speed |= TXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & RTE_ETH_LINK_SPEED_100M) + speed |= TXGBE_LINK_SPEED_100M_FULL; ++ hw->autoneg = false; + } + + err = hw->mac.setup_link(hw, speed, link_up); +@@ -1863,7 +1882,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); + + if (hw->adapter_stopped) +- return 0; ++ goto out; + + PMD_INIT_FUNC_TRACE(); + +@@ -1882,14 +1901,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) + vfinfo[vf].clear_to_send = false; + +- if (hw->phy.media_type == txgbe_media_type_copper) { +- /* Turn off the copper */ +- hw->phy.set_phy_power(hw, false); +- } else { +- /* Turn off the laser */ +- hw->mac.disable_tx_laser(hw); +- } +- + txgbe_dev_clear_queues(dev); + + /* Clear stored conf */ +@@ -1920,6 +1931,16 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + dev->data->dev_started = 0; + hw->dev_start = false; + ++out: ++ /* close phy to prevent reset in dev_close from restarting physical link */ ++ if (hw->phy.media_type == txgbe_media_type_copper) { ++ /* Turn off the copper */ ++ hw->phy.set_phy_power(hw, false); ++ } else { ++ /* Turn off the laser */ ++ hw->mac.disable_tx_laser(hw); ++ } ++ + return 0; + } + +@@ -1937,6 +1958,7 @@ txgbe_dev_set_link_up(struct rte_eth_dev *dev) } else { /* Turn on the laser */ hw->mac.enable_tx_laser(hw); @@ -41791,7 +78628,7 @@ index 47d0e6ea40..49948e62bc 100644 txgbe_dev_link_update(dev, 0); } -@@ -1957,6 +1958,7 @@ txgbe_dev_set_link_down(struct rte_eth_dev *dev) +@@ -1957,6 +1979,7 @@ txgbe_dev_set_link_down(struct rte_eth_dev *dev) } else { /* Turn off the laser */ hw->mac.disable_tx_laser(hw); @@ -41799,7 +78636,28 @@ index 47d0e6ea40..49948e62bc 100644 txgbe_dev_link_update(dev, 0); } -@@ -2034,6 +2036,7 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -1977,6 +2000,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + txgbe_pf_reset_hw(hw); + + ret = txgbe_dev_stop(dev); +@@ -2005,8 +2031,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) + rte_delay_ms(100); + } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); + +- /* cancel the delay handler before remove dev */ ++ /* cancel all alarm handler before remove dev */ + rte_eal_alarm_cancel(txgbe_dev_interrupt_delayed_handler, dev); ++ rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev); + + /* uninitialize PF if max_vfs not zero */ + txgbe_pf_host_uninit(dev); +@@ -2034,6 +2061,7 @@ txgbe_dev_close(struct rte_eth_dev *dev) #ifdef RTE_LIB_SECURITY rte_free(dev->security_ctx); @@ -41807,7 +78665,76 @@ index 47d0e6ea40..49948e62bc 100644 
#endif return ret; -@@ -3682,7 +3685,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, +@@ -2144,7 +2172,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, + hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL); + hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL); + hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP); +- hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP); ++ hw_stats->rx_rdb_drop += rd32(hw, TXGBE_PBRXDROP); + + /* MAC Stats */ + hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL); +@@ -2176,7 +2204,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw, + rd64(hw, TXGBE_MACTX1024TOMAXL); + + hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL); +- hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE); ++ hw_stats->rx_oversize_cnt += rd32(hw, TXGBE_MACRXOVERSIZE); + hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER); + + /* MNG Stats */ +@@ -2298,8 +2326,7 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + hw_stats->rx_mac_short_packet_dropped + + hw_stats->rx_length_errors + + hw_stats->rx_undersize_errors + +- hw_stats->rx_oversize_errors + +- hw_stats->rx_drop_packets + ++ hw_stats->rx_rdb_drop + + hw_stats->rx_illegal_byte_errors + + hw_stats->rx_error_bytes + + hw_stats->rx_fragment_errors + +@@ -2759,6 +2786,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, + break; + } + ++ /* Re configure MAC RX */ ++ if (hw->mac.type == txgbe_mac_raptor) ++ wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_PROMISC, ++ TXGBE_MACRXFLT_PROMISC); ++ + return rte_eth_linkstatus_set(dev, &link); + } + +@@ -2935,9 +2967,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, + rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX) + wr32(hw, TXGBE_PX_INTA, 1); + +- /* clear all cause mask */ +- txgbe_disable_intr(hw); +- + /* read-on-clear nic registers here */ + eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); +@@ -2960,6 +2989,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, + if (eicr & TXGBE_ICRMISC_GPIO) + intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; + ++ ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC] = 0; ++ + return 0; + } + +@@ -3129,7 +3160,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) + } + + /* restore original mask */ +- intr->mask_misc |= TXGBE_ICRMISC_LSC; ++ if (dev->data->dev_conf.intr_conf.lsc == 1) ++ intr->mask_misc |= TXGBE_ICRMISC_LSC; + + intr->mask = intr->mask_orig; + intr->mask_orig = 0; +@@ -3682,7 +3714,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, wr32(hw, TXGBE_IVARMISC, tmp); } else { /* rx or tx causes */ @@ -41816,7 +78743,7 @@ index 47d0e6ea40..49948e62bc 100644 idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -4387,7 +4390,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev) +@@ -4387,7 +4419,7 @@ txgbe_timesync_disable(struct rte_eth_dev *dev) /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. 
*/ wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0); @@ -41825,6 +78752,18 @@ index 47d0e6ea40..49948e62bc 100644 wr32(hw, TXGBE_TSTIMEINC, 0); return 0; +diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.h b/dpdk/drivers/net/txgbe/txgbe_ethdev.h +index 262dbb5e38..edc3311e19 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ethdev.h ++++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.h +@@ -40,6 +40,7 @@ + /*Default value of Max Rx Queue*/ + #define TXGBE_MAX_RX_QUEUE_NUM 128 + #define TXGBE_VMDQ_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM ++#define TXGBE_NONE_MODE_TX_NB_QUEUES 64 + + #ifndef NBBY + #define NBBY 8 /* number of bits in a byte */ diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c index 84b960b8f9..f52cd8bc19 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c @@ -41856,6 +78795,86 @@ index 84b960b8f9..f52cd8bc19 100644 intr->flags |= TXGBE_FLAG_MAILBOX; /* To avoid compiler warnings set eicr to used. */ +diff --git a/dpdk/drivers/net/txgbe/txgbe_flow.c b/dpdk/drivers/net/txgbe/txgbe_flow.c +index 6d7fd18428..ac9e8605c1 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_flow.c ++++ b/dpdk/drivers/net/txgbe/txgbe_flow.c +@@ -1583,9 +1583,7 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, + * value. So, we need not do anything for the not provided fields later. + */ + memset(rule, 0, sizeof(struct txgbe_fdir_rule)); +- memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask)); +- rule->mask.vlan_tci_mask = 0; +- rule->mask.flex_bytes_mask = 0; ++ memset(&rule->mask, 0, sizeof(struct txgbe_hw_fdir_mask)); + + /** + * The first not void item should be +@@ -1867,7 +1865,10 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, + * as we must have a flow type. + */ + rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP; +- ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP]; ++ if (rule->input.flow_type & TXGBE_ATR_FLOW_TYPE_IPV6) ++ ptype = txgbe_ptype_table[TXGBE_PT_IPV6_TCP]; ++ else ++ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, +@@ -1931,7 +1932,10 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, + * as we must have a flow type. + */ + rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP; +- ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP]; ++ if (rule->input.flow_type & TXGBE_ATR_FLOW_TYPE_IPV6) ++ ptype = txgbe_ptype_table[TXGBE_PT_IPV6_UDP]; ++ else ++ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, +@@ -1990,7 +1994,10 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, + * as we must have a flow type. 
+ */ + rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP; +- ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP]; ++ if (rule->input.flow_type & TXGBE_ATR_FLOW_TYPE_IPV6) ++ ptype = txgbe_ptype_table[TXGBE_PT_IPV6_SCTP]; ++ else ++ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP]; + /*Not supported last point for range*/ + if (item->last) { + rte_flow_error_set(error, EINVAL, +@@ -2141,6 +2148,16 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused, + + rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype)); + ++ if (rule->input.flow_type & TXGBE_ATR_FLOW_TYPE_IPV6) { ++ if (rule->input.flow_type & TXGBE_ATR_L4TYPE_MASK) ++ rule->input.pkt_type &= 0xFFFF; ++ else ++ rule->input.pkt_type &= 0xF8FF; ++ ++ rule->input.flow_type &= TXGBE_ATR_L3TYPE_MASK | ++ TXGBE_ATR_L4TYPE_MASK; ++ } ++ + return txgbe_parse_fdir_act_attr(attr, actions, rule, error); + } + +@@ -2827,8 +2844,10 @@ txgbe_flow_create(struct rte_eth_dev *dev, + ret = memcmp(&fdir_info->mask, + &fdir_rule.mask, + sizeof(struct txgbe_hw_fdir_mask)); +- if (ret) ++ if (ret) { ++ PMD_DRV_LOG(ERR, "only support one global mask"); + goto out; ++ } + + if (fdir_info->flex_bytes_offset != + fdir_rule.flex_bytes_offset) diff --git a/dpdk/drivers/net/txgbe/txgbe_ipsec.c b/dpdk/drivers/net/txgbe/txgbe_ipsec.c index 445733f3ba..3ca3d85ed5 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ipsec.c @@ -41909,11 +78928,538 @@ index 30be287330..0b82fb1a88 100644 hw->mac.clear_vmdq(hw, 0, BIT_MASK32); /* clear VMDq map to scan rar 127 */ +diff --git a/dpdk/drivers/net/txgbe/txgbe_ptypes.c b/dpdk/drivers/net/txgbe/txgbe_ptypes.c +index 0ed757d820..f9da4e4ceb 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ptypes.c ++++ b/dpdk/drivers/net/txgbe/txgbe_ptypes.c +@@ -320,8 +320,6 @@ txgbe_encode_ptype_tunnel(u32 ptype) + ptid |= TXGBE_PTID_TUN_EI; + break; + case RTE_PTYPE_TUNNEL_GRE: +- ptid |= TXGBE_PTID_TUN_EIG; +- break; + case RTE_PTYPE_TUNNEL_VXLAN: + case RTE_PTYPE_TUNNEL_VXLAN_GPE: + case RTE_PTYPE_TUNNEL_NVGRE: +@@ -332,20 +330,6 @@ txgbe_encode_ptype_tunnel(u32 ptype) + return ptid; + } + +- switch (ptype & RTE_PTYPE_INNER_L2_MASK) { +- case RTE_PTYPE_INNER_L2_ETHER: +- ptid |= TXGBE_PTID_TUN_EIGM; +- break; +- case RTE_PTYPE_INNER_L2_ETHER_VLAN: +- ptid |= TXGBE_PTID_TUN_EIGMV; +- break; +- case RTE_PTYPE_INNER_L2_ETHER_QINQ: +- ptid |= TXGBE_PTID_TUN_EIGMV; +- break; +- default: +- break; +- } +- + switch (ptype & RTE_PTYPE_INNER_L3_MASK) { + case RTE_PTYPE_INNER_L3_IPV4: + case RTE_PTYPE_INNER_L3_IPV4_EXT: +diff --git a/dpdk/drivers/net/txgbe/txgbe_ptypes.h b/dpdk/drivers/net/txgbe/txgbe_ptypes.h +index fa6c347d53..6fa8147f05 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ptypes.h ++++ b/dpdk/drivers/net/txgbe/txgbe_ptypes.h +@@ -348,4 +348,9 @@ struct txgbe_nvgrehdr { + __be32 tni; + }; + ++struct txgbe_grehdr { ++ __be16 flags; ++ __be16 proto; ++}; ++ + #endif /* _TXGBE_PTYPE_H_ */ +diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c +index 35b77cb271..f85ec77dd5 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c ++++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c +@@ -516,20 +516,21 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) + return cmdtype; + } + +-static inline uint8_t +-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) ++static inline uint32_t ++tx_desc_ol_flags_to_ptype(uint64_t oflags) + { ++ uint32_t ptype; + bool tun; + +- if (ptype) +- return txgbe_encode_ptype(ptype); +- + /* Only support flags in TXGBE_TX_OFFLOAD_MASK */ + tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK); + + /* L2 level */ + 
ptype = RTE_PTYPE_L2_ETHER; + if (oflags & RTE_MBUF_F_TX_VLAN) ++ ptype |= (tun ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN); ++ ++ if (oflags & RTE_MBUF_F_TX_QINQ) /* tunnel + QINQ is not supported */ + ptype |= RTE_PTYPE_L2_ETHER_VLAN; + + /* L3 level */ +@@ -571,7 +572,6 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + ptype |= RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_TUNNEL_GRE; +- ptype |= RTE_PTYPE_INNER_L2_ETHER; + break; + case RTE_MBUF_F_TX_TUNNEL_GENEVE: + ptype |= RTE_PTYPE_L2_ETHER | +@@ -587,6 +587,16 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + break; + } + ++ return ptype; ++} ++ ++static inline uint8_t ++tx_desc_ol_flags_to_ptid(uint64_t oflags) ++{ ++ uint32_t ptype; ++ ++ ptype = tx_desc_ol_flags_to_ptype(oflags); ++ + return txgbe_encode_ptype(ptype); + } + +@@ -694,22 +704,24 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) + static inline uint8_t + txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) + { +- uint64_t l2_none, l2_mac, l2_mac_vlan; ++ uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan; ++ uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan; + uint8_t ptid = 0; + +- if ((tx_pkt->ol_flags & (RTE_MBUF_F_TX_TUNNEL_VXLAN | +- RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) == 0) +- return ptid; ++ l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); ++ l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr); ++ l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr); + +- l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); +- l2_mac = l2_none + sizeof(struct rte_ether_hdr); +- l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr); ++ l2_gre = sizeof(struct txgbe_grehdr); ++ l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr); ++ l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr); + +- if (tx_pkt->l2_len == l2_none) ++ if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre) + ptid = TXGBE_PTID_TUN_EIG; +- else if (tx_pkt->l2_len == l2_mac) ++ else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac) + ptid = TXGBE_PTID_TUN_EIGM; +- else if (tx_pkt->l2_len == l2_mac_vlan) ++ else if (tx_pkt->l2_len == l2_vxlan_mac_vlan || ++ tx_pkt->l2_len == l2_gre_mac_vlan) + ptid = TXGBE_PTID_TUN_EIGMV; + + return ptid; +@@ -776,8 +788,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* If hardware offload required */ + tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { +- tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, +- tx_pkt->packet_type); ++ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req); + if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) + tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); + tx_offload.l2_len = tx_pkt->l2_len; +@@ -2795,6 +2806,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } ++ ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { +@@ -2804,6 +2817,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) + txgbe_rx_queue_release_mbufs(rxq); + txgbe_reset_rx_queue(adapter, rxq); + } ++ ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + } + +@@ -4382,7 +4397,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) + */ + buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM); +- buf_size = ROUND_UP(buf_size, 0x1 << 10); ++ buf_size = ROUND_DOWN(buf_size, 0x1 << 10); + srrctl |= TXGBE_RXCFG_PKTLEN(buf_size); + + wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl); +@@ 
-4994,6 +5009,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); ++ else ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; +@@ -5008,6 +5025,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) + } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); ++ else ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; + rte_wmb(); + wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); + } diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c -index 070f0e6dfd..a280e788fb 100644 +index 070f0e6dfd..1306cc2935 100644 --- a/dpdk/drivers/net/vhost/rte_eth_vhost.c +++ b/dpdk/drivers/net/vhost/rte_eth_vhost.c -@@ -716,10 +716,11 @@ eth_vhost_install_intr(struct rte_eth_dev *dev) +@@ -97,8 +97,9 @@ struct vhost_queue { + uint16_t port; + uint16_t virtqueue_id; + struct vhost_stats stats; +- int intr_enable; + rte_spinlock_t intr_lock; ++ struct epoll_event ev; ++ int kickfd; + }; + + struct pmd_internal { +@@ -519,115 +520,68 @@ find_internal_resource(char *ifname) + return list; + } + +-static int ++static void + eth_vhost_update_intr(struct rte_eth_dev *eth_dev, uint16_t rxq_idx) + { +- struct rte_intr_handle *handle = eth_dev->intr_handle; +- struct rte_epoll_event rev, *elist; +- int epfd, ret; +- +- if (handle == NULL) +- return 0; ++ struct rte_vhost_vring vring; ++ struct vhost_queue *vq; + +- elist = rte_intr_elist_index_get(handle, rxq_idx); +- if (rte_intr_efds_index_get(handle, rxq_idx) == elist->fd) +- return 0; ++ vq = eth_dev->data->rx_queues[rxq_idx]; ++ if (vq == NULL || vq->vid < 0) ++ return; + +- VHOST_LOG(INFO, "kickfd for rxq-%d was changed, updating handler.\n", +- rxq_idx); ++ if (rte_vhost_get_vhost_vring(vq->vid, (rxq_idx << 1) + 1, &vring) < 0) { ++ VHOST_LOG(DEBUG, "Failed to get rxq-%d's vring, skip!\n", rxq_idx); ++ return; ++ } + +- if (elist->fd != -1) +- VHOST_LOG(ERR, "Unexpected previous kickfd value (Got %d, expected -1).\n", +- elist->fd); ++ rte_spinlock_lock(&vq->intr_lock); + +- /* +- * First remove invalid epoll event, and then install +- * the new one. May be solved with a proper API in the +- * future. 
+- */ +- epfd = elist->epfd; +- rev = *elist; +- ret = rte_epoll_ctl(epfd, EPOLL_CTL_DEL, rev.fd, +- elist); +- if (ret) { +- VHOST_LOG(ERR, "Delete epoll event failed.\n"); +- return ret; ++ /* Remove previous kickfd from proxy epoll */ ++ if (vq->kickfd >= 0 && vq->kickfd != vring.kickfd) { ++ if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) { ++ VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n", ++ vq->kickfd, rxq_idx, strerror(errno)); ++ } else { ++ VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n", ++ vq->kickfd, rxq_idx); ++ } ++ vq->kickfd = -1; + } + +- rev.fd = rte_intr_efds_index_get(handle, rxq_idx); +- if (rte_intr_elist_index_set(handle, rxq_idx, rev)) +- return -rte_errno; +- +- elist = rte_intr_elist_index_get(handle, rxq_idx); +- ret = rte_epoll_ctl(epfd, EPOLL_CTL_ADD, rev.fd, elist); +- if (ret) { +- VHOST_LOG(ERR, "Add epoll event failed.\n"); +- return ret; ++ /* Add new one, if valid */ ++ if (vq->kickfd != vring.kickfd && vring.kickfd >= 0) { ++ if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_ADD, vring.kickfd, &vq->ev) < 0) { ++ VHOST_LOG(ERR, "Failed to register %d in rxq-%d epoll: %s\n", ++ vring.kickfd, rxq_idx, strerror(errno)); ++ } else { ++ vq->kickfd = vring.kickfd; ++ VHOST_LOG(DEBUG, "Registered %d in rxq-%d epoll\n", ++ vq->kickfd, rxq_idx); ++ } + } + +- return 0; ++ rte_spinlock_unlock(&vq->intr_lock); + } + + static int + eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid) + { +- struct rte_vhost_vring vring; +- struct vhost_queue *vq; +- int old_intr_enable, ret = 0; +- +- vq = dev->data->rx_queues[qid]; +- if (!vq) { +- VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); +- return -1; +- } +- +- rte_spinlock_lock(&vq->intr_lock); +- old_intr_enable = vq->intr_enable; +- vq->intr_enable = 1; +- ret = eth_vhost_update_intr(dev, qid); +- rte_spinlock_unlock(&vq->intr_lock); +- +- if (ret < 0) { +- VHOST_LOG(ERR, "Failed to update rxq%d's intr\n", qid); +- vq->intr_enable = old_intr_enable; +- return ret; +- } ++ struct vhost_queue *vq = dev->data->rx_queues[qid]; + +- ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); +- if (ret < 0) { +- VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid); +- return ret; +- } +- VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid); +- rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); +- rte_wmb(); ++ if (vq->vid >= 0) ++ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1); + +- return ret; ++ return 0; + } + + static int + eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid) + { +- struct rte_vhost_vring vring; +- struct vhost_queue *vq; +- int ret = 0; +- +- vq = dev->data->rx_queues[qid]; +- if (!vq) { +- VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid); +- return -1; +- } +- +- ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring); +- if (ret < 0) { +- VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid); +- return ret; +- } +- VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid); +- rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0); +- rte_wmb(); ++ struct vhost_queue *vq = dev->data->rx_queues[qid]; + +- vq->intr_enable = 0; ++ if (vq->vid >= 0) ++ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0); + + return 0; + } +@@ -638,6 +592,14 @@ eth_vhost_uninstall_intr(struct rte_eth_dev *dev) + struct rte_intr_handle *intr_handle = dev->intr_handle; + + if (intr_handle != NULL) { ++ int i; ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) { ++ int epoll_fd = 
rte_intr_efds_index_get(dev->intr_handle, i); ++ ++ if (epoll_fd >= 0) ++ close(epoll_fd); ++ } + rte_intr_vec_list_free(intr_handle); + rte_intr_instance_free(intr_handle); + } +@@ -647,79 +609,119 @@ eth_vhost_uninstall_intr(struct rte_eth_dev *dev) + static int + eth_vhost_install_intr(struct rte_eth_dev *dev) + { +- struct rte_vhost_vring vring; +- struct vhost_queue *vq; + int nb_rxq = dev->data->nb_rx_queues; +- int i; +- int ret; ++ struct vhost_queue *vq; + +- /* uninstall firstly if we are reconnecting */ +- if (dev->intr_handle != NULL) +- eth_vhost_uninstall_intr(dev); ++ int ret; ++ int i; + + dev->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE); + if (dev->intr_handle == NULL) { + VHOST_LOG(ERR, "Fail to allocate intr_handle\n"); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto error; ++ } ++ if (rte_intr_efd_counter_size_set(dev->intr_handle, 0)) { ++ ret = -rte_errno; ++ goto error; + } +- if (rte_intr_efd_counter_size_set(dev->intr_handle, sizeof(uint64_t))) +- return -rte_errno; + + if (rte_intr_vec_list_alloc(dev->intr_handle, NULL, nb_rxq)) { +- VHOST_LOG(ERR, +- "Failed to allocate memory for interrupt vector\n"); +- rte_intr_instance_free(dev->intr_handle); +- return -ENOMEM; ++ VHOST_LOG(ERR, "Failed to allocate memory for interrupt vector\n"); ++ ret = -ENOMEM; ++ goto error; + } + +- +- VHOST_LOG(INFO, "Prepare intr vec\n"); ++ VHOST_LOG(DEBUG, "Prepare intr vec\n"); + for (i = 0; i < nb_rxq; i++) { +- if (rte_intr_vec_list_index_set(dev->intr_handle, i, RTE_INTR_VEC_RXTX_OFFSET + i)) +- return -rte_errno; +- if (rte_intr_efds_index_set(dev->intr_handle, i, -1)) +- return -rte_errno; +- vq = dev->data->rx_queues[i]; +- if (!vq) { +- VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i); +- continue; +- } ++ int epoll_fd = epoll_create1(0); + +- ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring); +- if (ret < 0) { +- VHOST_LOG(INFO, +- "Failed to get rxq-%d's vring, skip!\n", i); +- continue; ++ if (epoll_fd < 0) { ++ VHOST_LOG(ERR, "Failed to create proxy epoll fd for rxq-%d\n", i); ++ ret = -errno; ++ goto error; + } + +- if (vring.kickfd < 0) { +- VHOST_LOG(INFO, +- "rxq-%d's kickfd is invalid, skip!\n", i); +- continue; ++ if (rte_intr_vec_list_index_set(dev->intr_handle, i, ++ RTE_INTR_VEC_RXTX_OFFSET + i) || ++ rte_intr_efds_index_set(dev->intr_handle, i, epoll_fd)) { ++ ret = -rte_errno; ++ close(epoll_fd); ++ goto error; + } + +- if (rte_intr_efds_index_set(dev->intr_handle, i, vring.kickfd)) +- continue; +- VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i); ++ vq = dev->data->rx_queues[i]; ++ memset(&vq->ev, 0, sizeof(vq->ev)); ++ vq->ev.events = EPOLLIN; ++ vq->ev.data.fd = epoll_fd; + } + +- if (rte_intr_nb_efd_set(dev->intr_handle, nb_rxq)) +- return -rte_errno; ++ if (rte_intr_nb_efd_set(dev->intr_handle, nb_rxq)) { ++ ret = -rte_errno; ++ goto error; ++ } ++ if (rte_intr_max_intr_set(dev->intr_handle, nb_rxq + 1)) { ++ ret = -rte_errno; ++ goto error; ++ } ++ if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_VDEV)) { ++ ret = -rte_errno; ++ goto error; ++ } + +- if (rte_intr_max_intr_set(dev->intr_handle, nb_rxq + 1)) +- return -rte_errno; ++ return 0; + +- if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_VDEV)) +- return -rte_errno; ++error: ++ eth_vhost_uninstall_intr(dev); ++ return ret; ++} + +- return 0; ++static void ++eth_vhost_configure_intr(struct rte_eth_dev *dev) ++{ ++ int i; ++ ++ VHOST_LOG(DEBUG, "Configure intr vec\n"); ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ eth_vhost_update_intr(dev, 
i); ++} ++ ++static void ++eth_vhost_unconfigure_intr(struct rte_eth_dev *eth_dev) ++{ ++ struct vhost_queue *vq; ++ int i; ++ ++ VHOST_LOG(DEBUG, "Unconfigure intr vec\n"); ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { ++ vq = eth_dev->data->rx_queues[i]; ++ if (vq == NULL || vq->vid < 0) ++ continue; ++ ++ rte_spinlock_lock(&vq->intr_lock); ++ ++ /* Remove previous kickfd from proxy epoll */ ++ if (vq->kickfd >= 0) { ++ if (epoll_ctl(vq->ev.data.fd, EPOLL_CTL_DEL, vq->kickfd, &vq->ev) < 0) { ++ VHOST_LOG(DEBUG, "Failed to unregister %d from rxq-%d epoll: %s\n", ++ vq->kickfd, i, strerror(errno)); ++ } else { ++ VHOST_LOG(DEBUG, "Unregistered %d from rxq-%d epoll\n", ++ vq->kickfd, i); ++ } ++ vq->kickfd = -1; ++ } ++ ++ rte_spinlock_unlock(&vq->intr_lock); ++ } } static void @@ -41926,7 +79472,7 @@ index 070f0e6dfd..a280e788fb 100644 unsigned int i; int allow_queuing = 1; -@@ -730,13 +731,18 @@ update_queuing_status(struct rte_eth_dev *dev) +@@ -730,13 +732,18 @@ update_queuing_status(struct rte_eth_dev *dev) rte_atomic32_read(&internal->dev_attached) == 0) allow_queuing = 0; @@ -41947,7 +79493,7 @@ index 070f0e6dfd..a280e788fb 100644 rte_pause(); } -@@ -744,8 +750,11 @@ update_queuing_status(struct rte_eth_dev *dev) +@@ -744,8 +751,11 @@ update_queuing_status(struct rte_eth_dev *dev) vq = dev->data->tx_queues[i]; if (vq == NULL) continue; @@ -41961,7 +79507,26 @@ index 070f0e6dfd..a280e788fb 100644 rte_pause(); } } -@@ -827,7 +836,7 @@ new_device(int vid) +@@ -807,16 +817,8 @@ new_device(int vid) + internal->vid = vid; + if (rte_atomic32_read(&internal->started) == 1) { + queue_setup(eth_dev, internal); +- +- if (dev_conf->intr_conf.rxq) { +- if (eth_vhost_install_intr(eth_dev) < 0) { +- VHOST_LOG(INFO, +- "Failed to install interrupt handler."); +- return -1; +- } +- } +- } else { +- VHOST_LOG(INFO, "RX/TX queues not exist yet\n"); ++ if (dev_conf->intr_conf.rxq) ++ eth_vhost_configure_intr(eth_dev); + } + + for (i = 0; i < rte_vhost_get_vring_num(vid); i++) +@@ -827,7 +829,7 @@ new_device(int vid) eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; rte_atomic32_set(&internal->dev_attached, 1); @@ -41970,16 +79535,85 @@ index 070f0e6dfd..a280e788fb 100644 VHOST_LOG(INFO, "Vhost device %d created\n", vid); -@@ -857,7 +866,7 @@ destroy_device(int vid) +@@ -857,7 +859,8 @@ destroy_device(int vid) internal = eth_dev->data->dev_private; rte_atomic32_set(&internal->dev_attached, 0); - update_queuing_status(eth_dev); + update_queuing_status(eth_dev, true); ++ eth_vhost_unconfigure_intr(eth_dev); eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; -@@ -967,6 +976,8 @@ vring_state_changed(int vid, uint16_t vring, int enable) +@@ -886,55 +889,10 @@ destroy_device(int vid) + rte_spinlock_unlock(&state->lock); + + VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid); +- eth_vhost_uninstall_intr(eth_dev); + + rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } + +-static int +-vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id) +-{ +- struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; +- struct pmd_internal *internal = eth_dev->data->dev_private; +- struct vhost_queue *vq; +- struct rte_vhost_vring vring; +- int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1; +- int ret = 0; +- +- /* +- * The vring kickfd may be changed after the new device notification. +- * Update it when the vring state is updated. 
+- */ +- if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues && +- rte_atomic32_read(&internal->dev_attached) && +- rte_atomic32_read(&internal->started) && +- dev_conf->intr_conf.rxq) { +- ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring); +- if (ret) { +- VHOST_LOG(ERR, "Failed to get vring %d information.\n", +- vring_id); +- return ret; +- } +- +- if (rte_intr_efds_index_set(eth_dev->intr_handle, rx_idx, +- vring.kickfd)) +- return -rte_errno; +- +- vq = eth_dev->data->rx_queues[rx_idx]; +- if (!vq) { +- VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx); +- return -1; +- } +- +- rte_spinlock_lock(&vq->intr_lock); +- if (vq->intr_enable) +- ret = eth_vhost_update_intr(eth_dev, rx_idx); +- rte_spinlock_unlock(&vq->intr_lock); +- } +- +- return ret; +-} +- + static int + vring_state_changed(int vid, uint16_t vring, int enable) + { +@@ -954,9 +912,8 @@ vring_state_changed(int vid, uint16_t vring, int enable) + /* won't be NULL */ + state = vring_states[eth_dev->data->port_id]; + +- if (enable && vring_conf_update(vid, eth_dev, vring)) +- VHOST_LOG(INFO, "Failed to update vring-%d configuration.\n", +- (int)vring); ++ if (eth_dev->data->dev_conf.intr_conf.rxq && vring % 2) ++ eth_vhost_update_intr(eth_dev, (vring - 1) >> 1); + + rte_spinlock_lock(&state->lock); + if (state->cur[vring] == enable) { +@@ -967,6 +924,8 @@ vring_state_changed(int vid, uint16_t vring, int enable) state->max_vring = RTE_MAX(vring, state->max_vring); rte_spinlock_unlock(&state->lock); @@ -41988,25 +79622,88 @@ index 070f0e6dfd..a280e788fb 100644 VHOST_LOG(INFO, "vring%u is %s\n", vring, enable ? "enabled" : "disabled"); -@@ -1152,7 +1163,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev) +@@ -1138,21 +1097,26 @@ eth_dev_start(struct rte_eth_dev *eth_dev) + { + struct pmd_internal *internal = eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; ++ uint16_t i; + +- queue_setup(eth_dev, internal); +- +- if (rte_atomic32_read(&internal->dev_attached) == 1) { +- if (dev_conf->intr_conf.rxq) { +- if (eth_vhost_install_intr(eth_dev) < 0) { +- VHOST_LOG(INFO, +- "Failed to install interrupt handler."); +- return -1; +- } +- } ++ eth_vhost_uninstall_intr(eth_dev); ++ if (dev_conf->intr_conf.rxq && eth_vhost_install_intr(eth_dev) < 0) { ++ VHOST_LOG(ERR, "Failed to install interrupt handler.\n"); ++ return -1; } ++ queue_setup(eth_dev, internal); ++ if (rte_atomic32_read(&internal->dev_attached) == 1 && ++ dev_conf->intr_conf.rxq) ++ eth_vhost_configure_intr(eth_dev); ++ rte_atomic32_set(&internal->started, 1); - update_queuing_status(eth_dev); + update_queuing_status(eth_dev, false); ++ ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } -@@ -1164,7 +1175,7 @@ eth_dev_stop(struct rte_eth_dev *dev) +@@ -1161,10 +1125,16 @@ static int + eth_dev_stop(struct rte_eth_dev *dev) + { + struct pmd_internal *internal = dev->data->dev_private; ++ uint16_t i; dev->data->dev_started = 0; rte_atomic32_set(&internal->started, 0); - update_queuing_status(dev); + update_queuing_status(dev, true); ++ ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } -@@ -1643,11 +1654,11 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev) 
+@@ -1205,6 +1175,8 @@ eth_dev_close(struct rte_eth_dev *dev) + rte_free(internal->iface_name); + rte_free(internal); + ++ eth_vhost_uninstall_intr(dev); ++ + dev->data->dev_private = NULL; + + rte_free(vring_states[dev->data->port_id]); +@@ -1232,6 +1204,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + vq->mb_pool = mb_pool; + vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ; + rte_spinlock_init(&vq->intr_lock); ++ vq->kickfd = -1; + dev->data->rx_queues[rx_queue_id] = vq; + + return 0; +@@ -1254,6 +1227,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + + vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ; + rte_spinlock_init(&vq->intr_lock); ++ vq->kickfd = -1; + dev->data->tx_queues[tx_queue_id] = vq; + + return 0; +@@ -1643,11 +1617,11 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev) &open_int, &tso); if (ret < 0) goto out_free; @@ -42042,10 +79739,74 @@ index 01a333ada2..d78b8278c6 100644 endif endif diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c -index c2588369b2..d180162abd 100644 +index c2588369b2..bcad27817e 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_ethdev.c -@@ -2028,7 +2028,8 @@ virtio_dev_rss_hash_update(struct rte_eth_dev *dev, +@@ -638,10 +638,13 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx) + hw->cvq = cvq; + } + +- if (hw->use_va) ++ if (hw->use_va) { + vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr); +- else ++ vq->mbuf_addr_mask = UINTPTR_MAX; ++ } else { + vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova); ++ vq->mbuf_addr_mask = UINT64_MAX; ++ } + + if (queue_type == VTNET_TQ) { + struct virtio_tx_region *txr; +@@ -1796,22 +1799,25 @@ static int + virtio_configure_intr(struct rte_eth_dev *dev) + { + struct virtio_hw *hw = dev->data->dev_private; ++ int ret; + + if (!rte_intr_cap_multiple(dev->intr_handle)) { + PMD_INIT_LOG(ERR, "Multiple intr vector not supported"); + return -ENOTSUP; + } + +- if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) { ++ ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "Fail to create eventfd"); +- return -1; ++ return ret; + } + +- if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", +- hw->max_queue_pairs)) { ++ ret = rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", ++ hw->max_queue_pairs); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors", + hw->max_queue_pairs); +- return -ENOMEM; ++ return ret; + } + + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { +@@ -1832,12 +1838,13 @@ virtio_configure_intr(struct rte_eth_dev *dev) + */ + if (virtio_intr_enable(dev) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); +- return -1; ++ return -EINVAL; + } + +- if (virtio_queues_bind_intr(dev) < 0) { ++ ret = virtio_queues_bind_intr(dev); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt"); +- return -1; ++ return ret; + } + + return 0; +@@ -2028,7 +2035,8 @@ virtio_dev_rss_hash_update(struct rte_eth_dev *dev, return 0; restore_key: @@ -42055,16 +79816,122 @@ index c2588369b2..d180162abd 100644 restore_types: hw->rss_hash_types = old_hash_types; -@@ -2657,7 +2658,7 @@ virtio_dev_configure(struct rte_eth_dev *dev) +@@ -2159,7 +2167,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) + eth_dev->device->numa_node); + if (!hw->rss_key) { + PMD_INIT_LOG(ERR, "Failed to allocate RSS key"); +- return -1; ++ return -ENOMEM; + } 
+ } + +@@ -2181,7 +2189,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) + eth_dev->device->numa_node); + if (!hw->rss_reta) { + PMD_INIT_LOG(ERR, "Failed to allocate RSS reta"); +- return -1; ++ return -ENOMEM; + } + + hw->rss_rx_queues = 0; +@@ -2221,7 +2229,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + /* Tell the host we've known how to drive the device. */ + virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); + if (virtio_ethdev_negotiate_features(hw, req_features) < 0) +- return -1; ++ return -EINVAL; + + hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM); + +@@ -2303,7 +2311,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + if (config->mtu < RTE_ETHER_MIN_MTU) { + PMD_INIT_LOG(ERR, "invalid max MTU value (%u)", + config->mtu); +- return -1; ++ return -EINVAL; + } + + hw->max_mtu = config->mtu; +@@ -2316,9 +2324,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + } + + hw->rss_hash_types = 0; +- if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) +- if (virtio_dev_rss_init(eth_dev)) +- return -1; ++ if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) { ++ ret = virtio_dev_rss_init(eth_dev); ++ if (ret < 0) ++ return ret; ++ } + + PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d", + config->max_virtqueue_pairs); +@@ -2340,13 +2350,22 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + return ret; + + if (eth_dev->data->dev_conf.intr_conf.rxq) { +- if (virtio_configure_intr(eth_dev) < 0) { ++ ret = virtio_configure_intr(eth_dev); ++ if (ret < 0) { + PMD_INIT_LOG(ERR, "failed to configure interrupt"); + virtio_free_queues(hw); +- return -1; ++ return ret; + } + } + ++ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) ++ /* Enable vector (0) for Link State Interrupt */ ++ if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) == ++ VIRTIO_MSI_NO_VECTOR) { ++ PMD_DRV_LOG(ERR, "failed to set config vector"); ++ return -EBUSY; ++ } ++ + virtio_reinit_complete(hw); + + return 0; +@@ -2455,6 +2474,9 @@ virtio_dev_speed_capa_get(uint32_t speed) + static int vectorized_check_handler(__rte_unused const char *key, + const char *value, void *ret_val) + { ++ if (value == NULL || ret_val == NULL) ++ return -EINVAL; ++ + if (strcmp(value, "1") == 0) + *(int *)ret_val = 1; + else +@@ -2615,6 +2637,13 @@ virtio_dev_configure(struct rte_eth_dev *dev) + return ret; + } + ++ /* if queues are not allocated, reinit the device */ ++ if (hw->vqs == NULL) { ++ ret = virtio_init_device(dev, hw->req_guest_features); ++ if (ret < 0) ++ return ret; ++ } ++ + if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) && + !virtio_with_feature(hw, VIRTIO_NET_F_RSS)) { + PMD_DRV_LOG(ERR, "RSS support requested but not supported by the device"); +@@ -2656,14 +2685,6 @@ virtio_dev_configure(struct rte_eth_dev *dev) + hw->has_tx_offload = tx_offload_enabled(hw); hw->has_rx_offload = rx_offload_enabled(hw); - if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) +- if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) - /* Enable vector (0) for Link State Intrerrupt */ -+ /* Enable vector (0) for Link State Interrupt */ - if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) == - VIRTIO_MSI_NO_VECTOR) { - PMD_DRV_LOG(ERR, "failed to set config vector"); -@@ -2775,7 +2776,7 @@ virtio_dev_start(struct rte_eth_dev *dev) +- if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) == +- VIRTIO_MSI_NO_VECTOR) { +- PMD_DRV_LOG(ERR, "failed to set config vector"); +- return -EBUSY; +- } +- + if (virtio_with_packed_queue(hw)) { + #if defined(RTE_ARCH_X86_64) && 
defined(CC_AVX512_SUPPORT) + if ((hw->use_vec_rx || hw->use_vec_tx) && +@@ -2775,7 +2796,7 @@ virtio_dev_start(struct rte_eth_dev *dev) } } @@ -42073,11 +79940,66 @@ index c2588369b2..d180162abd 100644 * in device configure, but it could be unmapped when device is * stopped. */ +@@ -2834,6 +2855,11 @@ virtio_dev_start(struct rte_eth_dev *dev) + set_rxtx_funcs(dev); + hw->started = 1; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + /* Initialize Link state */ + virtio_dev_link_update(dev, 0); + +@@ -2923,6 +2949,7 @@ virtio_dev_stop(struct rte_eth_dev *dev) + struct virtio_hw *hw = dev->data->dev_private; + struct rte_eth_link link; + struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf; ++ uint16_t i; + + PMD_INIT_LOG(DEBUG, "stop"); + dev->data->dev_started = 0; +@@ -2950,6 +2977,11 @@ virtio_dev_stop(struct rte_eth_dev *dev) + out_unlock: + rte_spinlock_unlock(&hw->state_lock); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + diff --git a/dpdk/drivers/net/virtio/virtio_pci.c b/dpdk/drivers/net/virtio/virtio_pci.c -index 182cfc9eae..632451dcbe 100644 +index 182cfc9eae..46b9ba6682 100644 --- a/dpdk/drivers/net/virtio/virtio_pci.c +++ b/dpdk/drivers/net/virtio/virtio_pci.c -@@ -235,7 +235,7 @@ legacy_get_isr(struct virtio_hw *hw) +@@ -33,22 +33,6 @@ + + struct virtio_pci_internal virtio_pci_internal[RTE_MAX_ETHPORTS]; + +-static inline int +-check_vq_phys_addr_ok(struct virtqueue *vq) +-{ +- /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, +- * and only accepts 32 bit page frame number. +- * Check if the allocated physical memory exceeds 16TB. +- */ +- if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> +- (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { +- PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); +- return 0; +- } +- +- return 1; +-} +- + #define PCI_MSIX_ENABLE 0x8000 + + static enum virtio_msix_status +@@ -235,7 +219,7 @@ legacy_get_isr(struct virtio_hw *hw) return dst; } @@ -42086,8 +80008,35 @@ index 182cfc9eae..632451dcbe 100644 static uint16_t legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec) { +@@ -273,8 +257,15 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) + { + uint32_t src; + +- if (!check_vq_phys_addr_ok(vq)) ++ /* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit, ++ * and only accepts 32 bit page frame number. ++ * Check if the allocated physical memory exceeds 16TB. 
++ */ ++ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> ++ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { ++ PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); + return -1; ++ } + + rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2, + VIRTIO_PCI_QUEUE_SEL); +@@ -476,9 +467,6 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) + uint64_t desc_addr, avail_addr, used_addr; + uint16_t notify_off; + +- if (!check_vq_phys_addr_ok(vq)) +- return -1; +- + desc_addr = vq->vq_ring_mem; + avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, diff --git a/dpdk/drivers/net/virtio/virtio_pci_ethdev.c b/dpdk/drivers/net/virtio/virtio_pci_ethdev.c -index 54645dc62e..1f6bdeddda 100644 +index 54645dc62e..f786ac2c9e 100644 --- a/dpdk/drivers/net/virtio/virtio_pci_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_pci_ethdev.c @@ -122,10 +122,20 @@ static int @@ -42112,11 +80061,67 @@ index 54645dc62e..1f6bdeddda 100644 ret = virtio_dev_stop(eth_dev); virtio_dev_close(eth_dev); +@@ -138,6 +148,9 @@ eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev) + static int vdpa_check_handler(__rte_unused const char *key, + const char *value, void *ret_val) + { ++ if (value == NULL || ret_val == NULL) ++ return -EINVAL; ++ + if (strcmp(value, "1") == 0) + *(int *)ret_val = 1; + else diff --git a/dpdk/drivers/net/virtio/virtio_rxtx.c b/dpdk/drivers/net/virtio/virtio_rxtx.c -index 2e115ded02..4795893ec7 100644 +index 2e115ded02..14a8224109 100644 --- a/dpdk/drivers/net/virtio/virtio_rxtx.c +++ b/dpdk/drivers/net/virtio/virtio_rxtx.c -@@ -814,7 +814,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -404,29 +404,36 @@ virtio_tso_fix_cksum(struct rte_mbuf *m) + if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len + + m->l4_len)) { + struct rte_ipv4_hdr *iph; +- struct rte_ipv6_hdr *ip6h; + struct rte_tcp_hdr *th; +- uint16_t prev_cksum, new_cksum, ip_len, ip_paylen; ++ uint16_t prev_cksum, new_cksum; ++ uint32_t ip_paylen; + uint32_t tmp; + + iph = rte_pktmbuf_mtod_offset(m, + struct rte_ipv4_hdr *, m->l2_len); + th = RTE_PTR_ADD(iph, m->l3_len); ++ ++ /* ++ * Calculate IPv4 header checksum with current total length value ++ * (whatever it is) to have correct checksum after update on edits ++ * done by TSO. ++ */ + if ((iph->version_ihl >> 4) == 4) { + iph->hdr_checksum = 0; + iph->hdr_checksum = rte_ipv4_cksum(iph); +- ip_len = iph->total_length; +- ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) - +- m->l3_len); +- } else { +- ip6h = (struct rte_ipv6_hdr *)iph; +- ip_paylen = ip6h->payload_len; + } + ++ /* ++ * Do not use IPv4 total length and IPv6 payload length fields to get ++ * TSO payload length since it could not fit into 16 bits. 
++ */ ++ ip_paylen = rte_cpu_to_be_32(rte_pktmbuf_pkt_len(m) - m->l2_len - ++ m->l3_len); ++ + /* calculate the new phdr checksum not including ip_paylen */ + prev_cksum = th->cksum; + tmp = prev_cksum; +- tmp += ip_paylen; ++ tmp += (ip_paylen & 0xffff) + (ip_paylen >> 16); + tmp = (tmp & 0xffff) + (tmp >> 16); + new_cksum = tmp; + +@@ -814,7 +821,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id __rte_unused, const struct rte_eth_txconf *tx_conf) { @@ -42125,7 +80130,7 @@ index 2e115ded02..4795893ec7 100644 struct virtio_hw *hw = dev->data->dev_private; struct virtqueue *vq = hw->vqs[vq_idx]; struct virtnet_tx *txvq; -@@ -858,7 +858,7 @@ int +@@ -858,7 +865,7 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) { @@ -42134,7 +80139,7 @@ index 2e115ded02..4795893ec7 100644 struct virtio_hw *hw = dev->data->dev_private; struct virtqueue *vq = hw->vqs[vq_idx]; -@@ -962,7 +962,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) +@@ -962,7 +969,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) return -EINVAL; } @@ -42357,7 +80362,7 @@ index cc830a660f..77820bf967 100644 dev->net_status &= (~VIRTIO_NET_S_LINK_UP); if (virtio_user_dev_server_reconnect(dev) >= 0) diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c -index 35aa76b1ff..f9cada05e4 100644 +index 35aa76b1ff..4044774ea0 100644 --- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -417,7 +417,7 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev) @@ -42369,11 +80374,51 @@ index 35aa76b1ff..f9cada05e4 100644 return -rte_errno; } +@@ -585,11 +585,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + dev->frontend_features = 0; + dev->unsupported_features = 0; + dev->backend_type = backend_type; +- +- if (*ifname) { +- dev->ifname = *ifname; +- *ifname = NULL; +- } ++ dev->ifname = *ifname; + + if (virtio_user_dev_setup(dev) < 0) { + PMD_INIT_LOG(ERR, "(%s) backend set up fails", dev->path); +@@ -662,6 +658,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + } + } + ++ *ifname = NULL; + return 0; + } + diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c -index 0271098f0d..16eca2f940 100644 +index 0271098f0d..9ba4ea3b33 100644 --- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c -@@ -666,6 +666,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev) +@@ -89,10 +89,15 @@ virtio_user_set_status(struct virtio_hw *hw, uint8_t status) + if (status & VIRTIO_CONFIG_STATUS_FEATURES_OK && + ~old_status & VIRTIO_CONFIG_STATUS_FEATURES_OK) + virtio_user_dev_set_features(dev); +- if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) +- virtio_user_start_device(dev); +- else if (status == VIRTIO_CONFIG_STATUS_RESET) ++ ++ if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) { ++ if (virtio_user_start_device(dev)) { ++ virtio_user_dev_update_status(dev); ++ return; ++ } ++ } else if (status == VIRTIO_CONFIG_STATUS_RESET) { + virtio_user_reset(hw); ++ } + + virtio_user_dev_set_status(dev, status); + } +@@ -666,6 +671,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev) /* previously called by pci probing for physical dev */ if (eth_virtio_dev_init(eth_dev) < 0) { PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails"); @@ -42395,10 +80440,41 @@ index 65bf792eb0..c98d696e62 
100644 struct rte_mbuf * virtqueue_detach_unused(struct virtqueue *vq) diff --git a/dpdk/drivers/net/virtio/virtqueue.h b/dpdk/drivers/net/virtio/virtqueue.h -index 855f57a956..99c68cf622 100644 +index 855f57a956..39288c13f8 100644 --- a/dpdk/drivers/net/virtio/virtqueue.h +++ b/dpdk/drivers/net/virtio/virtqueue.h -@@ -227,7 +227,7 @@ struct virtio_net_ctrl_rss { +@@ -113,17 +113,26 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp, + + #define VIRTQUEUE_MAX_NAME_SZ 32 + ++#ifdef RTE_ARCH_32 ++#define VIRTIO_MBUF_ADDR_MASK(vq) ((vq)->mbuf_addr_mask) ++#else ++#define VIRTIO_MBUF_ADDR_MASK(vq) UINT64_MAX ++#endif ++ + /** + * Return the IOVA (or virtual address in case of virtio-user) of mbuf + * data buffer. + * + * The address is firstly casted to the word size (sizeof(uintptr_t)) +- * before casting it to uint64_t. This is to make it work with different +- * combination of word size (64 bit and 32 bit) and virtio device +- * (virtio-pci and virtio-user). ++ * before casting it to uint64_t. It is then masked with the expected ++ * address length (64 bits for virtio-pci, word size for virtio-user). ++ * ++ * This is to make it work with different combination of word size (64 ++ * bit and 32 bit) and virtio device (virtio-pci and virtio-user). + */ + #define VIRTIO_MBUF_ADDR(mb, vq) \ +- ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset))) ++ ((*(uint64_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)) & \ ++ VIRTIO_MBUF_ADDR_MASK(vq)) + + /** + * Return the physical address (or virtual address in case of +@@ -227,7 +236,7 @@ struct virtio_net_ctrl_rss { * Control link announce acknowledgement * * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that @@ -42407,7 +80483,15 @@ index 855f57a956..99c68cf622 100644 * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives * this command. 
*/ -@@ -312,7 +312,7 @@ struct virtqueue { +@@ -297,6 +306,7 @@ struct virtqueue { + void *vq_ring_virt_mem; /**< linear address of vring*/ + unsigned int vq_ring_size; + uint16_t mbuf_addr_offset; ++ uint64_t mbuf_addr_mask; + + union { + struct virtnet_rx rxq; +@@ -312,7 +322,7 @@ struct virtqueue { struct vq_desc_extra vq_descx[0]; }; @@ -42416,8 +80500,16 @@ index 855f57a956..99c68cf622 100644 #define VIRTIO_NET_CTRL_MQ 4 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 +@@ -775,6 +785,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); + start_dp[idx].len = vq->hw->vtnet_hdr_size; ++ head_flags |= VRING_DESC_F_NEXT; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + idx++; + if (idx >= vq->vq_nentries) { diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -index d1ef1cad08..d4aea74026 100644 +index d1ef1cad08..a48a355d39 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -822,6 +822,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) @@ -42432,6 +80524,145 @@ index d1ef1cad08..d4aea74026 100644 if (hw->intr.lsc_only) rqd->conf.intrIdx = 1; else +@@ -885,6 +890,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + { + int ret; + struct vmxnet3_hw *hw = dev->data->dev_private; ++ uint16_t i; + + PMD_INIT_FUNC_TRACE(); + +@@ -980,6 +986,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + */ + __vmxnet3_dev_link_update(dev, 0); + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; ++ + return VMXNET3_SUCCESS; + } + +@@ -992,6 +1003,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) + struct rte_eth_link link; + struct vmxnet3_hw *hw = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; ++ uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); +@@ -1047,6 +1059,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) + hw->adapter_stopped = 1; + dev->data->dev_started = 0; + ++ for (i = 0; i < dev->data->nb_rx_queues; i++) ++ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ for (i = 0; i < dev->data->nb_tx_queues; i++) ++ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; ++ + return 0; + } + +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +index d745064bc4..a01f2c3cdd 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +@@ -412,8 +412,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + + nb_tx = 0; + while (nb_tx < nb_pkts) { +- Vmxnet3_GenericDesc *gdesc; +- vmxnet3_buf_info_t *tbi; ++ Vmxnet3_GenericDesc *gdesc = NULL; ++ vmxnet3_buf_info_t *tbi = NULL; + uint32_t first2fill, avail, dw2; + struct rte_mbuf *txm = tx_pkts[nb_tx]; + struct rte_mbuf *m_seg = txm; +@@ -457,18 +457,18 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + continue; + } + ++ /* Skip empty packets */ ++ if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) { ++ txq->stats.drop_total++; ++ rte_pktmbuf_free(txm); ++ nb_tx++; ++ continue; ++ } ++ + if (txm->nb_segs == 1 && + rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) { + struct Vmxnet3_TxDataDesc *tdd; + +- /* Skip empty packets */ +- if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) { +- txq->stats.drop_total++; +- 
rte_pktmbuf_free(txm); +- nb_tx++; +- continue; +- } +- + tdd = (struct Vmxnet3_TxDataDesc *) + ((uint8 *)txq->data_ring.base + + txq->cmd_ring.next2fill * +@@ -481,6 +481,10 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; + first2fill = txq->cmd_ring.next2fill; + do { ++ /* Skip empty segments */ ++ if (unlikely(m_seg->data_len == 0)) ++ continue; ++ + /* Remember the transmit buffer for cleanup */ + tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill; + +@@ -490,10 +494,6 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill; + +- /* Skip empty segments */ +- if (unlikely(m_seg->data_len == 0)) +- continue; +- + if (copy_size) { + uint64 offset = + (uint64)txq->cmd_ring.next2fill * +@@ -514,6 +514,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + /* use the right gen for non-SOP desc */ + dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT; + } while ((m_seg = m_seg->next) != NULL); ++ /* We must have executed the complete preceding loop at least ++ * once without skipping an empty segment, as we can't have ++ * a packet with only empty segments. ++ * Thus, tbi and gdesc have been initialized. ++ */ + + /* set the last buf_info for the pkt */ + tbi->m = txm; +@@ -1264,11 +1269,18 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev) + for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) { + /* Passing 0 as alloc_num will allocate full ring */ + ret = vmxnet3_post_rx_bufs(rxq, j); +- if (ret <= 0) { ++ ++ /* Zero number of descriptors in the configuration of the RX queue */ ++ if (ret == 0) { + PMD_INIT_LOG(ERR, +- "ERROR: Posting Rxq: %d buffers ring: %d", +- i, j); +- return -ret; ++ "Invalid configuration in Rx queue: %d, buffers ring: %d\n", ++ i, j); ++ return -EINVAL; ++ } ++ /* Return the error number */ ++ if (ret < 0) { ++ PMD_INIT_LOG(ERR, "Posting Rxq: %d buffers ring: %d", i, j); ++ return ret; + } + /* + * Updating device with the index:next2fill to fill the diff --git a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c index de26d2aef3..ebc2cd5d0d 100644 --- a/dpdk/drivers/raw/dpaa2_qdma/dpaa2_qdma.c @@ -42484,6 +80715,28 @@ index dca1518a83..8f6203392b 100644 #define PORT_FEATURE_ID_UAFU FEATURE_ID_AFU /* +diff --git a/dpdk/drivers/raw/ifpga/base/opae_hw_api.c b/dpdk/drivers/raw/ifpga/base/opae_hw_api.c +index 11c9887c7f..45efe70473 100644 +--- a/dpdk/drivers/raw/ifpga/base/opae_hw_api.c ++++ b/dpdk/drivers/raw/ifpga/base/opae_hw_api.c +@@ -380,7 +380,7 @@ static pthread_mutex_t *opae_adapter_mutex_open(struct opae_adapter *adapter) + PROT_READ | PROT_WRITE, MAP_SHARED, + shm_id, 0); + adapter->lock = (pthread_mutex_t *)ptr; +- if (ptr) { ++ if (ptr != MAP_FAILED) { + dev_info(NULL, + "shared memory %s address is %p\n", + shm_name, ptr); +@@ -499,7 +499,7 @@ static void *opae_adapter_shm_alloc(struct opae_adapter *adapter) + adapter->shm.size = size; + adapter->shm.ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, + MAP_SHARED, shm_id, 0); +- if (adapter->shm.ptr) { ++ if (adapter->shm.ptr != MAP_FAILED) { + dev_info(NULL, + "shared memory %s address is %p\n", + shm_name, adapter->shm.ptr); diff --git a/dpdk/drivers/raw/ifpga/base/opae_spi.c b/dpdk/drivers/raw/ifpga/base/opae_spi.c index 9efeecb791..ca3d41fb92 100644 --- a/dpdk/drivers/raw/ifpga/base/opae_spi.c @@ -43609,6 +81862,186 @@ index cdf7667d5d..c9ff33aa59 100644 * for those db bits. 
* @peer_db_set: Set doorbell bit to generate peer interrupt for that bit. * @vector_bind: Bind vector source [intr] to msix vector [msix]. +diff --git a/dpdk/drivers/raw/skeleton/skeleton_rawdev.c b/dpdk/drivers/raw/skeleton/skeleton_rawdev.c +index 16ecae3d92..03b2019e99 100644 +--- a/dpdk/drivers/raw/skeleton/skeleton_rawdev.c ++++ b/dpdk/drivers/raw/skeleton/skeleton_rawdev.c +@@ -421,7 +421,7 @@ static int skeleton_rawdev_enqueue_bufs(struct rte_rawdev *dev, + * help in complex implementation which require more information than + * just an integer - for example, a queue-pair. + */ +- q_id = *((int *)context); ++ q_id = *((uint16_t *)context); + + for (i = 0; i < count; i++) + queue_buf[q_id].bufs[i] = buffers[i]->buf_addr; +@@ -443,7 +443,7 @@ static int skeleton_rawdev_dequeue_bufs(struct rte_rawdev *dev, + * help in complex implementation which require more information than + * just an integer - for example, a queue-pair. + */ +- q_id = *((int *)context); ++ q_id = *((uint16_t *)context); + + for (i = 0; i < count; i++) + buffers[i]->buf_addr = queue_buf[q_id].bufs[i]; +@@ -659,6 +659,8 @@ skeldev_get_selftest(const char *key __rte_unused, + void *opaque) + { + int *flag = opaque; ++ if (value == NULL || opaque == NULL) ++ return -EINVAL; + *flag = atoi(value); + return 0; + } +diff --git a/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c b/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c +index 484468eeb4..cad05ed60f 100644 +--- a/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c ++++ b/dpdk/drivers/raw/skeleton/skeleton_rawdev_test.c +@@ -368,42 +368,34 @@ static int + test_rawdev_enqdeq(void) + { + int ret; +- unsigned int count = 1; + uint16_t queue_id = 0; +- struct rte_rawdev_buf buffers[1]; +- struct rte_rawdev_buf *deq_buffers = NULL; +- +- buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3); +- if (!buffers[0].buf_addr) +- goto cleanup; +- snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d", ++ struct rte_rawdev_buf buffer; ++ struct rte_rawdev_buf *buffers[1]; ++ struct rte_rawdev_buf deq_buffer; ++ struct rte_rawdev_buf *deq_buffers[1]; ++ ++ buffers[0] = &buffer; ++ buffer.buf_addr = malloc(strlen(TEST_DEV_NAME) + 3); ++ if (!buffer.buf_addr) ++ return TEST_FAILED; ++ snprintf(buffer.buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d", + TEST_DEV_NAME, 0); + +- ret = rte_rawdev_enqueue_buffers(test_dev_id, +- (struct rte_rawdev_buf **)&buffers, +- count, &queue_id); +- RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count, ++ ret = rte_rawdev_enqueue_buffers(test_dev_id, buffers, ++ RTE_DIM(buffers), &queue_id); ++ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, RTE_DIM(buffers), + "Unable to enqueue buffers"); + +- deq_buffers = malloc(sizeof(struct rte_rawdev_buf) * count); +- if (!deq_buffers) +- goto cleanup; +- +- ret = rte_rawdev_dequeue_buffers(test_dev_id, +- (struct rte_rawdev_buf **)&deq_buffers, +- count, &queue_id); +- RTE_TEST_ASSERT_EQUAL((unsigned int)ret, count, ++ deq_buffers[0] = &deq_buffer; ++ ret = rte_rawdev_dequeue_buffers(test_dev_id, deq_buffers, ++ RTE_DIM(deq_buffers), &queue_id); ++ RTE_TEST_ASSERT_EQUAL((unsigned int)ret, RTE_DIM(buffers), + "Unable to dequeue buffers"); ++ RTE_TEST_ASSERT_EQUAL(deq_buffers[0]->buf_addr, buffers[0]->buf_addr, ++ "Did not retrieve expected object"); + +- if (deq_buffers) +- free(deq_buffers); +- ++ free(buffer.buf_addr); + return TEST_SUCCESS; +-cleanup: +- if (buffers[0].buf_addr) +- free(buffers[0].buf_addr); +- +- return TEST_FAILED; + } + + static void skeldev_test_run(int (*setup)(void), +diff --git 
a/dpdk/drivers/regex/mlx5/mlx5_regex.h b/dpdk/drivers/regex/mlx5/mlx5_regex.h +index 89495301ac..831eb153eb 100644 +--- a/dpdk/drivers/regex/mlx5/mlx5_regex.h ++++ b/dpdk/drivers/regex/mlx5/mlx5_regex.h +@@ -37,7 +37,7 @@ struct mlx5_regex_qp { + struct mlx5_regex_hw_qp *qps; /* Pointer to qp array. */ + uint16_t nb_obj; /* Number of qp objects. */ + struct mlx5_regex_cq cq; /* CQ struct. */ +- uint32_t free_qps; ++ uint64_t free_qps; + struct mlx5_regex_job *jobs; + struct ibv_mr *metadata; + struct ibv_mr *outputs; +diff --git a/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c b/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c +index 9a2db7e43f..32a9181fb3 100644 +--- a/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c ++++ b/dpdk/drivers/regex/mlx5/mlx5_regex_fastpath.c +@@ -174,8 +174,8 @@ send_doorbell(struct mlx5_regex_priv *priv, struct mlx5_regex_hw_qp *qp) + (MLX5_SEND_WQE_BB << (priv->has_umr ? 2 : 0)) + + (priv->has_umr ? MLX5_REGEX_UMR_WQE_SIZE : 0); + uint8_t *wqe = (uint8_t *)(uintptr_t)qp->qp_obj.wqes + wqe_offset; +- uint32_t actual_pi = (priv->has_umr ? (qp->db_pi * 4 + 3) : qp->db_pi) & +- MLX5_REGEX_MAX_WQE_INDEX; ++ uint32_t actual_pi = (priv->has_umr ? ((1 + qp->db_pi) * 4) : qp->db_pi) ++ & MLX5_REGEX_MAX_WQE_INDEX; + + /* Or the fm_ce_se instead of set, avoid the fence be cleared. */ + ((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE; +@@ -375,7 +375,7 @@ mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id, + struct mlx5_regex_hw_qp *qp_obj; + size_t hw_qpid, nb_left = nb_ops, nb_desc; + +- while ((hw_qpid = ffs(queue->free_qps))) { ++ while ((hw_qpid = ffsll(queue->free_qps))) { + hw_qpid--; /* ffs returns 1 for bit 0 */ + qp_obj = &queue->qps[hw_qpid]; + nb_desc = get_free(qp_obj, priv->has_umr); +@@ -384,7 +384,7 @@ mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id, + if (nb_desc > nb_left) + nb_desc = nb_left; + else +- queue->free_qps &= ~(1 << hw_qpid); ++ queue->free_qps &= ~(1ULL << hw_qpid); + prep_regex_umr_wqe_set(priv, queue, qp_obj, ops, + nb_desc); + send_doorbell(priv, qp_obj); +@@ -409,7 +409,7 @@ mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id, + struct mlx5_regex_hw_qp *qp_obj; + size_t hw_qpid, job_id, i = 0; + +- while ((hw_qpid = ffs(queue->free_qps))) { ++ while ((hw_qpid = ffsll(queue->free_qps))) { + hw_qpid--; /* ffs returns 1 for bit 0 */ + qp_obj = &queue->qps[hw_qpid]; + while (get_free(qp_obj, priv->has_umr)) { +@@ -423,7 +423,7 @@ mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id, + goto out; + } + } +- queue->free_qps &= ~(1 << hw_qpid); ++ queue->free_qps &= ~(1ULL << hw_qpid); + send_doorbell(priv, qp_obj); + } + +@@ -556,7 +556,7 @@ mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id, + cq->ci = (cq->ci + 1) & 0xffffff; + rte_wmb(); + cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci); +- queue->free_qps |= (1 << hw_qpid); ++ queue->free_qps |= (1ULL << hw_qpid); + } + + out: +@@ -595,7 +595,7 @@ setup_qps(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue) + (uintptr_t)job->output); + wqe += 64; + } +- queue->free_qps |= 1 << hw_qpid; ++ queue->free_qps |= 1ULL << hw_qpid; + } + } + diff --git a/dpdk/drivers/regex/mlx5/mlx5_rxp.c b/dpdk/drivers/regex/mlx5/mlx5_rxp.c index 5ead3ca341..51b6e71376 100644 --- a/dpdk/drivers/regex/mlx5/mlx5_rxp.c @@ -43622,6 +82055,23 @@ index 5ead3ca341..51b6e71376 100644 DRV_LOG(ERR, "can't allocate qps memory"); rte_errno = ENOMEM; return -rte_errno; +diff --git a/dpdk/drivers/regex/octeontx2/meson.build 
b/dpdk/drivers/regex/octeontx2/meson.build +index 3f81add5bf..c5c75ecb10 100644 +--- a/dpdk/drivers/regex/octeontx2/meson.build ++++ b/dpdk/drivers/regex/octeontx2/meson.build +@@ -8,10 +8,10 @@ if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + subdir_done() + endif + +-lib = cc.find_library('librxp_compiler', required: false) ++lib = cc.find_library('rxp_compiler', required: false) + if lib.found() + ext_deps += lib +- ext_deps += cc.find_library('libstdc++', required: true) ++ ext_deps += cc.find_library('stdc++', required: true) + cflags += ['-DREE_COMPILER_SDK'] + endif + diff --git a/dpdk/drivers/vdpa/ifc/base/ifcvf.c b/dpdk/drivers/vdpa/ifc/base/ifcvf.c index 721cb1da8a..f3c29f94b3 100644 --- a/dpdk/drivers/vdpa/ifc/base/ifcvf.c @@ -43705,7 +82155,7 @@ index 6aef25ea45..3d567695cc 100644 #define STATIC static diff --git a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c -index 3853c4cf7e..6a915b0d5e 100644 +index 3853c4cf7e..c9b10527cc 100644 --- a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -356,6 +356,8 @@ vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx) @@ -43717,6 +82167,82 @@ index 3853c4cf7e..6a915b0d5e 100644 irq_set = (struct vfio_irq_set *)irq_set_buf; irq_set->argsz = sizeof(irq_set_buf); +@@ -846,6 +848,8 @@ ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal) + vdpa_ifcvf_stop(internal); + vdpa_disable_vfio_intr(internal); + ++ rte_atomic32_set(&internal->running, 0); ++ + ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false); + if (ret && ret != -ENOTSUP) + goto error; +@@ -898,7 +902,12 @@ ifcvf_dev_config(int vid) + internal = list->internal; + internal->vid = vid; + rte_atomic32_set(&internal->dev_attached, 1); +- update_datapath(internal); ++ if (update_datapath(internal) < 0) { ++ DRV_LOG(ERR, "failed to update datapath for vDPA device %s", ++ vdev->device->name); ++ rte_atomic32_set(&internal->dev_attached, 0); ++ return -1; ++ } + + if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0) + DRV_LOG(NOTICE, "vDPA (%s): software relay is used.", +@@ -940,7 +949,12 @@ ifcvf_dev_close(int vid) + internal->sw_fallback_running = false; + } else { + rte_atomic32_set(&internal->dev_attached, 0); +- update_datapath(internal); ++ if (update_datapath(internal) < 0) { ++ DRV_LOG(ERR, "failed to update datapath for vDPA device %s", ++ vdev->device->name); ++ internal->configured = 0; ++ return -1; ++ } + } + + internal->configured = 0; +@@ -1247,6 +1261,11 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + goto error; + } + internal->sw_lm = sw_fallback_lm; ++ if (!internal->sw_lm && !internal->hw.lm_cfg) { ++ DRV_LOG(ERR, "Device %s does not support HW assist live migration, please enable sw-live-migration!", ++ pci_dev->name); ++ goto error; ++ } + + internal->vdev = rte_vdpa_register_device(&pci_dev->device, &ifcvf_ops); + if (internal->vdev == NULL) { +@@ -1259,7 +1278,15 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + pthread_mutex_unlock(&internal_list_lock); + + rte_atomic32_set(&internal->started, 1); +- update_datapath(internal); ++ if (update_datapath(internal) < 0) { ++ DRV_LOG(ERR, "failed to update datapath %s", pci_dev->name); ++ rte_atomic32_set(&internal->started, 0); ++ rte_vdpa_unregister_device(internal->vdev); ++ pthread_mutex_lock(&internal_list_lock); ++ TAILQ_REMOVE(&internal_list, list, next); ++ pthread_mutex_unlock(&internal_list_lock); ++ goto error; ++ } + + rte_kvargs_free(kvlist); + return 0; +@@ 
-1288,7 +1315,8 @@ ifcvf_pci_remove(struct rte_pci_device *pci_dev) + + internal = list->internal; + rte_atomic32_set(&internal->started, 0); +- update_datapath(internal); ++ if (update_datapath(internal) < 0) ++ DRV_LOG(ERR, "failed to update datapath %s", pci_dev->name); + + rte_pci_unmap_device(internal->pdev); + rte_vfio_container_destroy(internal->vfio_container_fd); diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c index b9e84dd9bf..9c1c70037c 100644 --- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -43964,6 +82490,18 @@ index db971bad48..98d788d734 100644 DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.", priv->virtq_db_addr); } +diff --git a/dpdk/drivers/vdpa/sfc/meson.build b/dpdk/drivers/vdpa/sfc/meson.build +index b55f9cd691..933f3f18f3 100644 +--- a/dpdk/drivers/vdpa/sfc/meson.build ++++ b/dpdk/drivers/vdpa/sfc/meson.build +@@ -8,7 +8,6 @@ if ((arch_subdir != 'x86' or not dpdk_conf.get('RTE_ARCH_64')) and + reason = 'only supported on x86_64 and aarch64' + endif + +-fmt_name = 'sfc_vdpa' + extra_flags = [] + + foreach flag: extra_flags diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa.c index fccdd8c687..53f598facc 100644 --- a/dpdk/drivers/vdpa/sfc/sfc_vdpa.c @@ -44216,6 +82754,21 @@ index 1087b0dad1..4efebb3902 100644 cmdline_printf(cl, "Active_slaves:%d " "packets received:Tot:%d Arp:%d IPv4:%d\n", +diff --git a/dpdk/examples/cmdline/parse_obj_list.h b/dpdk/examples/cmdline/parse_obj_list.h +index 6516d3e2c2..1223ac1e8b 100644 +--- a/dpdk/examples/cmdline/parse_obj_list.h ++++ b/dpdk/examples/cmdline/parse_obj_list.h +@@ -12,8 +12,9 @@ + + #include <sys/queue.h> + #include <cmdline_parse.h> ++#include <cmdline_parse_string.h> + +-#define OBJ_NAME_LEN_MAX 64 ++#define OBJ_NAME_LEN_MAX sizeof(cmdline_fixed_string_t) + + struct object { + SLIST_ENTRY(object) next; diff --git a/dpdk/examples/distributor/main.c b/dpdk/examples/distributor/main.c index c681e237ea..8995806b4e 100644 --- a/dpdk/examples/distributor/main.c @@ -44371,6 +82924,132 @@ index d074acc905..dd576bcf77 100644 /* Start device. 
8< */ ret = rte_eth_dev_start(portid); if (ret < 0) +diff --git a/dpdk/examples/ethtool/ethtool-app/ethapp.c b/dpdk/examples/ethtool/ethtool-app/ethapp.c +index 78e86534e8..435d266bbc 100644 +--- a/dpdk/examples/ethtool/ethtool-app/ethapp.c ++++ b/dpdk/examples/ethtool/ethtool-app/ethapp.c +@@ -49,6 +49,13 @@ struct pcmd_intintint_params { + uint16_t rx; + }; + ++struct pcmd_pause_params { ++ cmdline_fixed_string_t cmd; ++ uint16_t port; ++ cmdline_fixed_string_t mode; ++ cmdline_fixed_string_t autoneg; ++ cmdline_fixed_string_t an_status; ++}; + + /* Parameter-less commands */ + cmdline_parse_token_string_t pcmd_quit_token_cmd = +@@ -116,12 +123,18 @@ cmdline_parse_token_num_t pcmd_intintint_token_rx = + + /* Pause commands */ + cmdline_parse_token_string_t pcmd_pause_token_cmd = +- TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, cmd, "pause"); ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, cmd, "pause"); + cmdline_parse_token_num_t pcmd_pause_token_port = +- TOKEN_NUM_INITIALIZER(struct pcmd_intstr_params, port, RTE_UINT16); +-cmdline_parse_token_string_t pcmd_pause_token_opt = +- TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, +- opt, "all#tx#rx#none"); ++ TOKEN_NUM_INITIALIZER(struct pcmd_pause_params, port, RTE_UINT16); ++cmdline_parse_token_string_t pcmd_pause_token_mode = ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, ++ mode, "full#tx#rx#none"); ++cmdline_parse_token_string_t pcmd_pause_token_autoneg = ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, ++ autoneg, "autoneg"); ++cmdline_parse_token_string_t pcmd_pause_token_an_status = ++ TOKEN_STRING_INITIALIZER(struct pcmd_pause_params, ++ an_status, "on#off"); + + /* VLAN commands */ + cmdline_parse_token_string_t pcmd_vlan_token_cmd = +@@ -348,13 +361,12 @@ pcmd_module_eeprom_callback(void *ptr_params, + fclose(fp_eeprom); + } + +- + static void + pcmd_pause_callback(void *ptr_params, + __rte_unused struct cmdline *ctx, + void *ptr_data) + { +- struct pcmd_intstr_params *params = ptr_params; ++ struct pcmd_pause_params *params = ptr_params; + struct ethtool_pauseparam info; + int stat; + +@@ -366,39 +378,38 @@ pcmd_pause_callback(void *ptr_params, + stat = rte_ethtool_get_pauseparam(params->port, &info); + } else { + memset(&info, 0, sizeof(info)); +- if (strcasecmp("all", params->opt) == 0) { ++ if (strcasecmp("full", params->mode) == 0) { + info.tx_pause = 1; + info.rx_pause = 1; +- } else if (strcasecmp("tx", params->opt) == 0) { ++ } else if (strcasecmp("tx", params->mode) == 0) { + info.tx_pause = 1; + info.rx_pause = 0; +- } else if (strcasecmp("rx", params->opt) == 0) { ++ } else if (strcasecmp("rx", params->mode) == 0) { + info.tx_pause = 0; + info.rx_pause = 1; + } else { + info.tx_pause = 0; + info.rx_pause = 0; + } +- /* Assume auto-negotiation wanted */ +- info.autoneg = 1; ++ ++ if (strcasecmp("on", params->an_status) == 0) ++ info.autoneg = 1; ++ else ++ info.autoneg = 0; ++ + stat = rte_ethtool_set_pauseparam(params->port, &info); + } + if (stat == 0) { +- if (info.rx_pause && info.tx_pause) +- printf("Port %i: Tx & Rx Paused\n", params->port); +- else if (info.rx_pause) +- printf("Port %i: Rx Paused\n", params->port); +- else if (info.tx_pause) +- printf("Port %i: Tx Paused\n", params->port); +- else +- printf("Port %i: Tx & Rx not paused\n", params->port); ++ printf("Pause parameters for Port %i:\n", params->port); ++ printf("Rx pause: %s\n", info.rx_pause ? "on" : "off"); ++ printf("Tx pause: %s\n", info.tx_pause ? "on" : "off"); ++ printf("Autoneg: %s\n", info.autoneg ? 
"on" : "off"); + } else if (stat == -ENOTSUP) + printf("Port %i: Operation not supported\n", params->port); + else + printf("Port %i: Error %i\n", params->port, stat); + } + +- + static void + pcmd_open_callback(__rte_unused void *ptr_params, + __rte_unused struct cmdline *ctx, +@@ -735,11 +746,13 @@ cmdline_parse_inst_t pcmd_pause = { + .f = pcmd_pause_callback, + .data = NULL, + .help_str = +- "pause <port_id> <all|tx|rx|none>\n Pause/unpause port", ++ "pause <port_id> <full|tx|rx|none> autoneg <on|off>\n Pause/unpause port", + .tokens = { + (void *)&pcmd_pause_token_cmd, + (void *)&pcmd_pause_token_port, +- (void *)&pcmd_pause_token_opt, ++ (void *)&pcmd_pause_token_mode, ++ (void *)&pcmd_pause_token_autoneg, ++ (void *)&pcmd_pause_token_an_status, + NULL + }, + }; diff --git a/dpdk/examples/ethtool/lib/rte_ethtool.c b/dpdk/examples/ethtool/lib/rte_ethtool.c index 86286d38a6..ffaad96498 100644 --- a/dpdk/examples/ethtool/lib/rte_ethtool.c @@ -44419,6 +83098,54 @@ index 52a7bf952d..511c176a2a 100644 if (!val->val) return -ENOMEM; +diff --git a/dpdk/examples/fips_validation/main.c b/dpdk/examples/fips_validation/main.c +index dc40bffe7d..a3d7aad7dd 100644 +--- a/dpdk/examples/fips_validation/main.c ++++ b/dpdk/examples/fips_validation/main.c +@@ -713,7 +713,7 @@ prepare_aead_op(void) + RTE_LOG(ERR, USER1, "Not enough memory\n"); + return -ENOMEM; + } +- env.digest_len = vec.cipher_auth.digest.len; ++ env.digest_len = vec.aead.digest.len; + + sym->aead.data.length = vec.pt.len; + sym->aead.digest.data = env.digest; +@@ -722,7 +722,7 @@ prepare_aead_op(void) + ret = prepare_data_mbufs(&vec.ct); + if (ret < 0) + return ret; +- ++ env.digest_len = vec.aead.digest.len; + sym->aead.data.length = vec.ct.len; + sym->aead.digest.data = vec.aead.digest.val; + sym->aead.digest.phys_addr = rte_malloc_virt2iova( +@@ -863,7 +863,7 @@ prepare_hmac_xform(struct rte_crypto_sym_xform *xform) + if (rte_cryptodev_sym_capability_check_auth(cap, + auth_xform->key.length, + auth_xform->digest_length, 0) != 0) { +- RTE_LOG(ERR, USER1, "PMD %s key length %u IV length %u\n", ++ RTE_LOG(ERR, USER1, "PMD %s key length %u Digest length %u\n", + info.device_name, auth_xform->key.length, + auth_xform->digest_length); + return -EPERM; +@@ -992,7 +992,7 @@ prepare_cmac_xform(struct rte_crypto_sym_xform *xform) + if (rte_cryptodev_sym_capability_check_auth(cap, + auth_xform->key.length, + auth_xform->digest_length, 0) != 0) { +- RTE_LOG(ERR, USER1, "PMD %s key length %u IV length %u\n", ++ RTE_LOG(ERR, USER1, "PMD %s key length %u Digest length %u\n", + info.device_name, auth_xform->key.length, + auth_xform->digest_length); + return -EPERM; +@@ -1850,6 +1850,7 @@ fips_test_one_file(void) + if (env.digest) { + rte_free(env.digest); + env.digest = NULL; ++ env.digest_len = 0; + } + if (env.mbuf) + rte_pktmbuf_free(env.mbuf); diff --git a/dpdk/examples/flow_classify/flow_classify.c b/dpdk/examples/flow_classify/flow_classify.c index 6185b34060..97708b7084 100644 --- a/dpdk/examples/flow_classify/flow_classify.c @@ -44432,6 +83159,19 @@ index 6185b34060..97708b7084 100644 in[CB_FLD_DST_ADDR]); return ret; } +diff --git a/dpdk/examples/ip_pipeline/thread.c b/dpdk/examples/ip_pipeline/thread.c +index b2e9732705..aeec954394 100644 +--- a/dpdk/examples/ip_pipeline/thread.c ++++ b/dpdk/examples/ip_pipeline/thread.c +@@ -432,7 +432,7 @@ thread_pipeline_disable(uint32_t thread_id, + static inline struct thread_msg_req * + thread_msg_recv(struct rte_ring *msgq_req) + { +- struct thread_msg_req *req; ++ struct thread_msg_req *req = 
NULL; + + int status = rte_ring_sc_dequeue(msgq_req, (void **) &req); + diff --git a/dpdk/examples/ip_reassembly/main.c b/dpdk/examples/ip_reassembly/main.c index fb3cac3bd0..6e4c11c3c7 100644 --- a/dpdk/examples/ip_reassembly/main.c @@ -44526,7 +83266,7 @@ index 1a1ec7861c..c217b9e475 100644 attr.egress = 0; diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c -index bf3dbf6b5c..a1faff6a59 100644 +index bf3dbf6b5c..365824889f 100644 --- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c +++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -169,7 +169,7 @@ uint32_t single_sa_idx; @@ -44601,7 +83341,49 @@ index bf3dbf6b5c..a1faff6a59 100644 ", tx_offloads=0x%" PRIx64 "\n", portid, local_port_conf.rxmode.offloads, local_port_conf.txmode.offloads); -@@ -3379,13 +3379,14 @@ main(int32_t argc, char **argv) +@@ -2370,12 +2370,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) + qconf = &lcore_conf[lcore_id]; + qconf->tx_queue_id[portid] = tx_queueid; + +- /* Pre-populate pkt offloads based on capabilities */ +- qconf->outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4; +- qconf->outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6; +- if (local_port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) +- qconf->outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM; +- + tx_queueid++; + + /* init RX queues */ +@@ -3253,6 +3247,7 @@ main(int32_t argc, char **argv) + uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; + uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; + struct eh_conf *eh_conf = NULL; ++ uint32_t ipv4_cksum_port_mask = 0; + size_t sess_sz; + + nb_bufs_in_pool = 0; +@@ -3360,6 +3355,20 @@ main(int32_t argc, char **argv) + &req_tx_offloads[portid]); + port_init(portid, req_rx_offloads[portid], + req_tx_offloads[portid]); ++ if ((req_tx_offloads[portid] & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ++ ipv4_cksum_port_mask |= 1U << portid; ++ } ++ ++ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { ++ if (rte_lcore_is_enabled(lcore_id) == 0) ++ continue; ++ ++ /* Pre-populate pkt offloads based on capabilities */ ++ lcore_conf[lcore_id].outbound.ipv4_offloads = RTE_MBUF_F_TX_IPV4; ++ lcore_conf[lcore_id].outbound.ipv6_offloads = RTE_MBUF_F_TX_IPV6; ++ /* Update per lcore checksum offload support only if all ports support it */ ++ if (ipv4_cksum_port_mask == enabled_port_mask) ++ lcore_conf[lcore_id].outbound.ipv4_offloads |= RTE_MBUF_F_TX_IP_CKSUM; + } + + /* +@@ -3379,13 +3388,14 @@ main(int32_t argc, char **argv) if ((enabled_port_mask & (1 << portid)) == 0) continue; @@ -44632,6 +83414,29 @@ index 2d4a26c962..b66ff2b650 100644 if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { RTE_LOG(ERR, IPSEC, "No Flow director rule for Egress traffic\n"); +diff --git a/dpdk/examples/ipsec-secgw/ipsec.h b/dpdk/examples/ipsec-secgw/ipsec.h +index bc87b1a51d..080579c51a 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec.h ++++ b/dpdk/examples/ipsec-secgw/ipsec.h +@@ -232,11 +232,18 @@ struct ipsec_ctx { + uint64_t ipv6_offloads; + }; + ++/* ++ * This structure is used for the key in hash table. ++ * Padding is to force the struct to use 8 bytes, ++ * to ensure memory is not read past this structs boundary ++ * (hash key calculation reads 8 bytes if this struct is size 5 bytes). 
++ */ + struct cdev_key { + uint16_t lcore_id; + uint8_t cipher_algo; + uint8_t auth_algo; + uint8_t aead_algo; ++ uint8_t padding[3]; /* padding to 8-byte size should be zeroed */ + }; + + struct socket_ctx { diff --git a/dpdk/examples/ipsec-secgw/ipsec_process.c b/dpdk/examples/ipsec-secgw/ipsec_process.c index 3fc4b3a84f..285e9c7f4c 100644 --- a/dpdk/examples/ipsec-secgw/ipsec_process.c @@ -44705,7 +83510,7 @@ index 7419e85db2..5fe91b62e4 100644 /* diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c -index 30bc693e06..89131f71e5 100644 +index 30bc693e06..49d16f055b 100644 --- a/dpdk/examples/ipsec-secgw/sa.c +++ b/dpdk/examples/ipsec-secgw/sa.c @@ -897,7 +897,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, @@ -44726,7 +83531,42 @@ index 30bc693e06..89131f71e5 100644 "IPv4(%d) and IPv6 (%d) SP rules\n", __func__, spi, rc4, rc6); return -EINVAL; -@@ -1458,9 +1458,16 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss, +@@ -1223,6 +1223,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + struct ipsec_sa *sa; + uint32_t i, idx; + uint16_t iv_length, aad_length; ++ uint16_t auth_iv_length = 0; + int inline_status; + int32_t rc; + struct rte_ipsec_session *ips; +@@ -1315,7 +1316,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + + /* AES_GMAC uses salt like AEAD algorithms */ + if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) +- iv_length = 12; ++ auth_iv_length = 12; + + if (inbound) { + sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +@@ -1339,7 +1340,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + sa_ctx->xf[idx].a.auth.op = + RTE_CRYPTO_AUTH_OP_VERIFY; + sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET; +- sa_ctx->xf[idx].a.auth.iv.length = iv_length; ++ sa_ctx->xf[idx].a.auth.iv.length = auth_iv_length; + + } else { /* outbound */ + sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER; +@@ -1363,7 +1364,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], + sa_ctx->xf[idx].b.auth.op = + RTE_CRYPTO_AUTH_OP_GENERATE; + sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET; +- sa_ctx->xf[idx].b.auth.iv.length = iv_length; ++ sa_ctx->xf[idx].b.auth.iv.length = auth_iv_length; + + } + +@@ -1458,9 +1459,16 @@ fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss, RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT : RTE_SECURITY_IPSEC_SA_MODE_TUNNEL; prm->ipsec_xform.options.udp_encap = ss->udp_encap; @@ -44743,7 +83583,7 @@ index 30bc693e06..89131f71e5 100644 if (IS_IP4_TUNNEL(ss->flags)) { prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4; prm->tun.hdr_len = sizeof(*v4); -@@ -1513,13 +1520,13 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size) +@@ -1513,13 +1521,13 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size) .version_ihl = IPVERSION << 4 | sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER, .time_to_live = IPDEFTTL, @@ -44759,7 +83599,7 @@ index 30bc693e06..89131f71e5 100644 }; if (IS_IP6_TUNNEL(lsa->flags)) { -@@ -1550,7 +1557,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size) +@@ -1550,7 +1558,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size) } /* @@ -44768,6 +83608,76 @@ index 30bc693e06..89131f71e5 100644 * one per session. 
*/ static int +@@ -1766,10 +1774,18 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads, + struct ipsec_sa *rule; + uint32_t idx_sa; + enum rte_security_session_action_type rule_type; ++ struct rte_eth_dev_info dev_info; ++ int ret; + + *rx_offloads = 0; + *tx_offloads = 0; + ++ ret = rte_eth_dev_info_get(port_id, &dev_info); ++ if (ret != 0) ++ rte_exit(EXIT_FAILURE, ++ "Error during getting device (port %u) info: %s\n", ++ port_id, strerror(-ret)); ++ + /* Check for inbound rules that use offloads and use this port */ + for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) { + rule = &sa_in[idx_sa]; +@@ -1785,13 +1801,43 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads, + for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) { + rule = &sa_out[idx_sa]; + rule_type = ipsec_get_action_type(rule); +- if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || +- rule_type == +- RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) +- && rule->portid == port_id) { +- *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY; +- if (rule->mss) +- *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; ++ if (rule->portid == port_id) { ++ switch (rule_type) { ++ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: ++ /* Checksum offload is not needed for inline ++ * protocol as all processing for Outbound IPSec ++ * packets will be implicitly taken care and for ++ * non-IPSec packets, there is no need of ++ * IPv4 Checksum offload. ++ */ ++ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY; ++ if (rule->mss) ++ *tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO | ++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM); ++ break; ++ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: ++ *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY; ++ if (rule->mss) ++ *tx_offloads |= ++ RTE_ETH_TX_OFFLOAD_TCP_TSO; ++ if (dev_info.tx_offload_capa & ++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ++ *tx_offloads |= ++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; ++ break; ++ default: ++ /* Enable IPv4 checksum offload even if ++ * one of lookaside SA's are present. ++ */ ++ if (dev_info.tx_offload_capa & ++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ++ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; ++ break; ++ } ++ } else { ++ if (dev_info.tx_offload_capa & ++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ++ *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; + } + } + return 0; diff --git a/dpdk/examples/ipsec-secgw/sp4.c b/dpdk/examples/ipsec-secgw/sp4.c index beddd7bc1d..fc4101a4a2 100644 --- a/dpdk/examples/ipsec-secgw/sp4.c @@ -44795,10 +83705,10 @@ index 328e085288..cce4da7862 100644 tokens[ti]); return; diff --git a/dpdk/examples/ipsec-secgw/test/common_defs.sh b/dpdk/examples/ipsec-secgw/test/common_defs.sh -index f22eb3ab12..3ef06bc761 100644 +index f22eb3ab12..6e04ffc1a6 100644 --- a/dpdk/examples/ipsec-secgw/test/common_defs.sh +++ b/dpdk/examples/ipsec-secgw/test/common_defs.sh -@@ -20,7 +20,7 @@ REMOTE_MAC=`ssh ${REMOTE_HOST} ip addr show dev ${REMOTE_IFACE}` +@@ -20,13 +20,13 @@ REMOTE_MAC=`ssh ${REMOTE_HOST} ip addr show dev ${REMOTE_IFACE}` st=$? 
REMOTE_MAC=`echo ${REMOTE_MAC} | sed -e 's/^.*ether //' -e 's/ brd.*$//'` if [[ $st -ne 0 || -z "${REMOTE_MAC}" ]]; then @@ -44807,6 +83717,13 @@ index f22eb3ab12..3ef06bc761 100644 exit 127 fi + LOCAL_IFACE=dtap0 + +-LOCAL_MAC="00:64:74:61:70:30" ++LOCAL_MAC="02:64:74:61:70:30" + + REMOTE_IPV4=192.168.31.14 + LOCAL_IPV4=192.168.31.92 @@ -40,7 +40,7 @@ DPDK_VARS="" # by default ipsec-secgw can't deal with multi-segment packets @@ -44838,6 +83755,18 @@ index d324ee2241..e99ef5c38a 100644 signal(SIGUSR1, signal_handler); signal(SIGUSR2, signal_handler); signal(SIGRTMIN, signal_handler); +diff --git a/dpdk/examples/l2fwd-cat/Makefile b/dpdk/examples/l2fwd-cat/Makefile +index 23a09550a4..d06053451a 100644 +--- a/dpdk/examples/l2fwd-cat/Makefile ++++ b/dpdk/examples/l2fwd-cat/Makefile +@@ -35,6 +35,7 @@ endif + endif + + CFLAGS += -DALLOW_EXPERIMENTAL_API ++CFLAGS += -D_GNU_SOURCE + + LDFLAGS += -lpqos + diff --git a/dpdk/examples/l2fwd-cat/l2fwd-cat.c b/dpdk/examples/l2fwd-cat/l2fwd-cat.c index d9cf00c9df..6e16705e99 100644 --- a/dpdk/examples/l2fwd-cat/l2fwd-cat.c @@ -44852,7 +83781,7 @@ index d9cf00c9df..6e16705e99 100644 argc -= ret; argv += ret; diff --git a/dpdk/examples/l2fwd-crypto/main.c b/dpdk/examples/l2fwd-crypto/main.c -index 4d9f8861af..b1e2613ccf 100644 +index 4d9f8861af..8863a93f10 100644 --- a/dpdk/examples/l2fwd-crypto/main.c +++ b/dpdk/examples/l2fwd-crypto/main.c @@ -252,11 +252,9 @@ struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; @@ -44910,6 +83839,15 @@ index 4d9f8861af..b1e2613ccf 100644 enabled_portcount++; } +@@ -2810,7 +2808,7 @@ main(int argc, char **argv) + /* Enable Ethernet ports */ + enabled_portcount = initialize_ports(&options); + if (enabled_portcount < 1) +- rte_exit(EXIT_FAILURE, "Failed to initial Ethernet ports\n"); ++ rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n"); + + /* Initialize the port/queue configuration of each logical core */ + RTE_ETH_FOREACH_DEV(portid) { diff --git a/dpdk/examples/l2fwd-event/l2fwd_event_generic.c b/dpdk/examples/l2fwd-event/l2fwd_event_generic.c index f31569a744..1977e23261 100644 --- a/dpdk/examples/l2fwd-event/l2fwd_event_generic.c @@ -44996,7 +83934,7 @@ index b8b3be2b8a..20e5b59af9 100644 static int diff --git a/dpdk/examples/l3fwd/l3fwd.h b/dpdk/examples/l3fwd/l3fwd.h -index 38ca19133c..4ccdb28b4a 100644 +index 38ca19133c..2d30e6fffa 100644 --- a/dpdk/examples/l3fwd/l3fwd.h +++ b/dpdk/examples/l3fwd/l3fwd.h @@ -81,6 +81,10 @@ struct lcore_conf { @@ -45010,6 +83948,25 @@ index 38ca19133c..4ccdb28b4a 100644 /* ethernet addresses of ports */ extern uint64_t dest_eth_addr[RTE_MAX_ETHPORTS]; extern struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; +@@ -96,6 +100,8 @@ extern xmm_t val_eth[RTE_MAX_ETHPORTS]; + + extern struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + ++extern uint32_t max_pkt_len; ++ + /* Send burst of packets on an output interface */ + static inline int + send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) +@@ -182,6 +188,9 @@ is_valid_ipv4_pkt(struct rte_ipv4_hdr *pkt, uint32_t link_len) + int + init_mem(uint16_t portid, unsigned int nb_mbuf); + ++int config_port_max_pkt_len(struct rte_eth_conf *conf, ++ struct rte_eth_dev_info *dev_info); ++ + /* Function pointers for LPM, EM or FIB functionality. 
*/ + void + setup_lpm(const int socketid); diff --git a/dpdk/examples/l3fwd/l3fwd_altivec.h b/dpdk/examples/l3fwd/l3fwd_altivec.h index ed9b0b8f25..88fb41843b 100644 --- a/dpdk/examples/l3fwd/l3fwd_altivec.h @@ -45225,7 +84182,7 @@ index 7d83ff641a..8e4c27218f 100644 } diff --git a/dpdk/examples/l3fwd/l3fwd_event.c b/dpdk/examples/l3fwd/l3fwd_event.c -index dd9eafa9b9..7a401290f8 100644 +index dd9eafa9b9..e72add8d64 100644 --- a/dpdk/examples/l3fwd/l3fwd_event.c +++ b/dpdk/examples/l3fwd/l3fwd_event.c @@ -43,8 +43,6 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf) @@ -45237,6 +84194,19 @@ index dd9eafa9b9..7a401290f8 100644 unsigned int nb_lcores = rte_lcore_count(); struct rte_eth_conf local_port_conf; struct rte_eth_dev_info dev_info; +@@ -75,6 +73,12 @@ l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf) + rte_panic("Error during getting device (port %u) info:" + "%s\n", port_id, strerror(-ret)); + ++ ret = config_port_max_pkt_len(&local_port_conf, &dev_info); ++ if (ret != 0) ++ rte_exit(EXIT_FAILURE, ++ "Invalid max packet length: %u (port %u)\n", ++ max_pkt_len, port_id); ++ + if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) + local_port_conf.txmode.offloads |= + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; diff --git a/dpdk/examples/l3fwd/l3fwd_event_internal_port.c b/dpdk/examples/l3fwd/l3fwd_event_internal_port.c index 1e8f46bc11..32cf657148 100644 --- a/dpdk/examples/l3fwd/l3fwd_event_internal_port.c @@ -45335,7 +84305,7 @@ index bb565ed546..d5a717e18c 100644 * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>. * We doing 4 comparisons at once and the result is 4 bit mask. diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c -index eb68ffc5aa..3a0e15109b 100644 +index eb68ffc5aa..59436bb589 100644 --- a/dpdk/examples/l3fwd/main.c +++ b/dpdk/examples/l3fwd/main.c @@ -53,9 +53,8 @@ @@ -45350,6 +84320,15 @@ index eb68ffc5aa..3a0e15109b 100644 /**< Ports set in promiscuous mode off by default. */ static int promiscuous_on; +@@ -134,7 +133,7 @@ static struct rte_eth_conf port_conf = { + }, + }; + +-static uint32_t max_pkt_len; ++uint32_t max_pkt_len; + + static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS]; + static struct rte_mempool *vector_pool[RTE_MAX_ETHPORTS]; @@ -342,6 +341,8 @@ print_usage(const char *prgname) " [-P]" " [--lookup]" @@ -45460,6 +84439,15 @@ index eb68ffc5aa..3a0e15109b 100644 case CMD_LINE_OPT_ETH_DEST_NUM: parse_eth_dest(optarg); break; +@@ -1043,7 +1094,7 @@ eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu) + return overhead_len; + } + +-static int ++int + config_port_max_pkt_len(struct rte_eth_conf *conf, + struct rte_eth_dev_info *dev_info) + { diff --git a/dpdk/examples/link_status_interrupt/main.c b/dpdk/examples/link_status_interrupt/main.c index 551f0524da..9699e14ce6 100644 --- a/dpdk/examples/link_status_interrupt/main.c @@ -45526,7 +84514,7 @@ index b35886a77b..050337765f 100644 * secondary process, just with a different proc-id parameter in each case * (apart from the EAL flag to indicate a secondary process). 
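
[editor's note] The l3fwd hunks above export config_port_max_pkt_len() and max_pkt_len so that the event-mode port setup in l3fwd_event.c can validate --max-pkt-len against each device's limits. Below is a minimal standalone sketch of the frame-length to MTU arithmetic those hunks rely on; the 18-byte fallback (Ethernet header + CRC) and the sample device limits are illustrative assumptions, not values taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the style of eth_dev_get_overhead_len() in the hunk above:
 * if the device reports both a max frame length and a max MTU, the L2
 * overhead is their difference; otherwise fall back to Ethernet
 * header (14 bytes) plus CRC (4 bytes). */
static uint32_t
overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		return max_rx_pktlen - max_mtu;
	return 14 + 4;
}

int
main(void)
{
	uint32_t max_pkt_len = 9000;       /* hypothetical --max-pkt-len value */
	uint32_t dev_max_rx_pktlen = 9728; /* hypothetical device frame limit */
	uint16_t dev_max_mtu = 9710;       /* hypothetical device MTU limit */
	uint32_t oh = overhead_len(dev_max_rx_pktlen, dev_max_mtu);

	if (max_pkt_len < 64 || max_pkt_len > dev_max_rx_pktlen) {
		fprintf(stderr, "invalid max-pkt-len %u\n", max_pkt_len);
		return 1;
	}
	/* The MTU programmed into rxmode is the frame length minus overhead. */
	printf("mtu = %u (overhead %u)\n", max_pkt_len - oh, oh);
	return 0;
}
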
diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c -index f110fc129f..81964d0308 100644 +index f110fc129f..41bb536141 100644 --- a/dpdk/examples/ntb/ntb_fwd.c +++ b/dpdk/examples/ntb/ntb_fwd.c @@ -696,7 +696,7 @@ assign_stream_to_lcores(void) @@ -45538,6 +84526,24 @@ index f110fc129f..81964d0308 100644 RTE_LCORE_FOREACH_WORKER(lcore_id) { conf = &fwd_lcore_conf[lcore_id]; +@@ -865,7 +865,7 @@ ntb_stats_clear(void) + + /* Clear NTB dev stats */ + nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0); +- if (nb_ids < 0) { ++ if (nb_ids <= 0) { + printf("Error: Cannot get count of xstats\n"); + return; + } +@@ -923,7 +923,7 @@ ntb_stats_display(void) + + /* Get NTB dev stats and stats names */ + nb_ids = rte_rawdev_xstats_names_get(dev_id, NULL, 0); +- if (nb_ids < 0) { ++ if (nb_ids <= 0) { + printf("Error: Cannot get count of xstats\n"); + return; + } diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c index b01ac60fd1..99e67ef67b 100644 --- a/dpdk/examples/packet_ordering/main.c @@ -45760,6 +84766,19 @@ index 10ca7bea61..ff51d0215a 100644 if ((app_used_core_mask != app_eal_core_mask()) || (app_main_core != rte_get_main_lcore())) { +diff --git a/dpdk/examples/qos_sched/cfg_file.c b/dpdk/examples/qos_sched/cfg_file.c +index 450482f07d..69b5081398 100644 +--- a/dpdk/examples/qos_sched/cfg_file.c ++++ b/dpdk/examples/qos_sched/cfg_file.c +@@ -155,7 +155,7 @@ cfg_load_subport_profile(struct rte_cfgfile *cfg, + + profiles = rte_cfgfile_num_sections(cfg, "subport profile", + sizeof("subport profile") - 1); +- subport_params[0].n_pipe_profiles = profiles; ++ port_params.n_subport_profiles = profiles; + + for (i = 0; i < profiles; i++) { + char sec_name[32]; diff --git a/dpdk/examples/qos_sched/cmdline.c b/dpdk/examples/qos_sched/cmdline.c index 257b87a7cf..6691b02d89 100644 --- a/dpdk/examples/qos_sched/cmdline.c @@ -45773,6 +84792,101 @@ index 257b87a7cf..6691b02d89 100644 ); } +diff --git a/dpdk/examples/qos_sched/init.c b/dpdk/examples/qos_sched/init.c +index 3c1f0bc680..3a7e7d80a1 100644 +--- a/dpdk/examples/qos_sched/init.c ++++ b/dpdk/examples/qos_sched/init.c +@@ -81,6 +81,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) + if (app_inited_port_mask & (1u << portid)) + return 0; + ++ memset(&rx_conf, 0, sizeof(struct rte_eth_rxconf)); + rx_conf.rx_thresh.pthresh = rx_thresh.pthresh; + rx_conf.rx_thresh.hthresh = rx_thresh.hthresh; + rx_conf.rx_thresh.wthresh = rx_thresh.wthresh; +@@ -88,6 +89,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) + rx_conf.rx_drop_en = 0; + rx_conf.rx_deferred_start = 0; + ++ memset(&tx_conf, 0, sizeof(struct rte_eth_txconf)); + tx_conf.tx_thresh.pthresh = tx_thresh.pthresh; + tx_conf.tx_thresh.hthresh = tx_thresh.hthresh; + tx_conf.tx_thresh.wthresh = tx_thresh.wthresh; +@@ -391,6 +393,8 @@ int app_init(void) + for(i = 0; i < nb_pfc; i++) { + uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core); + struct rte_ring *ring; ++ struct rte_eth_link link = {0}; ++ int retry_count = 100, retry_delay = 100; /* try every 100ms for 10 sec */ + + snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core); + ring = rte_ring_lookup(ring_name); +@@ -421,6 +425,14 @@ int app_init(void) + app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool); + app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool); + ++ rte_eth_link_get(qos_conf[i].tx_port, &link); ++ if (link.link_status == 0) ++ printf("Waiting for link on port %u\n", qos_conf[i].tx_port); ++ while (link.link_status == 0 
&& retry_count--) { ++ rte_delay_ms(retry_delay); ++ rte_eth_link_get(qos_conf[i].tx_port, &link); ++ } ++ + qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket); + } + +diff --git a/dpdk/examples/qos_sched/profile.cfg b/dpdk/examples/qos_sched/profile.cfg +index d4b21c0170..20b8b20f23 100644 +--- a/dpdk/examples/qos_sched/profile.cfg ++++ b/dpdk/examples/qos_sched/profile.cfg +@@ -26,7 +26,7 @@ number of subports per port = 1 + number of pipes per subport = 4096 + queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 + +-subport 0-8 = 0 ; These subports are configured with subport profile 0 ++pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 + + [subport profile 0] + tb rate = 1250000000 ; Bytes per second +@@ -48,8 +48,6 @@ tc 12 rate = 1250000000 ; Bytes per second + + tc period = 10 ; Milliseconds + +-pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0 +- + ; Pipe configuration + [pipe profile 0] + tb rate = 305175 ; Bytes per second +diff --git a/dpdk/examples/qos_sched/profile_ov.cfg b/dpdk/examples/qos_sched/profile_ov.cfg +index 14c89ae340..b6fe21ee1e 100644 +--- a/dpdk/examples/qos_sched/profile_ov.cfg ++++ b/dpdk/examples/qos_sched/profile_ov.cfg +@@ -6,12 +6,14 @@ + frame overhead = 24 + number of subports per port = 1 + ++subport 0-8 = 0 ++ + ; Subport configuration + [subport 0] + number of pipes per subport = 32 + queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 + +-subport 0-8 = 0 ++pipe 0-31 = 0 ; These pipes are configured with pipe profile 0 + + [subport profile 0] + tb rate = 8400000 ; Bytes per second +@@ -32,8 +34,6 @@ tc 11 rate = 8400000 ; Bytes per second + tc 12 rate = 8400000 ; Bytes per second + tc period = 10 ; Milliseconds + +-pipe 0-31 = 0 ; These pipes are configured with pipe profile 0 +- + ; Pipe configuration + [pipe profile 0] + tb rate = 16800000 ; Bytes per second diff --git a/dpdk/examples/server_node_efd/node/node.c b/dpdk/examples/server_node_efd/node/node.c index ba1c7e5153..fc2aa5ffef 100644 --- a/dpdk/examples/server_node_efd/node/node.c @@ -45800,7 +84914,7 @@ index 16435ee3cc..518cd72179 100644 argc -= ret; argv += ret; diff --git a/dpdk/examples/vhost/main.c b/dpdk/examples/vhost/main.c -index 33d023aa39..f9e932061f 100644 +index 33d023aa39..36464922e3 100644 --- a/dpdk/examples/vhost/main.c +++ b/dpdk/examples/vhost/main.c @@ -32,6 +32,8 @@ @@ -45908,64 +85022,73 @@ index 33d023aa39..f9e932061f 100644 case OPT_CLIENT_NUM: client_mode = 1; break; -@@ -873,31 +900,43 @@ sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev, +@@ -873,6 +900,26 @@ sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev, } } --static __rte_always_inline void --drain_vhost(struct vhost_dev *vdev) +static __rte_always_inline uint16_t +enqueue_pkts(struct vhost_dev *vdev, struct rte_mbuf **pkts, uint16_t rx_count) - { -- uint16_t ret; -- uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid; -- uint16_t nr_xmit = vhost_txbuff[buff_idx]->len; -- struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table; ++{ + uint16_t enqueue_count; - - if (builtin_net_driver) { -- ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit); ++ ++ if (builtin_net_driver) { + enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count); - } else if (async_vhost_driver) { - uint16_t enqueue_fail = 0; - - complete_async_pkts(vdev); -- ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit); -- __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST); ++ } else if 
(async_vhost_driver) { ++ complete_async_pkts(vdev); + enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid, + VIRTIO_RXQ, pkts, rx_count); + __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST); - -- enqueue_fail = nr_xmit - ret; -+ enqueue_fail = rx_count - enqueue_count; - if (enqueue_fail) -- free_pkts(&m[ret], nr_xmit - ret); -+ free_pkts(&pkts[enqueue_count], enqueue_fail); -+ - } else { -- ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, -- m, nr_xmit); ++ } else { + enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, + pkts, rx_count); - } - ++ } ++ + return enqueue_count; +} + -+static __rte_always_inline void -+drain_vhost(struct vhost_dev *vdev) -+{ -+ uint16_t ret; -+ uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid; -+ uint16_t nr_xmit = vhost_txbuff[buff_idx]->len; -+ struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table; -+ + static __rte_always_inline void + drain_vhost(struct vhost_dev *vdev) + { +@@ -881,22 +928,7 @@ drain_vhost(struct vhost_dev *vdev) + uint16_t nr_xmit = vhost_txbuff[buff_idx]->len; + struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table; + +- if (builtin_net_driver) { +- ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit); +- } else if (async_vhost_driver) { +- uint16_t enqueue_fail = 0; +- +- complete_async_pkts(vdev); +- ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit); +- __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST); +- +- enqueue_fail = nr_xmit - ret; +- if (enqueue_fail) +- free_pkts(&m[ret], nr_xmit - ret); +- } else { +- ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ, +- m, nr_xmit); +- } + ret = enqueue_pkts(vdev, m, nr_xmit); -+ + if (enable_stats) { __atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit, +@@ -905,8 +937,13 @@ drain_vhost(struct vhost_dev *vdev) __ATOMIC_SEQ_CST); -@@ -1190,44 +1229,19 @@ drain_eth_rx(struct vhost_dev *vdev) + } + +- if (!async_vhost_driver) ++ if (!async_vhost_driver) { + free_pkts(m, nr_xmit); ++ } else { ++ uint16_t enqueue_fail = nr_xmit - ret; ++ if (enqueue_fail > 0) ++ free_pkts(&m[ret], enqueue_fail); ++ } + } + + static __rte_always_inline void +@@ -1190,44 +1227,19 @@ drain_eth_rx(struct vhost_dev *vdev) if (!rx_count) return; @@ -45979,12 +85102,12 @@ index 33d023aa39..f9e932061f 100644 - VIRTIO_RXQ))) { - uint32_t retry; + enqueue_count = enqueue_pkts(vdev, pkts, rx_count); -+ + +- for (retry = 0; retry < burst_rx_retry_num; retry++) { + /* Retry if necessary */ + if (enable_retry && unlikely(enqueue_count < rx_count)) { + uint32_t retry = 0; - -- for (retry = 0; retry < burst_rx_retry_num; retry++) { ++ + while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) { rte_delay_us(burst_rx_delay_time); - if (rx_count <= rte_vhost_avail_entries(vdev->vid, @@ -46018,7 +85141,22 @@ index 33d023aa39..f9e932061f 100644 if (enable_stats) { __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count, __ATOMIC_SEQ_CST); -@@ -1289,7 +1303,7 @@ switch_worker(void *arg __rte_unused) +@@ -1235,8 +1247,13 @@ drain_eth_rx(struct vhost_dev *vdev) + __ATOMIC_SEQ_CST); + } + +- if (!async_vhost_driver) ++ if (!async_vhost_driver) { + free_pkts(pkts, rx_count); ++ } else { ++ uint16_t enqueue_fail = rx_count - enqueue_count; ++ if (enqueue_fail > 0) ++ free_pkts(&pkts[enqueue_count], enqueue_fail); ++ } + } + + static __rte_always_inline void +@@ -1289,7 +1306,7 @@ switch_worker(void *arg __rte_unused) struct vhost_dev *vdev; struct mbuf_table *tx_q; @@ -46027,7 +85165,7 @@ index 33d023aa39..f9e932061f 100644 
tx_q = &lcore_tx_queue[lcore_id]; for (i = 0; i < rte_lcore_count(); i++) { -@@ -1333,7 +1347,7 @@ switch_worker(void *arg __rte_unused) +@@ -1333,7 +1350,7 @@ switch_worker(void *arg __rte_unused) /* * Remove a device from the specific data core linked list and from the @@ -46036,7 +85174,7 @@ index 33d023aa39..f9e932061f 100644 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering * of dev->remove=1 which can cause an infinite loop in the rte_pause loop. */ -@@ -1606,57 +1620,6 @@ sigint_handler(__rte_unused int signum) +@@ -1606,57 +1623,6 @@ sigint_handler(__rte_unused int signum) exit(0); } @@ -46094,7 +85232,7 @@ index 33d023aa39..f9e932061f 100644 /* * Main function, does initialisation and calls the per-lcore functions. */ -@@ -1715,8 +1678,11 @@ main(int argc, char *argv[]) +@@ -1715,8 +1681,11 @@ main(int argc, char *argv[]) * many queues here. We probably should only do allocation for * those queues we are going to use. */ @@ -46108,6 +85246,86 @@ index 33d023aa39..f9e932061f 100644 if (vm2vm_mode == VM2VM_HARDWARE) { /* Enable VT loop back to let L2 switch to do it. */ +diff --git a/dpdk/examples/vm_power_manager/channel_manager.c b/dpdk/examples/vm_power_manager/channel_manager.c +index 838465ab4b..7d7efdd05a 100644 +--- a/dpdk/examples/vm_power_manager/channel_manager.c ++++ b/dpdk/examples/vm_power_manager/channel_manager.c +@@ -22,6 +22,7 @@ + #include <rte_mempool.h> + #include <rte_log.h> + #include <rte_spinlock.h> ++#include <rte_tailq.h> + + #include <libvirt/libvirt.h> + +@@ -58,16 +59,16 @@ struct virtual_machine_info { + virDomainInfo info; + rte_spinlock_t config_spinlock; + int allow_query; +- LIST_ENTRY(virtual_machine_info) vms_info; ++ RTE_TAILQ_ENTRY(virtual_machine_info) vms_info; + }; + +-LIST_HEAD(, virtual_machine_info) vm_list_head; ++RTE_TAILQ_HEAD(, virtual_machine_info) vm_list_head; + + static struct virtual_machine_info * + find_domain_by_name(const char *name) + { + struct virtual_machine_info *info; +- LIST_FOREACH(info, &vm_list_head, vms_info) { ++ RTE_TAILQ_FOREACH(info, &vm_list_head, vms_info) { + if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1)) + return info; + } +@@ -878,7 +879,7 @@ add_vm(const char *vm_name) + + new_domain->allow_query = 0; + rte_spinlock_init(&(new_domain->config_spinlock)); +- LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info); ++ TAILQ_INSERT_HEAD(&vm_list_head, new_domain, vms_info); + return 0; + } + +@@ -900,7 +901,7 @@ remove_vm(const char *vm_name) + rte_spinlock_unlock(&vm_info->config_spinlock); + return -1; + } +- LIST_REMOVE(vm_info, vms_info); ++ TAILQ_REMOVE(&vm_list_head, vm_info, vms_info); + rte_spinlock_unlock(&vm_info->config_spinlock); + rte_free(vm_info); + return 0; +@@ -953,7 +954,7 @@ channel_manager_init(const char *path __rte_unused) + { + virNodeInfo info; + +- LIST_INIT(&vm_list_head); ++ TAILQ_INIT(&vm_list_head); + if (connect_hypervisor(path) < 0) { + global_n_host_cpus = 64; + global_hypervisor_available = 0; +@@ -1005,9 +1006,9 @@ channel_manager_exit(void) + { + unsigned i; + char mask[RTE_MAX_LCORE]; +- struct virtual_machine_info *vm_info; ++ struct virtual_machine_info *vm_info, *tmp; + +- LIST_FOREACH(vm_info, &vm_list_head, vms_info) { ++ RTE_TAILQ_FOREACH_SAFE(vm_info, &vm_list_head, vms_info, tmp) { + + rte_spinlock_lock(&(vm_info->config_spinlock)); + +@@ -1022,7 +1023,7 @@ channel_manager_exit(void) + } + rte_spinlock_unlock(&(vm_info->config_spinlock)); + +- LIST_REMOVE(vm_info, vms_info); ++ TAILQ_REMOVE(&vm_list_head, vm_info, vms_info); + 
rte_free(vm_info); + } + diff --git a/dpdk/examples/vm_power_manager/channel_monitor.c b/dpdk/examples/vm_power_manager/channel_monitor.c index d767423a40..97b8def7ca 100644 --- a/dpdk/examples/vm_power_manager/channel_monitor.c @@ -46147,6 +85365,28 @@ index 2c00a942f1..10410b8783 100644 static const struct rte_eth_conf vmdq_conf_default = { .rxmode = { .mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY, +diff --git a/dpdk/kernel/freebsd/contigmem/contigmem.c b/dpdk/kernel/freebsd/contigmem/contigmem.c +index bd72f4d620..7dd87599d9 100644 +--- a/dpdk/kernel/freebsd/contigmem/contigmem.c ++++ b/dpdk/kernel/freebsd/contigmem/contigmem.c +@@ -111,7 +111,7 @@ static struct cdevsw contigmem_ops = { + }; + + static int +-contigmem_load() ++contigmem_load(void) + { + char index_string[8], description[32]; + int i, error = 0; +@@ -178,7 +178,7 @@ contigmem_load() + } + + static int +-contigmem_unload() ++contigmem_unload(void) + { + int i; + diff --git a/dpdk/kernel/freebsd/meson.build b/dpdk/kernel/freebsd/meson.build index bf5aa20a55..1f612711be 100644 --- a/dpdk/kernel/freebsd/meson.build @@ -46163,10 +85403,10 @@ index bf5aa20a55..1f612711be 100644 # to avoid warnings due to race conditions with creating the dev_if.h, etc. diff --git a/dpdk/kernel/linux/kni/compat.h b/dpdk/kernel/linux/kni/compat.h -index 664785674f..3a86d12bbc 100644 +index 664785674f..8beb670465 100644 --- a/dpdk/kernel/linux/kni/compat.h +++ b/dpdk/kernel/linux/kni/compat.h -@@ -141,3 +141,11 @@ +@@ -141,3 +141,17 @@ #if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE #define HAVE_TSK_IN_GUP #endif @@ -46175,9 +85415,37 @@ index 664785674f..3a86d12bbc 100644 +#define HAVE_ETH_HW_ADDR_SET +#endif + -+#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE ++#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE && \ ++ (!(defined(RHEL_RELEASE_CODE) && \ ++ RHEL_RELEASE_VERSION(9, 1) <= RHEL_RELEASE_CODE)) +#define HAVE_NETIF_RX_NI +#endif ++ ++#if KERNEL_VERSION(6, 5, 0) > LINUX_VERSION_CODE ++#define HAVE_VMA_IN_GUP ++#endif +diff --git a/dpdk/kernel/linux/kni/kni_dev.h b/dpdk/kernel/linux/kni/kni_dev.h +index e8633486ee..417c67b5ae 100644 +--- a/dpdk/kernel/linux/kni/kni_dev.h ++++ b/dpdk/kernel/linux/kni/kni_dev.h +@@ -105,11 +105,13 @@ static inline phys_addr_t iova_to_phys(struct task_struct *tsk, + + /* Read one page struct info */ + #ifdef HAVE_TSK_IN_GUP +- ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, +- FOLL_TOUCH, &page, NULL, NULL); ++ ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, 0, &page, NULL, NULL); + #else +- ret = get_user_pages_remote(tsk->mm, iova, 1, +- FOLL_TOUCH, &page, NULL, NULL); ++ #ifdef HAVE_VMA_IN_GUP ++ ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL, NULL); ++ #else ++ ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL); ++ #endif + #endif + if (ret < 0) + return 0; diff --git a/dpdk/kernel/linux/kni/kni_fifo.h b/dpdk/kernel/linux/kni/kni_fifo.h index 5c91b55379..1ba5172002 100644 --- a/dpdk/kernel/linux/kni/kni_fifo.h @@ -46422,7 +85690,7 @@ index f316d3e875..2816632803 100644 if (rc == 0) { /* set data indexes. 
*/ diff --git a/dpdk/lib/acl/acl_run_altivec.h b/dpdk/lib/acl/acl_run_altivec.h -index 2de6f27b1f..4dfe7a14b4 100644 +index 2de6f27b1f..4556e1503b 100644 --- a/dpdk/lib/acl/acl_run_altivec.h +++ b/dpdk/lib/acl/acl_run_altivec.h @@ -41,7 +41,7 @@ resolve_priority_altivec(uint64_t transition, int n, @@ -46434,6 +85702,15 @@ index 2de6f27b1f..4dfe7a14b4 100644 xmm_t *saved_results, *saved_priority; for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) { +@@ -102,7 +102,7 @@ acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms, + /* + * Process 4 transitions (in 2 XMM registers) in parallel + */ +-static inline __attribute__((optimize("O2"))) xmm_t ++static __rte_always_inline xmm_t + transition4(xmm_t next_input, const uint64_t *trans, + xmm_t *indices1, xmm_t *indices2) + { @@ -110,8 +110,8 @@ transition4(xmm_t next_input, const uint64_t *trans, xmm_t in, node_type, r, t; xmm_t dfa_ofs, quad_ofs; @@ -46699,6 +85976,75 @@ index 26d165ad5c..b4d8e87c6d 100644 -#endif /* _BPF_H_ */ +#endif /* BPF_IMPL_H */ +diff --git a/dpdk/lib/cmdline/cmdline.c b/dpdk/lib/cmdline/cmdline.c +index 8f1854cb0b..5600f012c2 100644 +--- a/dpdk/lib/cmdline/cmdline.c ++++ b/dpdk/lib/cmdline/cmdline.c +@@ -199,9 +199,14 @@ cmdline_poll(struct cmdline *cl) + if (read_status < 0) + return read_status; + +- status = cmdline_in(cl, &c, 1); +- if (status < 0 && cl->rdl.status != RDLINE_EXITED) +- return status; ++ if (read_status == 0) { ++ /* end of file is implicit quit */ ++ cmdline_quit(cl); ++ } else { ++ status = cmdline_in(cl, &c, 1); ++ if (status < 0 && cl->rdl.status != RDLINE_EXITED) ++ return status; ++ } + } + + return cl->rdl.status; +diff --git a/dpdk/lib/cmdline/cmdline.h b/dpdk/lib/cmdline/cmdline.h +index 96674dfda2..b14355ef51 100644 +--- a/dpdk/lib/cmdline/cmdline.h ++++ b/dpdk/lib/cmdline/cmdline.h +@@ -23,6 +23,12 @@ + extern "C" { + #endif + ++enum rdline_status { ++ RDLINE_INIT, ++ RDLINE_RUNNING, ++ RDLINE_EXITED ++}; ++ + struct cmdline; + + struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out); +diff --git a/dpdk/lib/cmdline/cmdline_private.h b/dpdk/lib/cmdline/cmdline_private.h +index c2e906d8de..a3271c7693 100644 +--- a/dpdk/lib/cmdline/cmdline_private.h ++++ b/dpdk/lib/cmdline/cmdline_private.h +@@ -23,12 +23,6 @@ + #define RDLINE_HISTORY_BUF_SIZE BUFSIZ + #define RDLINE_HISTORY_MAX_LINE 64 + +-enum rdline_status { +- RDLINE_INIT, +- RDLINE_RUNNING, +- RDLINE_EXITED +-}; +- + struct rdline { + enum rdline_status status; + /* rdline bufs */ +diff --git a/dpdk/lib/compressdev/rte_compressdev.h b/dpdk/lib/compressdev/rte_compressdev.h +index 2840c27c6c..d9b2fe40dc 100644 +--- a/dpdk/lib/compressdev/rte_compressdev.h ++++ b/dpdk/lib/compressdev/rte_compressdev.h +@@ -353,7 +353,7 @@ rte_compressdev_stats_reset(uint8_t dev_id); + * @note The capabilities field of dev_info is set to point to the first + * element of an array of struct rte_compressdev_capabilities. + * The element after the last valid element has it's op field set to +- * RTE_COMP_ALGO_LIST_END. ++ * RTE_COMP_ALGO_UNSPECIFIED. 
+ */ + __rte_experimental + void diff --git a/dpdk/lib/compressdev/rte_compressdev_internal.h b/dpdk/lib/compressdev/rte_compressdev_internal.h index 22ceac66e2..b3b193e3ee 100644 --- a/dpdk/lib/compressdev/rte_compressdev_internal.h @@ -46742,8 +86088,71 @@ index 22ceac66e2..b3b193e3ee 100644 +#endif + #endif +diff --git a/dpdk/lib/compressdev/rte_compressdev_pmd.c b/dpdk/lib/compressdev/rte_compressdev_pmd.c +index 7f500d76d4..6a11a396b7 100644 +--- a/dpdk/lib/compressdev/rte_compressdev_pmd.c ++++ b/dpdk/lib/compressdev/rte_compressdev_pmd.c +@@ -20,6 +20,9 @@ rte_compressdev_pmd_parse_name_arg(const char *key __rte_unused, + struct rte_compressdev_pmd_init_params *params = extra_args; + int n; + ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + n = strlcpy(params->name, value, RTE_COMPRESSDEV_NAME_MAX_LEN); + if (n >= RTE_COMPRESSDEV_NAME_MAX_LEN) + return -EINVAL; +@@ -37,6 +40,9 @@ rte_compressdev_pmd_parse_uint_arg(const char *key __rte_unused, + int i; + char *end; + ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + errno = 0; + i = strtol(value, &end, 10); + if (*end != 0 || errno != 0 || i < 0) +diff --git a/dpdk/lib/cryptodev/cryptodev_pmd.c b/dpdk/lib/cryptodev/cryptodev_pmd.c +index 739a0b3f34..d1d8ecef98 100644 +--- a/dpdk/lib/cryptodev/cryptodev_pmd.c ++++ b/dpdk/lib/cryptodev/cryptodev_pmd.c +@@ -19,6 +19,9 @@ rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused, + struct rte_cryptodev_pmd_init_params *params = extra_args; + int n; + ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN); + if (n >= RTE_CRYPTODEV_NAME_MAX_LEN) + return -EINVAL; +@@ -35,6 +38,10 @@ rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused, + { + int i; + char *end; ++ ++ if (value == NULL || extra_args == NULL) ++ return -EINVAL; ++ + errno = 0; + + i = strtol(value, &end, 10); +@@ -96,11 +103,11 @@ rte_cryptodev_pmd_create(const char *name, + struct rte_cryptodev *cryptodev; + + if (params->name[0] != '\0') { +- CDEV_LOG_INFO("User specified device name = %s\n", params->name); ++ CDEV_LOG_INFO("User specified device name = %s", params->name); + name = params->name; + } + +- CDEV_LOG_INFO("Creating cryptodev %s\n", name); ++ CDEV_LOG_INFO("Creating cryptodev %s", name); + + CDEV_LOG_INFO("Initialisation parameters - name: %s," + "socket id: %d, max queue pairs: %u", diff --git a/dpdk/lib/cryptodev/cryptodev_pmd.h b/dpdk/lib/cryptodev/cryptodev_pmd.h -index b9146f652c..56e659b474 100644 +index b9146f652c..bc71cada4a 100644 --- a/dpdk/lib/cryptodev/cryptodev_pmd.h +++ b/dpdk/lib/cryptodev/cryptodev_pmd.h @@ -5,6 +5,10 @@ @@ -46757,6 +86166,15 @@ index b9146f652c..56e659b474 100644 /** @file * RTE Crypto PMD APIs * +@@ -619,7 +623,7 @@ set_sym_session_private_data(struct rte_cryptodev_sym_session *sess, + uint8_t driver_id, void *private_data) + { + if (unlikely(sess->nb_drivers <= driver_id)) { +- CDEV_LOG_ERR("Set private data for driver %u not allowed\n", ++ CDEV_LOG_ERR("Set private data for driver %u not allowed", + driver_id); + return; + } @@ -640,4 +644,8 @@ set_asym_session_private_data(struct rte_cryptodev_asym_session *sess, sess->sess_private_data[driver_id] = private_data; } @@ -46808,6 +86226,126 @@ index 9c866f553f..9c5bb9233a 100644 /**< RSA private key is in quintuple format * See rte_crypto_rsa_priv_key_qt */ +diff --git a/dpdk/lib/cryptodev/rte_cryptodev.c b/dpdk/lib/cryptodev/rte_cryptodev.c +index a40536c5ea..00fdd18630 100644 +--- 
a/dpdk/lib/cryptodev/rte_cryptodev.c ++++ b/dpdk/lib/cryptodev/rte_cryptodev.c +@@ -1215,13 +1215,13 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, + } + + if (!qp_conf) { +- CDEV_LOG_ERR("qp_conf cannot be NULL\n"); ++ CDEV_LOG_ERR("qp_conf cannot be NULL"); + return -EINVAL; + } + + if ((qp_conf->mp_session && !qp_conf->mp_session_private) || + (!qp_conf->mp_session && qp_conf->mp_session_private)) { +- CDEV_LOG_ERR("Invalid mempools\n"); ++ CDEV_LOG_ERR("Invalid mempools"); + return -EINVAL; + } + +@@ -1234,7 +1234,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, + pool_priv = rte_mempool_get_priv(qp_conf->mp_session); + if (!pool_priv || qp_conf->mp_session->private_data_size < + sizeof(*pool_priv)) { +- CDEV_LOG_ERR("Invalid mempool\n"); ++ CDEV_LOG_ERR("Invalid mempool"); + return -EINVAL; + } + +@@ -1245,7 +1245,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, + obj_size) || (s.nb_drivers <= dev->driver_id) || + rte_cryptodev_sym_get_private_session_size(dev_id) > + obj_priv_size) { +- CDEV_LOG_ERR("Invalid mempool\n"); ++ CDEV_LOG_ERR("Invalid mempool"); + return -EINVAL; + } + } +@@ -1803,7 +1803,7 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, + + obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size; + if (obj_sz > elt_size) +- CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size, ++ CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size, + obj_sz); + else + obj_sz = elt_size; +@@ -1813,14 +1813,14 @@ rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, + NULL, NULL, NULL, NULL, + socket_id, 0); + if (mp == NULL) { +- CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", ++ CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d", + __func__, name, rte_errno); + return NULL; + } + + pool_priv = rte_mempool_get_priv(mp); + if (!pool_priv) { +- CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", ++ CDEV_LOG_ERR("%s(name=%s) failed to get private data", + __func__, name); + rte_mempool_free(mp); + return NULL; +@@ -1868,7 +1868,7 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp) + struct rte_cryptodev_sym_session_pool_private_data *pool_priv; + + if (!rte_cryptodev_sym_is_valid_session_pool(mp)) { +- CDEV_LOG_ERR("Invalid mempool\n"); ++ CDEV_LOG_ERR("Invalid mempool"); + return NULL; + } + +@@ -1902,7 +1902,7 @@ rte_cryptodev_asym_session_create(struct rte_mempool *mp) + rte_cryptodev_asym_get_header_session_size(); + + if (!mp) { +- CDEV_LOG_ERR("invalid mempool\n"); ++ CDEV_LOG_ERR("invalid mempool"); + return NULL; + } + +@@ -2286,7 +2286,7 @@ rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type, + elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), + sizeof(struct rte_crypto_asym_op)); + } else { +- CDEV_LOG_ERR("Invalid op_type\n"); ++ CDEV_LOG_ERR("Invalid op_type"); + return NULL; + } + +@@ -2472,7 +2472,7 @@ cryptodev_handle_dev_info(const char *cmd __rte_unused, + rte_tel_data_start_dict(d); + rte_tel_data_add_dict_string(d, "device_name", + cryptodev_info.device->name); +- rte_tel_data_add_dict_int(d, "max_nb_queue_pairs", ++ rte_tel_data_add_dict_u64(d, "max_nb_queue_pairs", + cryptodev_info.max_nb_queue_pairs); + + return 0; +diff --git a/dpdk/lib/cryptodev/rte_cryptodev.h b/dpdk/lib/cryptodev/rte_cryptodev.h +index 59ea5a54df..99fd4c3569 100644 +--- a/dpdk/lib/cryptodev/rte_cryptodev.h ++++ b/dpdk/lib/cryptodev/rte_cryptodev.h +@@ -897,6 +897,15 @@ struct rte_cryptodev_cb_rcu { + /**< RCU 
QSBR variable per queue pair */ + }; + ++/** ++ * Get the security context for the cryptodev. ++ * ++ * @param dev_id ++ * The device identifier. ++ * @return ++ * - NULL on error. ++ * - Pointer to security context on success. ++ */ + void * + rte_cryptodev_get_sec_ctx(uint8_t dev_id); + diff --git a/dpdk/lib/distributor/rte_distributor_single.c b/dpdk/lib/distributor/rte_distributor_single.c index b653620688..60ca86152f 100644 --- a/dpdk/lib/distributor/rte_distributor_single.c @@ -46870,6 +86408,32 @@ index f79718ce8c..cec4d69e7a 100644 */ static inline uint64_t __rte_rdtsc_syscall(void) +diff --git a/dpdk/lib/eal/common/eal_common_debug.c b/dpdk/lib/eal/common/eal_common_debug.c +index 15418e957f..37c7c308b9 100644 +--- a/dpdk/lib/eal/common/eal_common_debug.c ++++ b/dpdk/lib/eal/common/eal_common_debug.c +@@ -3,9 +3,12 @@ + */ + + #include <stdarg.h> ++#include <errno.h> ++ + #include <rte_eal.h> + #include <rte_log.h> + #include <rte_debug.h> ++#include <rte_errno.h> + + void + __rte_panic(const char *funcname, const char *format, ...) +@@ -37,7 +40,7 @@ rte_exit(int exit_code, const char *format, ...) + rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap); + va_end(ap); + +- if (rte_eal_cleanup() != 0) ++ if (rte_eal_cleanup() != 0 && rte_errno != EALREADY) + RTE_LOG(CRIT, EAL, + "EAL could not release all resources\n"); + exit(exit_code); diff --git a/dpdk/lib/eal/common/eal_common_dev.c b/dpdk/lib/eal/common/eal_common_dev.c index e1e9976d8d..07f285f862 100644 --- a/dpdk/lib/eal/common/eal_common_dev.c @@ -46899,10 +86463,30 @@ index 8c7650cf6c..184fe676aa 100644 /* First parse according global device syntax. */ if (rte_devargs_layers_parse(da, dev) == 0) { diff --git a/dpdk/lib/eal/common/eal_common_dynmem.c b/dpdk/lib/eal/common/eal_common_dynmem.c -index 7c5437ddfa..c1e1889f5c 100644 +index 7c5437ddfa..9af299b8f8 100644 --- a/dpdk/lib/eal/common/eal_common_dynmem.c +++ b/dpdk/lib/eal/common/eal_common_dynmem.c -@@ -304,6 +304,10 @@ eal_dynmem_hugepage_init(void) +@@ -119,8 +119,7 @@ eal_dynmem_memseg_lists_init(void) + max_seglists_per_type = RTE_MAX_MEMSEG_LISTS / n_memtypes; + + if (max_seglists_per_type == 0) { +- RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase RTE_MAX_MEMSEG_LISTS\n"); + goto out; + } + +@@ -179,8 +178,7 @@ eal_dynmem_memseg_lists_init(void) + for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) { + if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, +- "No more space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + goto out; + } + msl = &mcfg->memsegs[msl_idx++]; +@@ -304,6 +302,10 @@ eal_dynmem_hugepage_init(void) needed = num_pages - num_pages_alloc; pages = malloc(sizeof(*pages) * needed); @@ -46913,16 +86497,85 @@ index 7c5437ddfa..c1e1889f5c 100644 /* do not request exact number of pages */ cur_pages = eal_memalloc_alloc_seg_bulk(pages, +diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c +index 3a28a53247..5903474355 100644 +--- a/dpdk/lib/eal/common/eal_common_fbarray.c ++++ b/dpdk/lib/eal/common/eal_common_fbarray.c +@@ -1485,7 +1485,7 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) + + if (fully_validate(arr->name, arr->elt_sz, arr->len)) { + fprintf(f, "Invalid file-backed array\n"); +- goto out; ++ return; + } + + /* prevent 
array from changing under us */ +@@ -1499,6 +1499,5 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) + + for (i = 0; i < msk->n_masks; i++) + fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]); +-out: + rte_rwlock_read_unlock(&arr->rwlock); + } +diff --git a/dpdk/lib/eal/common/eal_common_memory.c b/dpdk/lib/eal/common/eal_common_memory.c +index 616db5ce31..4aeb933a8c 100644 +--- a/dpdk/lib/eal/common/eal_common_memory.c ++++ b/dpdk/lib/eal/common/eal_common_memory.c +@@ -1133,7 +1133,7 @@ handle_eal_heap_info_request(const char *cmd __rte_unused, const char *params, + malloc_heap_get_stats(heap, &sock_stats); + + rte_tel_data_start_dict(d); +- rte_tel_data_add_dict_int(d, "Head id", heap_id); ++ rte_tel_data_add_dict_u64(d, "Heap_id", heap_id); + rte_tel_data_add_dict_string(d, "Name", heap->name); + rte_tel_data_add_dict_u64(d, "Heap_size", + sock_stats.heap_totalsz_bytes); +@@ -1195,13 +1195,13 @@ handle_eal_memzone_info_request(const char *cmd __rte_unused, + mz = rte_fbarray_get(&mcfg->memzones, mz_idx); + + rte_tel_data_start_dict(d); +- rte_tel_data_add_dict_int(d, "Zone", mz_idx); ++ rte_tel_data_add_dict_u64(d, "Zone", mz_idx); + rte_tel_data_add_dict_string(d, "Name", mz->name); +- rte_tel_data_add_dict_int(d, "Length", mz->len); ++ rte_tel_data_add_dict_u64(d, "Length", mz->len); + snprintf(addr, ADDR_STR, "%p", mz->addr); + rte_tel_data_add_dict_string(d, "Address", addr); + rte_tel_data_add_dict_int(d, "Socket", mz->socket_id); +- rte_tel_data_add_dict_int(d, "Flags", mz->flags); ++ rte_tel_data_add_dict_u64(d, "Flags", mz->flags); + + /* go through each page occupied by this memzone */ + msl = rte_mem_virt2memseg_list(mz->addr); +@@ -1216,7 +1216,7 @@ handle_eal_memzone_info_request(const char *cmd __rte_unused, + ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz; + ms = rte_fbarray_get(&msl->memseg_arr, ms_idx); + +- rte_tel_data_add_dict_int(d, "Hugepage_size", page_sz); ++ rte_tel_data_add_dict_u64(d, "Hugepage_size", page_sz); + snprintf(addr, ADDR_STR, "%p", ms->addr); + rte_tel_data_add_dict_string(d, "Hugepage_base", addr); + diff --git a/dpdk/lib/eal/common/eal_common_proc.c b/dpdk/lib/eal/common/eal_common_proc.c -index ebd0f6673b..b33d58ea0a 100644 +index ebd0f6673b..38a2164f71 100644 --- a/dpdk/lib/eal/common/eal_common_proc.c +++ b/dpdk/lib/eal/common/eal_common_proc.c +@@ -262,7 +262,7 @@ rte_mp_action_unregister(const char *name) + } + + static int +-read_msg(struct mp_msg_internal *m, struct sockaddr_un *s) ++read_msg(int fd, struct mp_msg_internal *m, struct sockaddr_un *s) + { + int msglen; + struct iovec iov; @@ -282,8 +282,17 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s) msgh.msg_control = control; msgh.msg_controllen = sizeof(control); +- msglen = recvmsg(mp_fd, &msgh, 0); +retry: - msglen = recvmsg(mp_fd, &msgh, 0); ++ msglen = recvmsg(fd, &msgh, 0); + + /* zero length message means socket was closed */ + if (msglen == 0) @@ -46935,24 +86588,57 @@ index ebd0f6673b..b33d58ea0a 100644 RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno)); return -1; } -@@ -311,7 +320,7 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s) +@@ -311,7 +320,16 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s) RTE_LOG(ERR, EAL, "invalid received data length\n"); return -1; } - return 0; + return msglen; ++} ++ ++static void ++cleanup_msg_fds(const struct rte_mp_msg *msg) ++{ ++ int i; ++ ++ for (i = 0; i < msg->num_fds; i++) ++ close(msg->fds[i]); } static void -@@ -385,8 +394,13 @@ mp_handle(void *arg 
__rte_unused) +@@ -342,8 +360,10 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) + else if (pending_req->type == REQUEST_TYPE_ASYNC) + req = async_reply_handle_thread_unsafe( + pending_req); +- } else ++ } else { + RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name); ++ cleanup_msg_fds(msg); ++ } + pthread_mutex_unlock(&pending_requests.lock); + + if (req != NULL) +@@ -373,6 +393,7 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s) + RTE_LOG(ERR, EAL, "Cannot find action: %s\n", + msg->name); + } ++ cleanup_msg_fds(msg); + } else if (action(msg, s->sun_path) < 0) { + RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name); + } +@@ -383,10 +404,16 @@ mp_handle(void *arg __rte_unused) + { + struct mp_msg_internal msg; struct sockaddr_un sa; ++ int fd; - while (mp_fd >= 0) { +- while (mp_fd >= 0) { - if (read_msg(&msg, &sa) == 0) - process_msg(&msg, &sa); ++ while ((fd = __atomic_load_n(&mp_fd, __ATOMIC_RELAXED)) >= 0) { + int ret; + -+ ret = read_msg(&msg, &sa); ++ ret = read_msg(fd, &msg, &sa); + if (ret <= 0) + break; + @@ -46960,8 +86646,226 @@ index ebd0f6673b..b33d58ea0a 100644 } return NULL; +@@ -626,9 +653,8 @@ rte_mp_channel_init(void) + NULL, mp_handle, NULL) < 0) { + RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n", + strerror(errno)); +- close(mp_fd); + close(dir_fd); +- mp_fd = -1; ++ close(__atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED)); + return -1; + } + +@@ -644,11 +670,10 @@ rte_mp_channel_cleanup(void) + { + int fd; + +- if (mp_fd < 0) ++ fd = __atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED); ++ if (fd < 0) + return; + +- fd = mp_fd; +- mp_fd = -1; + pthread_cancel(mp_handle_tid); + pthread_join(mp_handle_tid, NULL); + close_socket_fd(fd); +diff --git a/dpdk/lib/eal/common/eal_common_trace.c b/dpdk/lib/eal/common/eal_common_trace.c +index 7bff1cd2ce..285770377e 100644 +--- a/dpdk/lib/eal/common/eal_common_trace.c ++++ b/dpdk/lib/eal/common/eal_common_trace.c +@@ -48,12 +48,6 @@ eal_trace_init(void) + goto fail; + } + +- if (!STAILQ_EMPTY(&trace.args)) +- trace.status = true; +- +- if (!rte_trace_is_enabled()) +- return 0; +- + rte_spinlock_init(&trace.lock); + + /* Is duplicate trace name registered */ +@@ -72,13 +66,9 @@ eal_trace_init(void) + if (trace_metadata_create() < 0) + goto fail; + +- /* Create trace directory */ +- if (trace_mkdir()) +- goto free_meta; +- + /* Save current epoch timestamp for future use */ + if (trace_epoch_time_save() < 0) +- goto fail; ++ goto free_meta; + + /* Apply global configurations */ + STAILQ_FOREACH(arg, &trace.args, next) +@@ -98,8 +88,6 @@ eal_trace_init(void) + void + eal_trace_fini(void) + { +- if (!rte_trace_is_enabled()) +- return; + trace_mem_free(); + trace_metadata_destroy(); + eal_trace_args_free(); +@@ -108,17 +96,17 @@ eal_trace_fini(void) + bool + rte_trace_is_enabled(void) + { +- return trace.status; ++ return __atomic_load_n(&trace.status, __ATOMIC_ACQUIRE) != 0; + } + + static void +-trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode) ++trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode) + { + if (mode == RTE_TRACE_MODE_OVERWRITE) +- __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD, ++ __atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD, + __ATOMIC_RELEASE); + else +- __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD, ++ __atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_DISCARD, + __ATOMIC_RELEASE); + } + +@@ -127,9 +115,6 @@ rte_trace_mode_set(enum rte_trace_mode mode) + { + struct trace_point *tp; + +- if (!rte_trace_is_enabled()) 
+- return; +- + STAILQ_FOREACH(tp, &tp_list, next) + trace_mode_set(tp->handle, mode); + +@@ -149,36 +134,42 @@ trace_point_is_invalid(rte_trace_point_t *t) + } + + bool +-rte_trace_point_is_enabled(rte_trace_point_t *trace) ++rte_trace_point_is_enabled(rte_trace_point_t *t) + { + uint64_t val; + +- if (trace_point_is_invalid(trace)) ++ if (trace_point_is_invalid(t)) + return false; + +- val = __atomic_load_n(trace, __ATOMIC_ACQUIRE); ++ val = __atomic_load_n(t, __ATOMIC_ACQUIRE); + return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0; + } + + int +-rte_trace_point_enable(rte_trace_point_t *trace) ++rte_trace_point_enable(rte_trace_point_t *t) + { +- if (trace_point_is_invalid(trace)) ++ uint64_t prev; ++ ++ if (trace_point_is_invalid(t)) + return -ERANGE; + +- __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK, +- __ATOMIC_RELEASE); ++ prev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE); ++ if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0) ++ __atomic_add_fetch(&trace.status, 1, __ATOMIC_RELEASE); + return 0; + } + + int +-rte_trace_point_disable(rte_trace_point_t *trace) ++rte_trace_point_disable(rte_trace_point_t *t) + { +- if (trace_point_is_invalid(trace)) ++ uint64_t prev; ++ ++ if (trace_point_is_invalid(t)) + return -ERANGE; + +- __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK, +- __ATOMIC_RELEASE); ++ prev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE); ++ if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0) ++ __atomic_sub_fetch(&trace.status, 1, __ATOMIC_RELEASE); + return 0; + } + +@@ -221,8 +212,10 @@ rte_trace_regexp(const char *regex, bool enable) + rc = rte_trace_point_disable(tp->handle); + found = 1; + } +- if (rc < 0) +- return rc; ++ if (rc < 0) { ++ found = 0; ++ break; ++ } + } + regfree(&r); + +@@ -262,10 +255,9 @@ trace_lcore_mem_dump(FILE *f) + struct __rte_trace_header *header; + uint32_t count; + +- if (trace->nb_trace_mem_list == 0) +- return; +- + rte_spinlock_lock(&trace->lock); ++ if (trace->nb_trace_mem_list == 0) ++ goto out; + fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list); + fprintf(f, "\nTrace mem info\n--------------\n"); + for (count = 0; count < trace->nb_trace_mem_list; count++) { +@@ -276,6 +268,7 @@ trace_lcore_mem_dump(FILE *f) + header->stream_header.lcore_id, + header->stream_header.thread_name); + } ++out: + rte_spinlock_unlock(&trace->lock); + } + +@@ -414,9 +407,6 @@ trace_mem_free(void) + struct trace *trace = trace_obj_get(); + uint32_t count; + +- if (!rte_trace_is_enabled()) +- return; +- + rte_spinlock_lock(&trace->lock); + for (count = 0; count < trace->nb_trace_mem_list; count++) { + trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]); +@@ -512,6 +502,7 @@ __rte_trace_point_register(rte_trace_point_t *handle, const char *name, + /* Form the trace handle */ + *handle = sz; + *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT; ++ trace_mode_set(handle, trace.mode); + + trace.nb_trace_points++; + tp->handle = handle; +diff --git a/dpdk/lib/eal/common/eal_common_trace_ctf.c b/dpdk/lib/eal/common/eal_common_trace_ctf.c +index 33e419aac7..94726817b2 100644 +--- a/dpdk/lib/eal/common/eal_common_trace_ctf.c ++++ b/dpdk/lib/eal/common/eal_common_trace_ctf.c +@@ -359,9 +359,6 @@ rte_trace_metadata_dump(FILE *f) + char *ctf_meta = trace->ctf_meta; + int rc; + +- if (!rte_trace_is_enabled()) +- return 0; +- + if (ctf_meta == NULL) + return -EINVAL; + diff --git a/dpdk/lib/eal/common/eal_common_trace_utils.c 
b/dpdk/lib/eal/common/eal_common_trace_utils.c -index 64f58fb66a..2b55dbec65 100644 +index 64f58fb66a..7bf1c05e12 100644 --- a/dpdk/lib/eal/common/eal_common_trace_utils.c +++ b/dpdk/lib/eal/common/eal_common_trace_utils.c @@ -104,13 +104,15 @@ trace_session_name_generate(char *trace_dir) @@ -46982,6 +86886,66 @@ index 64f58fb66a..2b55dbec65 100644 return rc; fail: +@@ -312,14 +314,18 @@ trace_dir_default_path_get(char *dir_path) + return 0; + } + +-int ++static int + trace_mkdir(void) + { + struct trace *trace = trace_obj_get(); + char session[TRACE_DIR_STR_LEN]; ++ static bool already_done; + char *dir_path; + int rc; + ++ if (already_done) ++ return 0; ++ + if (!trace->dir_offset) { + dir_path = calloc(1, sizeof(trace->dir)); + if (dir_path == NULL) { +@@ -363,6 +369,7 @@ trace_mkdir(void) + } + + RTE_LOG(INFO, EAL, "Trace dir: %s\n", trace->dir); ++ already_done = true; + return 0; + } + +@@ -432,6 +439,10 @@ rte_trace_save(void) + if (trace->nb_trace_mem_list == 0) + return rc; + ++ rc = trace_mkdir(); ++ if (rc < 0) ++ return rc; ++ + rc = trace_meta_save(trace); + if (rc) + return rc; +diff --git a/dpdk/lib/eal/common/eal_trace.h b/dpdk/lib/eal/common/eal_trace.h +index 06751eb23a..72a5a461ae 100644 +--- a/dpdk/lib/eal/common/eal_trace.h ++++ b/dpdk/lib/eal/common/eal_trace.h +@@ -54,7 +54,7 @@ struct trace { + char dir[PATH_MAX]; + int dir_offset; + int register_errno; +- bool status; ++ uint32_t status; + enum rte_trace_mode mode; + rte_uuid_t uuid; + uint32_t buff_len; +@@ -104,7 +104,6 @@ void trace_uuid_generate(void); + int trace_metadata_create(void); + void trace_metadata_destroy(void); + char *trace_metadata_fixup_field(const char *field); +-int trace_mkdir(void); + int trace_epoch_time_save(void); + void trace_mem_free(void); + void trace_mem_per_thread_free(void); diff --git a/dpdk/lib/eal/common/malloc_elem.h b/dpdk/lib/eal/common/malloc_elem.h index 15d8ba7af2..503fe5c470 100644 --- a/dpdk/lib/eal/common/malloc_elem.h @@ -47020,14 +86984,16 @@ index 15d8ba7af2..503fe5c470 100644 static inline void asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { } diff --git a/dpdk/lib/eal/common/malloc_heap.c b/dpdk/lib/eal/common/malloc_heap.c -index 55aad2711b..55063ccf81 100644 +index 55aad2711b..25370d1148 100644 --- a/dpdk/lib/eal/common/malloc_heap.c +++ b/dpdk/lib/eal/common/malloc_heap.c -@@ -402,7 +402,7 @@ try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz, +@@ -401,8 +401,8 @@ try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz, + int n_segs; bool callback_triggered = false; - alloc_sz = RTE_ALIGN_CEIL(align + elt_size + +- alloc_sz = RTE_ALIGN_CEIL(align + elt_size + - MALLOC_ELEM_TRAILER_LEN, pg_sz); ++ alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(elt_size, align) + + MALLOC_ELEM_OVERHEAD, pg_sz); n_segs = alloc_sz / pg_sz; @@ -47089,23 +87055,86 @@ index 55aad2711b..55063ccf81 100644 return ret; } diff --git a/dpdk/lib/eal/common/malloc_mp.c b/dpdk/lib/eal/common/malloc_mp.c -index 2e597a17a2..774bd1132f 100644 +index 2e597a17a2..8e236ddd7b 100644 --- a/dpdk/lib/eal/common/malloc_mp.c +++ b/dpdk/lib/eal/common/malloc_mp.c -@@ -251,7 +251,7 @@ handle_alloc_request(const struct malloc_mp_req *m, +@@ -250,8 +250,8 @@ handle_alloc_request(const struct malloc_mp_req *m, + return -1; } - alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size + +- alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size + - MALLOC_ELEM_TRAILER_LEN, ar->page_sz); ++ alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(ar->elt_size, ar->align) + + MALLOC_ELEM_OVERHEAD, 
ar->page_sz); n_segs = alloc_sz / ar->page_sz; /* we can't know in advance how many pages we'll need, so we malloc */ +diff --git a/dpdk/lib/eal/common/rte_malloc.c b/dpdk/lib/eal/common/rte_malloc.c +index d0bec26920..c5a7757deb 100644 +--- a/dpdk/lib/eal/common/rte_malloc.c ++++ b/dpdk/lib/eal/common/rte_malloc.c +@@ -46,13 +46,13 @@ mem_free(void *addr, const bool trace_ena) + void + rte_free(void *addr) + { +- return mem_free(addr, true); ++ mem_free(addr, true); + } + + void + eal_free_no_trace(void *addr) + { +- return mem_free(addr, false); ++ mem_free(addr, false); + } + + static void * +diff --git a/dpdk/lib/eal/common/rte_random.c b/dpdk/lib/eal/common/rte_random.c +index ce21c2242a..ac5876dd1f 100644 +--- a/dpdk/lib/eal/common/rte_random.c ++++ b/dpdk/lib/eal/common/rte_random.c +@@ -82,7 +82,7 @@ rte_srand(uint64_t seed) + unsigned int lcore_id; + + /* add lcore_id to seed to avoid having the same sequence */ +- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) ++ for (lcore_id = 0; lcore_id < RTE_DIM(rand_states); lcore_id++) + __rte_srand_lfsr258(seed + lcore_id, &rand_states[lcore_id]); + } + diff --git a/dpdk/lib/eal/common/rte_service.c b/dpdk/lib/eal/common/rte_service.c -index bd8fb72e78..e76c2baffc 100644 +index bd8fb72e78..fb5a5b61e2 100644 --- a/dpdk/lib/eal/common/rte_service.c +++ b/dpdk/lib/eal/common/rte_service.c -@@ -764,7 +764,9 @@ rte_service_lcore_stop(uint32_t lcore) +@@ -103,14 +103,12 @@ rte_service_init(void) + } + + int i; +- int count = 0; + struct rte_config *cfg = rte_eal_get_configuration(); + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (lcore_config[i].core_role == ROLE_SERVICE) { + if ((unsigned int)i == cfg->main_lcore) + continue; + rte_service_lcore_add(i); +- count++; + } + } + +@@ -478,6 +476,12 @@ service_runner_func(void *arg) + cs->loops++; + } + ++ /* Switch off this core for all services, to ensure that future ++ * calls to may_be_active() know this core is switched off. ++ */ ++ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) ++ cs->service_active_on_lcore[i] = 0; ++ + /* Use SEQ CST memory ordering to avoid any re-ordering around + * this store, ensuring that once this store is visible, the service + * lcore thread really is done in service cores code. +@@ -764,7 +768,9 @@ rte_service_lcore_stop(uint32_t lcore) return -EALREADY; uint32_t i; @@ -47116,36 +87145,72 @@ index bd8fb72e78..e76c2baffc 100644 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) { int32_t enabled = service_mask & (UINT64_C(1) << i); int32_t service_running = rte_service_runstate_get(i); -@@ -772,6 +774,11 @@ rte_service_lcore_stop(uint32_t lcore) - __atomic_load_n(&rte_services[i].num_mapped_cores, - __ATOMIC_RELAXED)); - -+ /* Switch off this core for all services, to ensure that future -+ * calls to may_be_active() know this core is switched off. -+ */ -+ cs->service_active_on_lcore[i] = 0; -+ - /* if the core is mapped, and the service is running, and this - * is the only core that is mapped, the service would cease to - * run if this core stopped, so fail instead. 
diff --git a/dpdk/lib/eal/freebsd/eal.c b/dpdk/lib/eal/freebsd/eal.c -index a1cd2462db..414aad3dd3 100644 +index a1cd2462db..66553089fa 100644 --- a/dpdk/lib/eal/freebsd/eal.c +++ b/dpdk/lib/eal/freebsd/eal.c -@@ -986,11 +986,11 @@ rte_eal_cleanup(void) +@@ -982,15 +982,25 @@ rte_eal_init(int argc, char **argv) + int + rte_eal_cleanup(void) + { ++ static uint32_t run_once; ++ uint32_t has_run = 0; ++ ++ if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0, ++ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { ++ RTE_LOG(WARNING, EAL, "Already called cleanup\n"); ++ rte_errno = EALREADY; ++ return -1; ++ } ++ + struct internal_config *internal_conf = eal_get_internal_configuration(); rte_service_finalize(); rte_mp_channel_cleanup(); -+ rte_trace_save(); -+ eal_trace_fini(); - /* after this point, any DPDK pointers will become dangling */ - rte_eal_memory_detach(); - rte_eal_alarm_cleanup(); -- rte_trace_save(); -- eal_trace_fini(); +- /* after this point, any DPDK pointers will become dangling */ +- rte_eal_memory_detach(); +- rte_eal_alarm_cleanup(); + rte_trace_save(); + eal_trace_fini(); ++ rte_eal_alarm_cleanup(); ++ /* after this point, any DPDK pointers will become dangling */ ++ rte_eal_memory_detach(); eal_cleanup_config(internal_conf); return 0; } +diff --git a/dpdk/lib/eal/freebsd/eal_alarm.c b/dpdk/lib/eal/freebsd/eal_alarm.c +index 1023c32937..1a3e6c0aad 100644 +--- a/dpdk/lib/eal/freebsd/eal_alarm.c ++++ b/dpdk/lib/eal/freebsd/eal_alarm.c +@@ -171,12 +171,12 @@ eal_alarm_callback(void *arg __rte_unused) + struct timespec now; + struct alarm_entry *ap; + +- rte_spinlock_lock(&alarm_list_lk); +- ap = LIST_FIRST(&alarm_list); +- + if (clock_gettime(CLOCK_TYPE_ID, &now) < 0) + return; + ++ rte_spinlock_lock(&alarm_list_lk); ++ ap = LIST_FIRST(&alarm_list); ++ + while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) { + ap->executing = 1; + ap->executing_id = pthread_self(); +diff --git a/dpdk/lib/eal/freebsd/eal_hugepage_info.c b/dpdk/lib/eal/freebsd/eal_hugepage_info.c +index 9dbe375bd3..e58e618469 100644 +--- a/dpdk/lib/eal/freebsd/eal_hugepage_info.c ++++ b/dpdk/lib/eal/freebsd/eal_hugepage_info.c +@@ -33,7 +33,7 @@ map_shared_memory(const char *filename, const size_t mem_size, int flags) + } + retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + close(fd); +- return retval; ++ return retval == MAP_FAILED ? NULL : retval; + } + + static void * diff --git a/dpdk/lib/eal/freebsd/eal_interrupts.c b/dpdk/lib/eal/freebsd/eal_interrupts.c index 10aa91cc09..9f720bdc8f 100644 --- a/dpdk/lib/eal/freebsd/eal_interrupts.c @@ -47169,10 +87234,32 @@ index 10aa91cc09..9f720bdc8f 100644 if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle)) break; diff --git a/dpdk/lib/eal/freebsd/eal_memory.c b/dpdk/lib/eal/freebsd/eal_memory.c -index 78ac142b82..17ab10e0ca 100644 +index 78ac142b82..5c6165c580 100644 --- a/dpdk/lib/eal/freebsd/eal_memory.c +++ b/dpdk/lib/eal/freebsd/eal_memory.c -@@ -446,8 +446,8 @@ memseg_secondary_init(void) +@@ -172,9 +172,8 @@ rte_eal_hugepage_init(void) + break; + } + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { +- RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n", +- RTE_STR(RTE_MAX_MEMSEG_PER_TYPE), +- RTE_STR(RTE_MAX_MEM_MB_PER_TYPE)); ++ RTE_LOG(ERR, EAL, "Could not find space for memseg. 
Please increase RTE_MAX_MEMSEG_PER_LIST " ++ "RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.\n"); + return -1; + } + arr = &msl->memseg_arr; +@@ -404,8 +403,7 @@ memseg_primary_init(void) + + if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, +- "No more space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + return -1; + } + +@@ -446,8 +444,8 @@ memseg_secondary_init(void) msl = &mcfg->memsegs[msl_idx]; @@ -47250,6 +87337,46 @@ index 398ceab71d..fe9097303a 100644 +if not cc.compiles(cpuset_test_code, name: 'Detect argument count for CPU_OR') + dpdk_conf.set('RTE_EAL_FREEBSD_CPUSET_LEGACY', 1) +endif +diff --git a/dpdk/lib/eal/include/generic/rte_atomic.h b/dpdk/lib/eal/include/generic/rte_atomic.h +index 276272f40b..f73825a3d3 100644 +--- a/dpdk/lib/eal/include/generic/rte_atomic.h ++++ b/dpdk/lib/eal/include/generic/rte_atomic.h +@@ -175,11 +175,7 @@ rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val); + static inline uint16_t + rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) + { +-#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +-#else +- return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST); +-#endif + } + #endif + +@@ -458,11 +454,7 @@ rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val); + static inline uint32_t + rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) + { +-#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +-#else +- return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST); +-#endif + } + #endif + +@@ -740,11 +732,7 @@ rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val); + static inline uint64_t + rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) + { +-#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +-#else +- return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST); +-#endif + } + #endif + diff --git a/dpdk/lib/eal/include/generic/rte_pflock.h b/dpdk/lib/eal/include/generic/rte_pflock.h index b9de063c89..e7bb29b3c5 100644 --- a/dpdk/lib/eal/include/generic/rte_pflock.h @@ -47300,6 +87427,19 @@ index 86468d1a2b..9700494816 100644 'generic/rte_power_intrinsics.h', 'generic/rte_prefetch.h', 'generic/rte_rwlock.h', +diff --git a/dpdk/lib/eal/include/rte_bitmap.h b/dpdk/lib/eal/include/rte_bitmap.h +index e4623bb176..c589d5cde0 100644 +--- a/dpdk/lib/eal/include/rte_bitmap.h ++++ b/dpdk/lib/eal/include/rte_bitmap.h +@@ -327,8 +327,6 @@ rte_bitmap_reset(struct rte_bitmap *bmp) + * Handle to bitmap instance + * @param pos + * Bit position +- * @return +- * 0 upon success, error code otherwise + */ + static inline void + rte_bitmap_prefetch0(struct rte_bitmap *bmp, uint32_t pos) diff --git a/dpdk/lib/eal/include/rte_bitops.h b/dpdk/lib/eal/include/rte_bitops.h index 141e8ea730..f50dbe4388 100644 --- a/dpdk/lib/eal/include/rte_bitops.h @@ -47349,7 +87489,7 @@ index 854ef9e5dd..0256a9de60 100644 + #endif /* _RTE_BRANCH_PREDICTION_H_ */ diff --git a/dpdk/lib/eal/include/rte_common.h b/dpdk/lib/eal/include/rte_common.h -index 4a399cc7c8..6f004f6cb3 100644 +index 4a399cc7c8..5122ea8925 100644 --- a/dpdk/lib/eal/include/rte_common.h +++ b/dpdk/lib/eal/include/rte_common.h @@ -85,6 +85,11 @@ typedef uint16_t unaligned_uint16_t; @@ -47384,6 +87524,24 @@ index 4a399cc7c8..6f004f6cb3 100644 /*********** Macros for pointer arithmetic ********/ /** +@@ -252,7 +270,7 @@ static void 
__attribute__((destructor(RTE_PRIO(prio)), used)) func(void) + /** + * subtract a byte-value offset from a pointer + */ +-#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x))) ++#define RTE_PTR_SUB(ptr, x) ((void *)((uintptr_t)(ptr) - (x))) + + /** + * get the difference between two pointer values, i.e. how far apart +@@ -277,7 +295,7 @@ static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void) + * must be a power-of-two value. + */ + #define RTE_PTR_ALIGN_FLOOR(ptr, align) \ +- ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align)) ++ ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)(ptr), align)) + + /** + * Macro to align a value to a given power-of-two. The resultant value diff --git a/dpdk/lib/eal/include/rte_compat.h b/dpdk/lib/eal/include/rte_compat.h index 2718612cce..a7dbe23449 100644 --- a/dpdk/lib/eal/include/rte_compat.h @@ -47432,6 +87590,28 @@ index 448a41cb0e..e6ff1218f9 100644 +#endif + #endif /* _RTE_DEV_H_ */ +diff --git a/dpdk/lib/eal/include/rte_hexdump.h b/dpdk/lib/eal/include/rte_hexdump.h +index 2d03c089c4..80cf2933a7 100644 +--- a/dpdk/lib/eal/include/rte_hexdump.h ++++ b/dpdk/lib/eal/include/rte_hexdump.h +@@ -27,8 +27,6 @@ extern "C" { + * This is the buffer address to print out. + * @param len + * The number of bytes to dump out +-* @return +-* None. + */ + + extern void +@@ -45,8 +43,6 @@ rte_hexdump(FILE *f, const char * title, const void * buf, unsigned int len); + * This is the buffer address to print out. + * @param len + * The number of bytes to dump out +-* @return +-* None. + */ + + void diff --git a/dpdk/lib/eal/include/rte_hypervisor.h b/dpdk/lib/eal/include/rte_hypervisor.h index 5fe719c1d4..1666431ce3 100644 --- a/dpdk/lib/eal/include/rte_hypervisor.h @@ -47502,6 +87682,34 @@ index ed02e15119..3892519fab 100644 * * Equivalent to rte_malloc() except that the memory zone is * initialised with zeros. +diff --git a/dpdk/lib/eal/include/rte_memzone.h b/dpdk/lib/eal/include/rte_memzone.h +index 5db1210831..bb4223a056 100644 +--- a/dpdk/lib/eal/include/rte_memzone.h ++++ b/dpdk/lib/eal/include/rte_memzone.h +@@ -118,7 +118,6 @@ struct rte_memzone { + * on error. + * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone +@@ -184,7 +183,6 @@ const struct rte_memzone *rte_memzone_reserve(const char *name, + * on error. + * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone +@@ -256,7 +254,6 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name, + * on error. 
+ * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone diff --git a/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h b/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h index e12c22081f..c5bb631286 100644 --- a/dpdk/lib/eal/include/rte_pci_dev_feature_defs.h @@ -47611,11 +87819,76 @@ index 4f5c86552d..2e61439940 100644 +#endif + #endif /* _RTE_TRACE_POINT_REGISTER_H_ */ +diff --git a/dpdk/lib/eal/include/rte_uuid.h b/dpdk/lib/eal/include/rte_uuid.h +index 8b42e070af..cfefd4308a 100644 +--- a/dpdk/lib/eal/include/rte_uuid.h ++++ b/dpdk/lib/eal/include/rte_uuid.h +@@ -37,6 +37,9 @@ typedef unsigned char rte_uuid_t[16]; + ((e) >> 8) & 0xff, (e) & 0xff \ + } + ++/** UUID string length */ ++#define RTE_UUID_STRLEN (36 + 1) ++ + /** + * Test if UUID is all zeros. + * +@@ -95,7 +98,6 @@ int rte_uuid_parse(const char *in, rte_uuid_t uu); + * @param len + * Sizeof the available string buffer + */ +-#define RTE_UUID_STRLEN (36 + 1) + void rte_uuid_unparse(const rte_uuid_t uu, char *out, size_t len); + + #ifdef __cplusplus diff --git a/dpdk/lib/eal/linux/eal.c b/dpdk/lib/eal/linux/eal.c -index 60b4924838..e3d34f7b7c 100644 +index 60b4924838..6f7e8641d3 100644 --- a/dpdk/lib/eal/linux/eal.c +++ b/dpdk/lib/eal/linux/eal.c -@@ -1362,13 +1362,17 @@ rte_eal_cleanup(void) +@@ -1061,12 +1061,6 @@ rte_eal_init(int argc, char **argv) + } + } + +- /* register multi-process action callbacks for hotplug */ +- if (eal_mp_dev_hotplug_init() < 0) { +- rte_eal_init_alert("failed to register mp callback for hotplug"); +- return -1; +- } +- + if (rte_bus_scan()) { + rte_eal_init_alert("Cannot scan the buses for devices"); + rte_errno = ENODEV; +@@ -1205,6 +1199,12 @@ rte_eal_init(int argc, char **argv) + return -1; + } + ++ /* register multi-process action callbacks for hotplug after memory init */ ++ if (eal_mp_dev_hotplug_init() < 0) { ++ rte_eal_init_alert("failed to register mp callback for hotplug"); ++ return -1; ++ } ++ + if (rte_eal_tailqs_init() < 0) { + rte_eal_init_alert("Cannot init tail queues for objects"); + rte_errno = EFAULT; +@@ -1354,6 +1354,16 @@ mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms, + int + rte_eal_cleanup(void) + { ++ static uint32_t run_once; ++ uint32_t has_run = 0; ++ ++ if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0, ++ __ATOMIC_RELAXED, __ATOMIC_RELAXED)) { ++ RTE_LOG(WARNING, EAL, "Already called cleanup\n"); ++ rte_errno = EALREADY; ++ return -1; ++ } ++ + /* if we're in a primary process, we need to mark hugepages as freeable + * so that finalization can release them back to the system. 
+ */ +@@ -1362,13 +1372,17 @@ rte_eal_cleanup(void) if (rte_eal_process_type() == RTE_PROC_PRIMARY) rte_memseg_walk(mark_freeable, NULL); @@ -47625,13 +87898,14 @@ index 60b4924838..e3d34f7b7c 100644 + vfio_mp_sync_cleanup(); +#endif rte_mp_channel_cleanup(); -+ rte_trace_save(); -+ eal_trace_fini(); - /* after this point, any DPDK pointers will become dangling */ - rte_eal_memory_detach(); - rte_eal_alarm_cleanup(); -- rte_trace_save(); -- eal_trace_fini(); +- /* after this point, any DPDK pointers will become dangling */ +- rte_eal_memory_detach(); +- rte_eal_alarm_cleanup(); + rte_trace_save(); + eal_trace_fini(); ++ rte_eal_alarm_cleanup(); ++ /* after this point, any DPDK pointers will become dangling */ ++ rte_eal_memory_detach(); eal_cleanup_config(internal_conf); return 0; } @@ -47663,6 +87937,65 @@ index bde55a3d92..52fe336572 100644 monitor_refcount--; +diff --git a/dpdk/lib/eal/linux/eal_hugepage_info.c b/dpdk/lib/eal/linux/eal_hugepage_info.c +index 9fb0e968db..393e37bfb6 100644 +--- a/dpdk/lib/eal/linux/eal_hugepage_info.c ++++ b/dpdk/lib/eal/linux/eal_hugepage_info.c +@@ -57,7 +57,7 @@ map_shared_memory(const char *filename, const size_t mem_size, int flags) + retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, 0); + close(fd); +- return retval; ++ return retval == MAP_FAILED ? NULL : retval; + } + + static void * +@@ -217,6 +217,8 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + char buf[BUFSIZ]; + const struct internal_config *internal_conf = + eal_get_internal_configuration(); ++ const size_t hugepage_dir_len = (internal_conf->hugepage_dir != NULL) ? ++ strlen(internal_conf->hugepage_dir) : 0; + struct stat st; + + /* +@@ -236,6 +238,7 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + + while (fgets(buf, sizeof(buf), fd)){ + const char *pagesz_str; ++ size_t mountpt_len = 0; + + if (rte_strsplit(buf, sizeof(buf), splitstr, _FIELDNAME_MAX, + split_tok) != _FIELDNAME_MAX) { +@@ -268,12 +271,16 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + break; + } + ++ mountpt_len = strlen(splitstr[MOUNTPT]); ++ + /* +- * Ignore any mount that doesn't contain the --huge-dir +- * directory. ++ * Ignore any mount that doesn't contain the --huge-dir directory ++ * or where mount point is not a parent path of --huge-dir + */ + if (strncmp(internal_conf->hugepage_dir, splitstr[MOUNTPT], +- strlen(splitstr[MOUNTPT])) != 0) { ++ mountpt_len) != 0 || ++ (hugepage_dir_len > mountpt_len && ++ internal_conf->hugepage_dir[mountpt_len] != '/')) { + continue; + } + +@@ -281,7 +288,7 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len) + * We found a match, but only prefer it if it's a longer match + * (so /mnt/1 is preferred over /mnt for matching /mnt/1/2)). 
+ */ +- if (strlen(splitstr[MOUNTPT]) > strlen(found)) ++ if (mountpt_len > strlen(found)) + strlcpy(found, splitstr[MOUNTPT], len); + } /* end while fgets */ + diff --git a/dpdk/lib/eal/linux/eal_interrupts.c b/dpdk/lib/eal/linux/eal_interrupts.c index 6e3925efd4..70060bf3ef 100644 --- a/dpdk/lib/eal/linux/eal_interrupts.c @@ -47712,10 +88045,130 @@ index 337f2bc739..16b58d861b 100644 } /* take out a read lock */ diff --git a/dpdk/lib/eal/linux/eal_memory.c b/dpdk/lib/eal/linux/eal_memory.c -index 03a4f2dd2d..fda6a159d5 100644 +index 03a4f2dd2d..84f4e1cce7 100644 --- a/dpdk/lib/eal/linux/eal_memory.c +++ b/dpdk/lib/eal/linux/eal_memory.c -@@ -1883,8 +1883,8 @@ memseg_secondary_init(void) +@@ -686,6 +686,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + + /* find free space in memseg lists */ + for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) { ++ int free_len; + bool empty; + msl = &mcfg->memsegs[msl_idx]; + arr = &msl->memseg_arr; +@@ -697,24 +698,31 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + + /* leave space for a hole if array is not empty */ + empty = arr->count == 0; +- ms_idx = rte_fbarray_find_next_n_free(arr, 0, +- seg_len + (empty ? 0 : 1)); +- +- /* memseg list is full? */ ++ /* find start of the biggest contiguous block and its size */ ++ ms_idx = rte_fbarray_find_biggest_free(arr, 0); + if (ms_idx < 0) + continue; +- ++ /* hole is 1 segment long, so at least two segments long. */ ++ free_len = rte_fbarray_find_contig_free(arr, ms_idx); ++ if (free_len < 2) ++ continue; + /* leave some space between memsegs, they are not IOVA + * contiguous, so they shouldn't be VA contiguous either. + */ +- if (!empty) ++ if (!empty) { + ms_idx++; ++ free_len--; ++ } ++ ++ /* we might not get all of the space we wanted */ ++ free_len = RTE_MIN(seg_len, free_len); ++ seg_end = seg_start + free_len; ++ seg_len = seg_end - seg_start; + break; + } + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { +- RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n", +- RTE_STR(RTE_MAX_MEMSEG_PER_TYPE), +- RTE_STR(RTE_MAX_MEM_MB_PER_TYPE)); ++ RTE_LOG(ERR, EAL, "Could not find space for memseg. 
Please increase RTE_MAX_MEMSEG_PER_LIST " ++ "RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.\n"); + return -1; + } + +@@ -792,7 +800,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end) + } + RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n", + (seg_len * page_sz) >> 20, socket_id); +- return 0; ++ return seg_len; + } + + static uint64_t +@@ -962,8 +970,7 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages) + break; + } + if (msl_idx == RTE_MAX_MEMSEG_LISTS) { +- RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + return -1; + } + +@@ -1027,10 +1034,16 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages) + if (new_memseg) { + /* if this isn't the first time, remap segment */ + if (cur_page != 0) { +- ret = remap_segment(hugepages, seg_start_page, +- cur_page); +- if (ret != 0) +- return -1; ++ int n_remapped = 0; ++ int n_needed = cur_page - seg_start_page; ++ while (n_remapped < n_needed) { ++ ret = remap_segment(hugepages, seg_start_page, ++ cur_page); ++ if (ret < 0) ++ return -1; ++ n_remapped += ret; ++ seg_start_page += ret; ++ } + } + /* remember where we started */ + seg_start_page = cur_page; +@@ -1039,10 +1052,16 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages) + } + /* we were stopped, but we didn't remap the last segment, do it now */ + if (cur_page != 0) { +- ret = remap_segment(hugepages, seg_start_page, +- cur_page); +- if (ret != 0) +- return -1; ++ int n_remapped = 0; ++ int n_needed = cur_page - seg_start_page; ++ while (n_remapped < n_needed) { ++ ret = remap_segment(hugepages, seg_start_page, ++ cur_page); ++ if (ret < 0) ++ return -1; ++ n_remapped += ret; ++ seg_start_page += ret; ++ } + } + return 0; + } +@@ -1817,8 +1836,7 @@ memseg_primary_init_32(void) + + if (msl_idx >= RTE_MAX_MEMSEG_LISTS) { + RTE_LOG(ERR, EAL, +- "No more space in memseg lists, please increase %s\n", +- RTE_STR(RTE_MAX_MEMSEG_LISTS)); ++ "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS\n"); + return -1; + } + +@@ -1883,8 +1901,8 @@ memseg_secondary_init(void) msl = &mcfg->memsegs[msl_idx]; @@ -47884,6 +88337,50 @@ index 3180adb0ff..cd4bdff8b8 100644 +#endif + } +diff --git a/dpdk/lib/eal/unix/eal_firmware.c b/dpdk/lib/eal/unix/eal_firmware.c +index d1616b0bd9..1a7cf8e7b7 100644 +--- a/dpdk/lib/eal/unix/eal_firmware.c ++++ b/dpdk/lib/eal/unix/eal_firmware.c +@@ -25,19 +25,31 @@ static int + firmware_open(struct firmware_read_ctx *ctx, const char *name, size_t blocksize) + { + struct archive_entry *e; ++ int err; + + ctx->a = archive_read_new(); + if (ctx->a == NULL) + return -1; +- if (archive_read_support_format_raw(ctx->a) != ARCHIVE_OK || +- archive_read_support_filter_xz(ctx->a) != ARCHIVE_OK || +- archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK || +- archive_read_next_header(ctx->a, &e) != ARCHIVE_OK) { +- archive_read_free(ctx->a); +- ctx->a = NULL; +- return -1; +- } ++ ++ if (archive_read_support_format_raw(ctx->a) != ARCHIVE_OK) ++ goto error; ++ ++ err = archive_read_support_filter_xz(ctx->a); ++ if (err != ARCHIVE_OK && err != ARCHIVE_WARN) ++ goto error; ++ ++ if (archive_read_open_filename(ctx->a, name, blocksize) != ARCHIVE_OK) ++ goto error; ++ ++ if (archive_read_next_header(ctx->a, &e) != ARCHIVE_OK) ++ goto error; ++ + return 0; ++ ++error: ++ 
archive_read_free(ctx->a); ++ ctx->a = NULL; ++ return -1; + } + + static ssize_t diff --git a/dpdk/lib/eal/windows/eal_memalloc.c b/dpdk/lib/eal/windows/eal_memalloc.c index 55d6dcc71c..aa7589b81d 100644 --- a/dpdk/lib/eal/windows/eal_memalloc.c @@ -47915,6 +88412,29 @@ index 55d6dcc71c..aa7589b81d 100644 } int +diff --git a/dpdk/lib/eal/windows/eal_memory.c b/dpdk/lib/eal/windows/eal_memory.c +index 2fd37d9708..643e0e7a16 100644 +--- a/dpdk/lib/eal/windows/eal_memory.c ++++ b/dpdk/lib/eal/windows/eal_memory.c +@@ -72,10 +72,18 @@ static VirtualAlloc2_type VirtualAlloc2_ptr; + + #ifdef RTE_TOOLCHAIN_GCC + ++#ifndef MEM_COALESCE_PLACEHOLDERS + #define MEM_COALESCE_PLACEHOLDERS 0x00000001 ++#endif ++#ifndef MEM_PRESERVE_PLACEHOLDER + #define MEM_PRESERVE_PLACEHOLDER 0x00000002 ++#endif ++#ifndef MEM_REPLACE_PLACEHOLDER + #define MEM_REPLACE_PLACEHOLDER 0x00004000 ++#endif ++#ifndef MEM_RESERVE_PLACEHOLDER + #define MEM_RESERVE_PLACEHOLDER 0x00040000 ++#endif + + int + eal_mem_win32api_init(void) diff --git a/dpdk/lib/eal/windows/eal_thread.c b/dpdk/lib/eal/windows/eal_thread.c index 54fa93fa62..ff84cb42af 100644 --- a/dpdk/lib/eal/windows/eal_thread.c @@ -47995,6 +88515,20 @@ index c272f65ccd..c6b226bd5d 100644 * * @param flag * containing information about the pattern +diff --git a/dpdk/lib/eal/windows/include/pthread.h b/dpdk/lib/eal/windows/include/pthread.h +index 27fd2cca52..f7cf0e9ddf 100644 +--- a/dpdk/lib/eal/windows/include/pthread.h ++++ b/dpdk/lib/eal/windows/include/pthread.h +@@ -134,7 +134,8 @@ pthread_create(void *threadid, const void *threadattr, void *threadfunc, + { + RTE_SET_USED(threadattr); + HANDLE hThread; +- hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)threadfunc, ++ hThread = CreateThread(NULL, 0, ++ (LPTHREAD_START_ROUTINE)(uintptr_t)threadfunc, + args, 0, (LPDWORD)threadid); + if (hThread) { + SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS); diff --git a/dpdk/lib/eal/windows/include/rte_windows.h b/dpdk/lib/eal/windows/include/rte_windows.h index 0063b5d78c..83730c3d2e 100644 --- a/dpdk/lib/eal/windows/include/rte_windows.h @@ -48298,7 +88832,7 @@ index d95605a355..2822fd8c72 100644 + #endif /* _RTE_ETHDEV_DRIVER_H_ */ diff --git a/dpdk/lib/ethdev/ethdev_pci.h b/dpdk/lib/ethdev/ethdev_pci.h -index 71aa4b2e98..0549842709 100644 +index 71aa4b2e98..7ab819c393 100644 --- a/dpdk/lib/ethdev/ethdev_pci.h +++ b/dpdk/lib/ethdev/ethdev_pci.h @@ -6,6 +6,10 @@ @@ -48324,7 +88858,22 @@ index 71aa4b2e98..0549842709 100644 if (!pci_dev) return -ENODEV; -@@ -171,4 +176,8 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev, +@@ -121,11 +126,13 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, + struct rte_eth_dev *eth_dev; + int ret; + ++ if (*dev_init == NULL) ++ return -EINVAL; ++ + eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size); + if (!eth_dev) + return -ENOMEM; + +- RTE_FUNC_PTR_OR_ERR_RET(*dev_init, -EINVAL); + ret = dev_init(eth_dev); + if (ret) + rte_eth_dev_release_port(eth_dev); +@@ -171,4 +178,8 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev, return 0; } @@ -48369,6 +88918,19 @@ index 2b49e9665b..f5f536ce64 100644 +#endif + #endif /* _RTE_ETHDEV_VDEV_H_ */ +diff --git a/dpdk/lib/ethdev/rte_class_eth.c b/dpdk/lib/ethdev/rte_class_eth.c +index c8e8fc9244..ae6fac5207 100644 +--- a/dpdk/lib/ethdev/rte_class_eth.c ++++ b/dpdk/lib/ethdev/rte_class_eth.c +@@ -68,7 +68,7 @@ eth_representor_cmp(const char *key __rte_unused, + const struct rte_eth_dev *edev = opaque; + const struct 
rte_eth_dev_data *data = edev->data; + struct rte_eth_devargs eth_da; +- uint16_t id, nc, np, nf, i, c, p, f; ++ uint16_t id = 0, nc, np, nf, i, c, p, f; + + if ((data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) + return -1; /* not a representor port */ diff --git a/dpdk/lib/ethdev/rte_dev_info.h b/dpdk/lib/ethdev/rte_dev_info.h index cb2fe0ae97..67cf0ae526 100644 --- a/dpdk/lib/ethdev/rte_dev_info.h @@ -48394,7 +88956,7 @@ index cb2fe0ae97..67cf0ae526 100644 + #endif /* _RTE_DEV_INFO_H_ */ diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c -index a1d475a292..62e67f006d 100644 +index a1d475a292..9a8dd94e0a 100644 --- a/dpdk/lib/ethdev/rte_ethdev.c +++ b/dpdk/lib/ethdev/rte_ethdev.c @@ -894,6 +894,17 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) @@ -48415,7 +88977,18 @@ index a1d475a292..62e67f006d 100644 static int eth_err(uint16_t port_id, int ret) { -@@ -1879,8 +1890,9 @@ rte_eth_dev_stop(uint16_t port_id) +@@ -1517,7 +1528,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + } + + if (dev_conf->rxmode.mtu == 0) +- dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU; ++ dev->data->dev_conf.rxmode.mtu = ++ (dev_info.max_mtu == 0) ? RTE_ETHER_MTU : ++ RTE_MIN(dev_info.max_mtu, RTE_ETHER_MTU); + + ret = eth_dev_validate_mtu(port_id, &dev_info, + dev->data->dev_conf.rxmode.mtu); +@@ -1879,8 +1892,9 @@ rte_eth_dev_stop(uint16_t port_id) /* point fast-path functions to dummy ones */ eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id); @@ -48426,7 +88999,7 @@ index a1d475a292..62e67f006d 100644 rte_ethdev_trace_stop(port_id, ret); return ret; -@@ -1920,7 +1932,13 @@ rte_eth_dev_close(uint16_t port_id) +@@ -1920,7 +1934,13 @@ rte_eth_dev_close(uint16_t port_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -48441,7 +89014,7 @@ index a1d475a292..62e67f006d 100644 RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n", port_id); return -EINVAL; -@@ -4070,6 +4088,7 @@ rte_eth_dev_rss_reta_update(uint16_t port_id, +@@ -4070,6 +4090,7 @@ rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { @@ -48449,7 +89022,7 @@ index a1d475a292..62e67f006d 100644 struct rte_eth_dev *dev; int ret; -@@ -4101,6 +4120,12 @@ rte_eth_dev_rss_reta_update(uint16_t port_id, +@@ -4101,6 +4122,12 @@ rte_eth_dev_rss_reta_update(uint16_t port_id, if (ret < 0) return ret; @@ -48462,7 +89035,7 @@ index a1d475a292..62e67f006d 100644 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size)); -@@ -4140,6 +4165,7 @@ rte_eth_dev_rss_hash_update(uint16_t port_id, +@@ -4140,6 +4167,7 @@ rte_eth_dev_rss_hash_update(uint16_t port_id, { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; @@ -48470,7 +89043,7 @@ index a1d475a292..62e67f006d 100644 int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); -@@ -4165,6 +4191,13 @@ rte_eth_dev_rss_hash_update(uint16_t port_id, +@@ -4165,6 +4193,13 @@ rte_eth_dev_rss_hash_update(uint16_t port_id, dev_info.flow_type_rss_offloads); return -EINVAL; } @@ -48484,7 +89057,43 @@ index a1d475a292..62e67f006d 100644 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev, rss_conf)); -@@ -6156,6 +6189,8 @@ eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, +@@ -4318,6 +4353,11 @@ rte_eth_fec_set(uint16_t port_id, 
uint32_t fec_capa) + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + ++ if (fec_capa == 0) { ++ RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n"); ++ return -EINVAL; ++ } ++ + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP); + return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); + } +@@ -4452,6 +4492,7 @@ int + rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) + { + struct rte_eth_dev *dev; ++ int index; + int ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); +@@ -4469,6 +4510,15 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP); + ++ /* Keep address unique in dev->data->mac_addrs[]. */ ++ index = eth_dev_get_mac_addr_index(port_id, addr); ++ if (index > 0) { ++ RTE_ETHDEV_LOG(ERR, ++ "New default address for port %u was already in the address list. Please remove it first.\n", ++ port_id); ++ return -EEXIST; ++ } ++ + ret = (*dev->dev_ops->mac_addr_set)(dev, addr); + if (ret < 0) + return ret; +@@ -6156,6 +6206,8 @@ eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats, { int q; struct rte_tel_data *q_data = rte_tel_data_alloc(); @@ -48493,7 +89102,7 @@ index a1d475a292..62e67f006d 100644 rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL); for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) rte_tel_data_add_array_u64(q_data, q_stats[q]); -@@ -6249,6 +6284,7 @@ eth_dev_handle_port_xstats(const char *cmd __rte_unused, +@@ -6249,6 +6301,7 @@ eth_dev_handle_port_xstats(const char *cmd __rte_unused, for (i = 0; i < num_xstats; i++) rte_tel_data_add_dict_u64(d, xstat_names[i].name, eth_xstats[i].value); @@ -48501,7 +89110,7 @@ index a1d475a292..62e67f006d 100644 return 0; } -@@ -6295,7 +6331,7 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6295,7 +6348,7 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, struct rte_tel_data *d) { struct rte_tel_data *rxq_state, *txq_state; @@ -48510,7 +89119,7 @@ index a1d475a292..62e67f006d 100644 struct rte_eth_dev *eth_dev; char *end_param; int port_id, i; -@@ -6312,16 +6348,16 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6312,16 +6365,16 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, return -EINVAL; eth_dev = &rte_eth_devices[port_id]; @@ -48530,10 +89139,15 @@ index a1d475a292..62e67f006d 100644 rte_tel_data_start_dict(d); rte_tel_data_add_dict_string(d, "name", eth_dev->data->name); -@@ -6336,13 +6372,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6332,17 +6385,10 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, + eth_dev->data->nb_tx_queues); + rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); + rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); +- rte_tel_data_add_dict_int(d, "rx_mbuf_size_min", ++ rte_tel_data_add_dict_u64(d, "rx_mbuf_size_min", eth_dev->data->min_rx_buf_size); - rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", - eth_dev->data->rx_mbuf_alloc_failed); +- rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail", +- eth_dev->data->rx_mbuf_alloc_failed); - snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x", - eth_dev->data->mac_addrs->addr_bytes[0], - eth_dev->data->mac_addrs->addr_bytes[1], @@ -48546,8 +89160,25 @@ index a1d475a292..62e67f006d 100644 rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); rte_tel_data_add_dict_int(d, "promiscuous", eth_dev->data->promiscuous); +@@ -6368,12 +6414,12 
@@ eth_dev_handle_port_info(const char *cmd __rte_unused, + rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); + rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); + rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); +- rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags); +- rte_tel_data_add_dict_int(d, "rx_offloads", ++ rte_tel_data_add_dict_u64(d, "dev_flags", eth_dev->data->dev_flags); ++ rte_tel_data_add_dict_u64(d, "rx_offloads", + eth_dev->data->dev_conf.rxmode.offloads); +- rte_tel_data_add_dict_int(d, "tx_offloads", ++ rte_tel_data_add_dict_u64(d, "tx_offloads", + eth_dev->data->dev_conf.txmode.offloads); +- rte_tel_data_add_dict_int(d, "ethdev_rss_hf", ++ rte_tel_data_add_dict_u64(d, "ethdev_rss_hf", + eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); + + return 0; diff --git a/dpdk/lib/ethdev/rte_ethdev.h b/dpdk/lib/ethdev/rte_ethdev.h -index fa299c8ad7..0be04c5809 100644 +index fa299c8ad7..083f324a46 100644 --- a/dpdk/lib/ethdev/rte_ethdev.h +++ b/dpdk/lib/ethdev/rte_ethdev.h @@ -74,7 +74,7 @@ @@ -48578,6 +89209,102 @@ index fa299c8ad7..0be04c5809 100644 uint16_t max_vfs; /**< Maximum number of VFs. */ uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */ struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/ +@@ -3649,7 +3649,7 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size); + * for example, to count dropped packets, or to retry transmission of packets + * which cannot be sent, this function should be used to register a suitable + * callback function to implement the desired behaviour. +- * The example callback "rte_eth_count_unsent_packet_callback()" is also ++ * The example callback "rte_eth_tx_buffer_count_callback()" is also + * provided as reference. + * + * @param buffer +@@ -4034,10 +4034,7 @@ int rte_eth_fec_get_capability(uint16_t port_id, + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa +- * A bitmask of enabled FEC modes. If AUTO bit is set, other +- * bits specify FEC modes which may be negotiated. If AUTO +- * bit is clear, specify FEC modes to be used (only one valid +- * mode per speed may be set). ++ * A bitmask with the current FEC mode. + * @return + * - (0) if successful. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support. +@@ -4057,10 +4054,13 @@ int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa); + * @param port_id + * The port identifier of the Ethernet device. + * @param fec_capa +- * A bitmask of allowed FEC modes. If AUTO bit is set, other +- * bits specify FEC modes which may be negotiated. If AUTO +- * bit is clear, specify FEC modes to be used (only one valid +- * mode per speed may be set). ++ * A bitmask of allowed FEC modes. ++ * If only the AUTO bit is set, the decision on which FEC ++ * mode to use will be made by HW/FW or driver. ++ * If the AUTO bit is set with some FEC modes, only specified ++ * FEC modes can be set. ++ * If AUTO bit is clear, specify FEC mode to be used ++ * (only one valid mode per speed may be set). + * @return + * - (0) if successful. + * - (-EINVAL) if the FEC mode is not valid. +@@ -4163,6 +4163,9 @@ int rte_eth_dev_mac_addr_remove(uint16_t port_id, + + /** + * Set the default MAC address. ++ * It replaces the address at index 0 of the MAC address list. ++ * If the address was already in the MAC address list, ++ * please remove it first. + * + * @param port_id + * The port identifier of the Ethernet device. 
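
A caller-side sketch (an assumption, not part of this patch): with the uniqueness check added above, rte_eth_dev_default_mac_addr_set() can now fail with -EEXIST when the new default address already sits in a non-zero slot of the address list, so a caller has to remove that entry before retrying. The helper name set_default_mac() is hypothetical.

/* Hypothetical helper built on the -EEXIST behaviour above.
 * Requires <errno.h> and <rte_ethdev.h>. */
static int
set_default_mac(uint16_t port_id, struct rte_ether_addr *addr)
{
	int ret = rte_eth_dev_default_mac_addr_set(port_id, addr);

	if (ret == -EEXIST) {
		/* addr occupies a non-zero slot in dev->data->mac_addrs[];
		 * free that slot first, then retry. */
		ret = rte_eth_dev_mac_addr_remove(port_id, addr);
		if (ret == 0)
			ret = rte_eth_dev_default_mac_addr_set(port_id, addr);
	}
	return ret;
}
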
+@@ -4173,6 +4176,7 @@ int rte_eth_dev_mac_addr_remove(uint16_t port_id, + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if MAC address is invalid. ++ * - (-EEXIST) if MAC address was already in the address list. + */ + int rte_eth_dev_default_mac_addr_set(uint16_t port_id, + struct rte_ether_addr *mac_addr); +diff --git a/dpdk/lib/ethdev/rte_flow.c b/dpdk/lib/ethdev/rte_flow.c +index a93f68abbc..3a6b6db725 100644 +--- a/dpdk/lib/ethdev/rte_flow.c ++++ b/dpdk/lib/ethdev/rte_flow.c +@@ -656,7 +656,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, + if (src.rss->key_len && src.rss->key) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key)); + tmp = sizeof(*src.rss->key) * src.rss->key_len; +- if (size >= off + tmp) ++ if (size >= (uint64_t)off + (uint64_t)tmp) + dst.rss->key = rte_memcpy + ((void *)((uintptr_t)dst.rss + off), + src.rss->key, tmp); +@@ -665,7 +665,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, + if (src.rss->queue_num) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue)); + tmp = sizeof(*src.rss->queue) * src.rss->queue_num; +- if (size >= off + tmp) ++ if (size >= (uint64_t)off + (uint64_t)tmp) + dst.rss->queue = rte_memcpy + ((void *)((uintptr_t)dst.rss + off), + src.rss->queue, tmp); +@@ -857,7 +857,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst, + src -= num; + dst -= num; + do { +- if (src->conf) { ++ if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) { ++ /* ++ * Indirect action conf fills the indirect action ++ * handler. Copy the action handle directly instead ++ * of duplicating the pointer memory. ++ */ ++ if (size) ++ dst->conf = src->conf; ++ } else if (src->conf) { + off = RTE_ALIGN_CEIL(off, sizeof(double)); + ret = rte_flow_conv_action_conf + ((void *)(data + off), diff --git a/dpdk/lib/ethdev/version.map b/dpdk/lib/ethdev/version.map index c2fb0669a4..1f7359c846 100644 --- a/dpdk/lib/ethdev/version.map @@ -48630,7 +89357,7 @@ index 499852db16..24b56faaa9 100644 + #endif /* _RTE_EVENTDEV_PMD_PCI_H_ */ diff --git a/dpdk/lib/eventdev/eventdev_pmd_vdev.h b/dpdk/lib/eventdev/eventdev_pmd_vdev.h -index d9ee7277dd..77904910a2 100644 +index d9ee7277dd..aa809dff4f 100644 --- a/dpdk/lib/eventdev/eventdev_pmd_vdev.h +++ b/dpdk/lib/eventdev/eventdev_pmd_vdev.h @@ -5,6 +5,10 @@ @@ -48644,7 +89371,24 @@ index d9ee7277dd..77904910a2 100644 /** @file * RTE Eventdev VDEV PMD APIs * -@@ -99,4 +103,8 @@ rte_event_pmd_vdev_uninit(const char *name) +@@ -40,7 +44,7 @@ + __rte_internal + static inline struct rte_eventdev * + rte_event_pmd_vdev_init(const char *name, size_t dev_private_size, +- int socket_id) ++ int socket_id, struct rte_vdev_device *vdev) + { + + struct rte_eventdev *eventdev; +@@ -62,6 +66,7 @@ rte_event_pmd_vdev_init(const char *name, size_t dev_private_size, + rte_panic("Cannot allocate memzone for private device" + " data"); + } ++ eventdev->dev = &vdev->device; + + return eventdev; + } +@@ -99,4 +104,8 @@ rte_event_pmd_vdev_uninit(const char *name) return 0; } @@ -48653,8 +89397,85 @@ index d9ee7277dd..77904910a2 100644 +#endif + #endif /* _RTE_EVENTDEV_PMD_VDEV_H_ */ +diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.c b/dpdk/lib/eventdev/rte_event_crypto_adapter.c +index d84080355d..142bfe9b9b 100644 +--- a/dpdk/lib/eventdev/rte_event_crypto_adapter.c ++++ b/dpdk/lib/eventdev/rte_event_crypto_adapter.c +@@ -30,6 +30,8 @@ + */ + #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024 + ++#define ECA_ADAPTER_ARRAY "crypto_adapter_array" ++ + struct event_crypto_adapter { + /* Event device 
identifier */ + uint8_t eventdev_id; +@@ -118,7 +120,6 @@ eca_valid_id(uint8_t id) + static int + eca_init(void) + { +- const char *name = "crypto_adapter_array"; + const struct rte_memzone *mz; + unsigned int sz; + +@@ -126,9 +127,10 @@ eca_init(void) + RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE; + sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE); + +- mz = rte_memzone_lookup(name); ++ mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY); + if (mz == NULL) { +- mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0, ++ mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz, ++ rte_socket_id(), 0, + RTE_CACHE_LINE_SIZE); + if (mz == NULL) { + RTE_EDEV_LOG_ERR("failed to reserve memzone err = %" +@@ -141,6 +143,22 @@ eca_init(void) + return 0; + } + ++static int ++eca_memzone_lookup(void) ++{ ++ const struct rte_memzone *mz; ++ ++ if (event_crypto_adapter == NULL) { ++ mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY); ++ if (mz == NULL) ++ return -ENOMEM; ++ ++ event_crypto_adapter = mz->addr; ++ } ++ ++ return 0; ++} ++ + static inline struct event_crypto_adapter * + eca_id_to_adapter(uint8_t id) + { +@@ -1051,6 +1069,9 @@ rte_event_crypto_adapter_stats_get(uint8_t id, + uint32_t i; + int ret; + ++ if (eca_memzone_lookup()) ++ return -ENOMEM; ++ + EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = eca_id_to_adapter(id); +@@ -1092,6 +1113,9 @@ rte_event_crypto_adapter_stats_reset(uint8_t id) + struct rte_eventdev *dev; + uint32_t i; + ++ if (eca_memzone_lookup()) ++ return -ENOMEM; ++ + EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + adapter = eca_id_to_adapter(id); diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c -index 809416d9b7..2356e2a535 100644 +index 809416d9b7..54115cc899 100644 --- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c @@ -293,6 +293,30 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, @@ -48688,6 +89509,34 @@ index 809416d9b7..2356e2a535 100644 static inline int rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter) { +@@ -1833,6 +1857,13 @@ rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id) + if (rx_adapter->service_inited) + return 0; + ++ if (rte_mbuf_dyn_rx_timestamp_register( ++ &event_eth_rx_timestamp_dynfield_offset, ++ &event_eth_rx_timestamp_dynflag) != 0) { ++ RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); ++ return -rte_errno; ++ } ++ + memset(&service, 0, sizeof(service)); + snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN, + "rte_event_eth_rx_adapter_%d", id); +@@ -2397,13 +2428,6 @@ rxa_create(uint8_t id, uint8_t dev_id, + if (conf_cb == rxa_default_conf_cb) + rx_adapter->default_cb_arg = 1; + +- if (rte_mbuf_dyn_rx_timestamp_register( +- &event_eth_rx_timestamp_dynfield_offset, +- &event_eth_rx_timestamp_dynflag) != 0) { +- RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); +- return -rte_errno; +- } +- + rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, + conf_arg); + return 0; @@ -2984,15 +3008,17 @@ rte_event_eth_rx_adapter_queue_stats_get(uint8_t id, return -EINVAL; } @@ -48975,10 +89824,32 @@ index 809416d9b7..2356e2a535 100644 } RTE_INIT(rxa_init_telemetry) +diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.h b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.h +index 9546d792e9..8f8c887b9e 100644 +--- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.h ++++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.h +@@ -456,7 +456,7 @@ int 
rte_event_eth_rx_adapter_free(uint8_t id); + * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ + * + * @param conf +- * Additional configuration structure of type *rte_event_eth_rx_adapter_conf* ++ * Additional configuration structure of type *rte_event_eth_rx_adapter_queue_conf* + * + * @return + * - 0: Success, Receive queue added correctly. diff --git a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c -index c17f33f098..b4b37f1cae 100644 +index c17f33f098..971b4024db 100644 --- a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c +@@ -44,7 +44,7 @@ + #define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \ + do { \ + if (!txa_valid_id(id)) { \ +- RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \ ++ RTE_EDEV_LOG_ERR("Invalid eth Tx adapter id = %d", id); \ + return retval; \ + } \ + } while (0) @@ -224,7 +224,7 @@ txa_service_data_init(void) if (txa_service_data_array == NULL) { txa_service_data_array = @@ -48988,7 +89859,35 @@ index c17f33f098..b4b37f1cae 100644 RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE); if (txa_service_data_array == NULL) return -ENOMEM; -@@ -806,10 +806,8 @@ txa_service_queue_add(uint8_t id, +@@ -468,14 +468,13 @@ txa_service_ctrl(uint8_t id, int start) + struct txa_service_data *txa; + + txa = txa_service_id_to_data(id); +- if (txa->service_id == TXA_INVALID_SERVICE_ID) ++ if (txa == NULL || txa->service_id == TXA_INVALID_SERVICE_ID) + return 0; + ++ rte_spinlock_lock(&txa->tx_lock); + ret = rte_service_runstate_set(txa->service_id, start); +- if (ret == 0 && !start) { +- while (rte_service_may_be_active(txa->service_id)) +- rte_pause(); +- } ++ rte_spinlock_unlock(&txa->tx_lock); ++ + return ret; + } + +@@ -629,7 +628,7 @@ txa_service_func(void *args) + RTE_ETH_FOREACH_DEV(i) { + uint16_t q; + +- if (i == txa->dev_count) ++ if (i >= txa->dev_count) + break; + + dev = tdi[i].dev; +@@ -806,10 +805,8 @@ txa_service_queue_add(uint8_t id, rte_spinlock_lock(&txa->tx_lock); @@ -49001,7 +89900,7 @@ index c17f33f098..b4b37f1cae 100644 ret = txa_service_queue_array_alloc(txa, eth_dev->data->port_id); if (ret) -@@ -821,6 +819,8 @@ txa_service_queue_add(uint8_t id, +@@ -821,6 +818,8 @@ txa_service_queue_add(uint8_t id, tdi = &txa->txa_ethdev[eth_dev->data->port_id]; tqi = txa_service_queue(txa, eth_dev->data->port_id, tx_queue_id); @@ -49010,7 +89909,7 @@ index c17f33f098..b4b37f1cae 100644 txa_retry = &tqi->txa_retry; txa_retry->id = txa->id; -@@ -836,6 +836,10 @@ txa_service_queue_add(uint8_t id, +@@ -836,6 +835,10 @@ txa_service_queue_add(uint8_t id, tdi->nb_queues++; txa->nb_queues++; @@ -49021,7 +89920,7 @@ index c17f33f098..b4b37f1cae 100644 err_unlock: if (txa->nb_queues == 0) { txa_service_queue_array_free(txa, -@@ -844,7 +848,7 @@ txa_service_queue_add(uint8_t id, +@@ -844,7 +847,7 @@ txa_service_queue_add(uint8_t id, } rte_spinlock_unlock(&txa->tx_lock); @@ -49030,7 +89929,28 @@ index c17f33f098..b4b37f1cae 100644 } static int -@@ -887,9 +891,10 @@ txa_service_queue_del(uint8_t id, +@@ -864,6 +867,8 @@ txa_service_queue_del(uint8_t id, + uint16_t i, q, nb_queues; + int ret = 0; + ++ if (txa->txa_ethdev == NULL) ++ return 0; + nb_queues = txa->txa_ethdev[port_id].nb_queues; + if (nb_queues == 0) + return 0; +@@ -876,10 +881,10 @@ txa_service_queue_del(uint8_t id, + + if (tqi[q].added) { + ret = txa_service_queue_del(id, dev, q); ++ i++; + if (ret != 0) + break; + } +- i++; + q++; + } + return ret; +@@ -887,9 +892,10 @@ txa_service_queue_del(uint8_t id, txa = 
txa_service_id_to_data(id); @@ -49042,7 +89962,7 @@ index c17f33f098..b4b37f1cae 100644 tb = tqi->tx_buf; tqi->added = 0; -@@ -899,6 +904,9 @@ txa_service_queue_del(uint8_t id, +@@ -899,6 +905,9 @@ txa_service_queue_del(uint8_t id, txa->txa_ethdev[port_id].nb_queues--; txa_service_queue_array_free(txa, port_id); @@ -49077,6 +89997,171 @@ index c0861b0ec2..0b9aefb000 100644 +#endif + #endif +diff --git a/dpdk/lib/eventdev/rte_event_timer_adapter.c b/dpdk/lib/eventdev/rte_event_timer_adapter.c +index 9dad170b5a..5e7d101470 100644 +--- a/dpdk/lib/eventdev/rte_event_timer_adapter.c ++++ b/dpdk/lib/eventdev/rte_event_timer_adapter.c +@@ -19,6 +19,7 @@ + #include <rte_timer.h> + #include <rte_service_component.h> + #include <rte_cycles.h> ++#include <rte_reciprocal.h> + + #include "event_timer_adapter_pmd.h" + #include "eventdev_pmd.h" +@@ -677,13 +678,51 @@ swtim_callback(struct rte_timer *tim) + } + } + +-static __rte_always_inline uint64_t ++static __rte_always_inline int + get_timeout_cycles(struct rte_event_timer *evtim, +- const struct rte_event_timer_adapter *adapter) ++ const struct rte_event_timer_adapter *adapter, ++ uint64_t *timeout_cycles) + { +- struct swtim *sw = swtim_pmd_priv(adapter); +- uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns; +- return timeout_ns * rte_get_timer_hz() / NSECPERSEC; ++ static struct rte_reciprocal_u64 nsecpersec_inverse; ++ static uint64_t timer_hz; ++ uint64_t rem_cycles, secs_cycles = 0; ++ uint64_t secs, timeout_nsecs; ++ uint64_t nsecpersec; ++ struct swtim *sw; ++ ++ sw = swtim_pmd_priv(adapter); ++ nsecpersec = (uint64_t)NSECPERSEC; ++ ++ timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns; ++ if (timeout_nsecs > sw->max_tmo_ns) ++ return -1; ++ if (timeout_nsecs < sw->timer_tick_ns) ++ return -2; ++ ++ /* Set these values in the first invocation */ ++ if (!timer_hz) { ++ timer_hz = rte_get_timer_hz(); ++ nsecpersec_inverse = rte_reciprocal_value_u64(nsecpersec); ++ } ++ ++ /* If timeout_nsecs > nsecpersec, decrease timeout_nsecs by the number ++ * of whole seconds it contains and convert that value to a number ++ * of cycles. This keeps timeout_nsecs in the interval [0..nsecpersec) ++ * in order to avoid overflow when we later multiply by timer_hz. 
++ */ ++ if (timeout_nsecs > nsecpersec) { ++ secs = rte_reciprocal_divide_u64(timeout_nsecs, ++ &nsecpersec_inverse); ++ secs_cycles = secs * timer_hz; ++ timeout_nsecs -= secs * nsecpersec; ++ } ++ ++ rem_cycles = rte_reciprocal_divide_u64(timeout_nsecs * timer_hz, ++ &nsecpersec_inverse); ++ ++ *timeout_cycles = secs_cycles + rem_cycles; ++ ++ return 0; + } + + /* This function returns true if one or more (adapter) ticks have occurred since +@@ -717,23 +756,6 @@ swtim_did_tick(struct swtim *sw) + return false; + } + +-/* Check that event timer timeout value is in range */ +-static __rte_always_inline int +-check_timeout(struct rte_event_timer *evtim, +- const struct rte_event_timer_adapter *adapter) +-{ +- uint64_t tmo_nsec; +- struct swtim *sw = swtim_pmd_priv(adapter); +- +- tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns; +- if (tmo_nsec > sw->max_tmo_ns) +- return -1; +- if (tmo_nsec < sw->timer_tick_ns) +- return -2; +- +- return 0; +-} +- + /* Check that event timer event queue sched type matches destination event queue + * sched type + */ +@@ -775,17 +797,18 @@ swtim_service_func(void *arg) + sw->n_expired_timers); + sw->n_expired_timers = 0; + +- event_buffer_flush(&sw->buffer, +- adapter->data->event_dev_id, +- adapter->data->event_port_id, +- &nb_evs_flushed, +- &nb_evs_invalid); +- +- sw->stats.ev_enq_count += nb_evs_flushed; +- sw->stats.ev_inv_count += nb_evs_invalid; + sw->stats.adapter_tick_count++; + } + ++ event_buffer_flush(&sw->buffer, ++ adapter->data->event_dev_id, ++ adapter->data->event_port_id, ++ &nb_evs_flushed, ++ &nb_evs_invalid); ++ ++ sw->stats.ev_enq_count += nb_evs_flushed; ++ sw->stats.ev_inv_count += nb_evs_invalid; ++ + rte_event_maintain(adapter->data->event_dev_id, + adapter->data->event_port_id, 0); + +@@ -1107,21 +1130,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter, + break; + } + +- ret = check_timeout(evtims[i], adapter); +- if (unlikely(ret == -1)) { +- __atomic_store_n(&evtims[i]->state, +- RTE_EVENT_TIMER_ERROR_TOOLATE, +- __ATOMIC_RELAXED); +- rte_errno = EINVAL; +- break; +- } else if (unlikely(ret == -2)) { +- __atomic_store_n(&evtims[i]->state, +- RTE_EVENT_TIMER_ERROR_TOOEARLY, +- __ATOMIC_RELAXED); +- rte_errno = EINVAL; +- break; +- } +- + if (unlikely(check_destination_event_queue(evtims[i], + adapter) < 0)) { + __atomic_store_n(&evtims[i]->state, +@@ -1137,7 +1145,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter, + evtims[i]->impl_opaque[0] = (uintptr_t)tim; + evtims[i]->impl_opaque[1] = (uintptr_t)adapter; + +- cycles = get_timeout_cycles(evtims[i], adapter); ++ ret = get_timeout_cycles(evtims[i], adapter, &cycles); ++ if (unlikely(ret == -1)) { ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR_TOOLATE, ++ __ATOMIC_RELAXED); ++ rte_errno = EINVAL; ++ break; ++ } else if (unlikely(ret == -2)) { ++ __atomic_store_n(&evtims[i]->state, ++ RTE_EVENT_TIMER_ERROR_TOOEARLY, ++ __ATOMIC_RELAXED); ++ rte_errno = EINVAL; ++ break; ++ } ++ + ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles, + SINGLE, lcore_id, NULL, evtims[i]); + if (ret < 0) { diff --git a/dpdk/lib/eventdev/rte_event_timer_adapter.h b/dpdk/lib/eventdev/rte_event_timer_adapter.h index 1551741820..e68d02da72 100644 --- a/dpdk/lib/eventdev/rte_event_timer_adapter.h @@ -49090,6 +90175,19 @@ index 1551741820..e68d02da72 100644 +#endif + #endif /* __RTE_EVENT_TIMER_ADAPTER_H__ */ +diff --git a/dpdk/lib/eventdev/rte_eventdev.c b/dpdk/lib/eventdev/rte_eventdev.c +index 79b9ea3a02..cb52f17b50 100644 +--- 
a/dpdk/lib/eventdev/rte_eventdev.c ++++ b/dpdk/lib/eventdev/rte_eventdev.c +@@ -109,6 +109,8 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) + dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns; + + dev_info->dev = dev->dev; ++ if (dev->dev != NULL && dev->dev->driver != NULL) ++ dev_info->driver_name = dev->dev->driver->name; + return 0; + } + diff --git a/dpdk/lib/eventdev/rte_eventdev.h b/dpdk/lib/eventdev/rte_eventdev.h index eef47d8acc..476bcbcc21 100644 --- a/dpdk/lib/eventdev/rte_eventdev.h @@ -49136,6 +90234,39 @@ index eef47d8acc..476bcbcc21 100644 /* * Allow zero cost non burst mode routine invocation if application * requests nb_events as const one +diff --git a/dpdk/lib/eventdev/version.map b/dpdk/lib/eventdev/version.map +index ade1f1182e..5c90739873 100644 +--- a/dpdk/lib/eventdev/version.map ++++ b/dpdk/lib/eventdev/version.map +@@ -101,6 +101,7 @@ EXPERIMENTAL { + global: + + # added in 21.11 ++ __rte_eventdev_trace_maintain; + rte_event_eth_rx_adapter_create_with_params; + rte_event_eth_rx_adapter_queue_conf_get; + rte_event_eth_rx_adapter_queue_stats_get; +diff --git a/dpdk/lib/fib/dir24_8.c b/dpdk/lib/fib/dir24_8.c +index bb3bc9753b..49407ba638 100644 +--- a/dpdk/lib/fib/dir24_8.c ++++ b/dpdk/lib/fib/dir24_8.c +@@ -392,9 +392,15 @@ modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip, + return ret; + ledge = redge + + (uint32_t)(1ULL << (32 - tmp_depth)); ++ /* ++ * we got to the end of address space ++ * and wrapped around ++ */ ++ if (ledge == 0) ++ break; + } else { + redge = ip + (uint32_t)(1ULL << (32 - depth)); +- if (ledge == redge) ++ if (ledge == redge && ledge != 0) + break; + ret = install_to_fib(dp, ledge, redge, + next_hop); diff --git a/dpdk/lib/fib/rte_fib.c b/dpdk/lib/fib/rte_fib.c index 6ca180d7e7..0cced97a77 100644 --- a/dpdk/lib/fib/rte_fib.c @@ -49155,10 +90286,19 @@ index 6ca180d7e7..0cced97a77 100644 }; diff --git a/dpdk/lib/fib/rte_fib.h b/dpdk/lib/fib/rte_fib.h -index b3c59dfaaa..e592d3251a 100644 +index b3c59dfaaa..737c921345 100644 --- a/dpdk/lib/fib/rte_fib.h +++ b/dpdk/lib/fib/rte_fib.h -@@ -189,7 +189,7 @@ rte_fib_lookup_bulk(struct rte_fib *fib, uint32_t *ips, +@@ -124,8 +124,6 @@ rte_fib_find_existing(const char *name); + * + * @param fib + * FIB object handle +- * @return +- * None + */ + void + rte_fib_free(struct rte_fib *fib); +@@ -189,7 +187,7 @@ rte_fib_lookup_bulk(struct rte_fib *fib, uint32_t *ips, * FIB object handle * @return * Pointer on the dataplane struct on success @@ -49167,7 +90307,7 @@ index b3c59dfaaa..e592d3251a 100644 */ void * rte_fib_get_dp(struct rte_fib *fib); -@@ -201,7 +201,7 @@ rte_fib_get_dp(struct rte_fib *fib); +@@ -201,7 +199,7 @@ rte_fib_get_dp(struct rte_fib *fib); * FIB object handle * @return * Pointer on the RIB on success @@ -49195,10 +90335,19 @@ index be79efe004..eebee297d6 100644 }; diff --git a/dpdk/lib/fib/rte_fib6.h b/dpdk/lib/fib/rte_fib6.h -index 95879af96d..cb133719e1 100644 +index 95879af96d..53bd0e3674 100644 --- a/dpdk/lib/fib/rte_fib6.h +++ b/dpdk/lib/fib/rte_fib6.h -@@ -184,7 +184,7 @@ rte_fib6_lookup_bulk(struct rte_fib6 *fib, +@@ -115,8 +115,6 @@ rte_fib6_find_existing(const char *name); + * + * @param fib + * FIB object handle +- * @return +- * None + */ + void + rte_fib6_free(struct rte_fib6 *fib); +@@ -184,7 +182,7 @@ rte_fib6_lookup_bulk(struct rte_fib6 *fib, * FIB6 object handle * @return * Pointer on the dataplane struct on success @@ -49207,7 +90356,7 @@ index 95879af96d..cb133719e1 100644 */ void * 
rte_fib6_get_dp(struct rte_fib6 *fib); -@@ -196,7 +196,7 @@ rte_fib6_get_dp(struct rte_fib6 *fib); +@@ -196,7 +194,7 @@ rte_fib6_get_dp(struct rte_fib6 *fib); * FIB object handle * @return * Pointer on the RIB6 on success @@ -49216,6 +90365,69 @@ index 95879af96d..cb133719e1 100644 */ struct rte_rib6 * rte_fib6_get_rib(struct rte_fib6 *fib); +diff --git a/dpdk/lib/fib/trie.c b/dpdk/lib/fib/trie.c +index 044095bf03..b096743086 100644 +--- a/dpdk/lib/fib/trie.c ++++ b/dpdk/lib/fib/trie.c +@@ -456,6 +456,14 @@ get_nxt_net(uint8_t *ip, uint8_t depth) + } + } + ++static int ++v6_addr_is_zero(const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE]) ++{ ++ uint8_t ip_addr[RTE_FIB6_IPV6_ADDR_SIZE] = {0}; ++ ++ return rte_rib6_is_equal(ip, ip_addr); ++} ++ + static int + modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, + const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE], +@@ -489,11 +497,19 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, + return ret; + get_nxt_net(redge, tmp_depth); + rte_rib6_copy_addr(ledge, redge); ++ /* ++ * we got to the end of address space ++ * and wrapped around ++ */ ++ if (v6_addr_is_zero(ledge)) ++ break; + } else { + rte_rib6_copy_addr(redge, ip); + get_nxt_net(redge, depth); +- if (rte_rib6_is_equal(ledge, redge)) ++ if (rte_rib6_is_equal(ledge, redge) && ++ !v6_addr_is_zero(ledge)) + break; ++ + ret = install_to_dp(dp, ledge, redge, + next_hop); + if (ret != 0) +diff --git a/dpdk/lib/gpudev/gpudev.c b/dpdk/lib/gpudev/gpudev.c +index 9ae36dbae9..6a549ee08e 100644 +--- a/dpdk/lib/gpudev/gpudev.c ++++ b/dpdk/lib/gpudev/gpudev.c +@@ -407,6 +407,7 @@ rte_gpu_callback_register(int16_t dev_id, enum rte_gpu_event event, + callback->function == function && + callback->user_data == user_data) { + GPU_LOG(INFO, "callback already registered"); ++ rte_rwlock_write_unlock(&gpu_callback_lock); + return 0; + } + } +@@ -414,7 +415,9 @@ rte_gpu_callback_register(int16_t dev_id, enum rte_gpu_event event, + callback = malloc(sizeof(*callback)); + if (callback == NULL) { + GPU_LOG(ERR, "cannot allocate callback"); +- return -ENOMEM; ++ rte_rwlock_write_unlock(&gpu_callback_lock); ++ rte_errno = ENOMEM; ++ return -rte_errno; + } + callback->function = function; + callback->user_data = user_data; diff --git a/dpdk/lib/gpudev/version.map b/dpdk/lib/gpudev/version.map index 2e414c65cc..34186ab7f1 100644 --- a/dpdk/lib/gpudev/version.map @@ -49227,8 +90439,33 @@ index 2e414c65cc..34186ab7f1 100644 + + local: *; }; +diff --git a/dpdk/lib/graph/node.c b/dpdk/lib/graph/node.c +index 86ec4316f9..b7b83f761c 100644 +--- a/dpdk/lib/graph/node.c ++++ b/dpdk/lib/graph/node.c +@@ -304,16 +304,16 @@ rte_node_edge_shrink(rte_node_t id, rte_edge_t size) + if (node->id == id) { + if (node->nb_edges < size) { + rte_errno = E2BIG; +- goto fail; ++ } else { ++ node->nb_edges = size; ++ rc = size; + } +- node->nb_edges = size; +- rc = size; + break; + } + } + +-fail: + graph_spinlock_unlock(); ++fail: + return rc; + } + diff --git a/dpdk/lib/graph/rte_graph_worker.h b/dpdk/lib/graph/rte_graph_worker.h -index eef77f732a..0c0b9c095a 100644 +index eef77f732a..6dc7461659 100644 --- a/dpdk/lib/graph/rte_graph_worker.h +++ b/dpdk/lib/graph/rte_graph_worker.h @@ -155,7 +155,7 @@ rte_graph_walk(struct rte_graph *graph) @@ -49240,6 +90477,74 @@ index eef77f732a..0c0b9c095a 100644 RTE_ASSERT(node->fence == RTE_GRAPH_FENCE); objs = node->objs; rte_prefetch0(objs); +@@ -224,7 +224,7 @@ __rte_node_enqueue_prologue(struct rte_graph *graph, struct rte_node *node, + __rte_node_enqueue_tail_update(graph, node); + + if 
(unlikely(node->size < (idx + space))) +- __rte_node_stream_alloc(graph, node); ++ __rte_node_stream_alloc_size(graph, node, node->size + space); + } + + /** +@@ -432,7 +432,7 @@ rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node, + uint16_t free_space = node->size - idx; + + if (unlikely(free_space < nb_objs)) +- __rte_node_stream_alloc_size(graph, node, nb_objs); ++ __rte_node_stream_alloc_size(graph, node, node->size + nb_objs); + + return &node->objs[idx]; + } +diff --git a/dpdk/lib/gro/gro_tcp4.c b/dpdk/lib/gro/gro_tcp4.c +index aff22178e3..a6ca6eacc2 100644 +--- a/dpdk/lib/gro/gro_tcp4.c ++++ b/dpdk/lib/gro/gro_tcp4.c +@@ -199,7 +199,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt, + struct rte_tcp_hdr *tcp_hdr; + uint32_t sent_seq; + int32_t tcp_dl; +- uint16_t ip_id, hdr_len, frag_off; ++ uint16_t ip_id, hdr_len, frag_off, ip_tlen; + uint8_t is_atomic; + + struct tcp4_flow_key key; +@@ -226,6 +226,12 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt, + */ + if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG) + return -1; ++ ++ /* trim the tail padding bytes */ ++ ip_tlen = rte_be_to_cpu_16(ipv4_hdr->total_length); ++ if (pkt->pkt_len > (uint32_t)(ip_tlen + pkt->l2_len)) ++ rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_tlen - pkt->l2_len); ++ + /* + * Don't process the packet whose payload length is less than or + * equal to 0. +diff --git a/dpdk/lib/gro/gro_udp4.c b/dpdk/lib/gro/gro_udp4.c +index e78dda7874..5a2cabdb88 100644 +--- a/dpdk/lib/gro/gro_udp4.c ++++ b/dpdk/lib/gro/gro_udp4.c +@@ -221,6 +221,11 @@ gro_udp4_reassemble(struct rte_mbuf *pkt, + if (!is_ipv4_fragment(ipv4_hdr)) + return -1; + ++ ip_dl = rte_be_to_cpu_16(ipv4_hdr->total_length); ++ /* trim the tail padding bytes */ ++ if (pkt->pkt_len > (uint32_t)(ip_dl + pkt->l2_len)) ++ rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_dl - pkt->l2_len); ++ + /* + * Don't process the packet whose payload length is less than or + * equal to 0. 
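
A standalone sketch (an assumed helper, not a function in the patch) of the padding-trim rule that the gro_tcp4 and gro_udp4 hunks above both apply inline: any bytes beyond l2_len plus the IPv4 total_length are Ethernet tail padding and are dropped before coalescing, so they cannot inflate the merged payload.

/* Assumed helper; the real patch inlines this logic in the two
 * reassemble functions. Requires <rte_ip.h> and <rte_mbuf.h>. */
static void
trim_eth_padding(struct rte_mbuf *pkt, const struct rte_ipv4_hdr *ip4)
{
	uint16_t ip_len = rte_be_to_cpu_16(ip4->total_length);

	if (pkt->pkt_len > (uint32_t)(ip_len + pkt->l2_len))
		rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_len - pkt->l2_len);
}
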
+@@ -228,7 +233,6 @@ gro_udp4_reassemble(struct rte_mbuf *pkt, + if (pkt->pkt_len <= hdr_len) + return -1; + +- ip_dl = rte_be_to_cpu_16(ipv4_hdr->total_length); + if (ip_dl <= pkt->l3_len) + return -1; + diff --git a/dpdk/lib/gro/rte_gro.c b/dpdk/lib/gro/rte_gro.c index 8ca4da67e9..7a788523ad 100644 --- a/dpdk/lib/gro/rte_gro.c @@ -49260,19 +90565,151 @@ index 8ca4da67e9..7a788523ad 100644 ((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \ RTE_PTYPE_TUNNEL_VXLAN) && \ ((ptype & RTE_PTYPE_INNER_L4_TCP) == \ +diff --git a/dpdk/lib/hash/rte_cuckoo_hash.c b/dpdk/lib/hash/rte_cuckoo_hash.c +index 1191dfd81a..95c3e6c2f4 100644 +--- a/dpdk/lib/hash/rte_cuckoo_hash.c ++++ b/dpdk/lib/hash/rte_cuckoo_hash.c +@@ -527,6 +527,7 @@ rte_hash_free(struct rte_hash *h) + rte_free(h->buckets_ext); + rte_free(h->tbl_chng_cnt); + rte_free(h->ext_bkt_to_free); ++ rte_free(h->hash_rcu_cfg); + rte_free(h); + rte_free(te); + } +@@ -1865,11 +1866,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, + _mm_load_si128( + (__m128i const *)prim_bkt->sig_current), + _mm_set1_epi16(sig))); ++ /* Extract the even-index bits only */ ++ *prim_hash_matches &= 0x5555; + /* Compare all signatures in the bucket */ + *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16( + _mm_load_si128( + (__m128i const *)sec_bkt->sig_current), + _mm_set1_epi16(sig))); ++ /* Extract the even-index bits only */ ++ *sec_hash_matches &= 0x5555; + break; + #elif defined(__ARM_NEON) + case RTE_HASH_COMPARE_NEON: { +diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c +index 6847e36f4b..2b97482cfb 100644 +--- a/dpdk/lib/hash/rte_thash.c ++++ b/dpdk/lib/hash/rte_thash.c +@@ -671,7 +671,7 @@ rte_thash_get_gfni_matrices(struct rte_thash_ctx *ctx) + } + + static inline uint8_t +-read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset) ++read_unaligned_byte(uint8_t *ptr, unsigned int offset) + { + uint8_t ret = 0; + +@@ -682,13 +682,14 @@ read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset) + (CHAR_BIT - (offset % CHAR_BIT)); + } + +- return ret >> (CHAR_BIT - len); ++ return ret; + } + + static inline uint32_t + read_unaligned_bits(uint8_t *ptr, int len, int offset) + { + uint32_t ret = 0; ++ int shift; + + len = RTE_MAX(len, 0); + len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT)); +@@ -696,13 +697,14 @@ read_unaligned_bits(uint8_t *ptr, int len, int offset) + while (len > 0) { + ret <<= CHAR_BIT; + +- ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT), +- offset); ++ ret |= read_unaligned_byte(ptr, offset); + offset += CHAR_BIT; + len -= CHAR_BIT; + } + +- return ret; ++ shift = (len == 0) ? 
0 : ++ (CHAR_BIT - ((len + CHAR_BIT) % CHAR_BIT)); ++ return ret >> shift; + } + + /* returns mask for len bits with given offset inside byte */ +diff --git a/dpdk/lib/hash/rte_thash.h b/dpdk/lib/hash/rte_thash.h +index c11ca0d5b8..ff5122b1ad 100644 +--- a/dpdk/lib/hash/rte_thash.h ++++ b/dpdk/lib/hash/rte_thash.h +@@ -330,8 +330,6 @@ rte_thash_find_existing(const char *name); + * + * @param ctx + * Thash context +- * @return +- * None + */ + __rte_experimental + void +diff --git a/dpdk/lib/hash/rte_thash_x86_gfni.h b/dpdk/lib/hash/rte_thash_x86_gfni.h +index 657b1862c3..0583f64793 100644 +--- a/dpdk/lib/hash/rte_thash_x86_gfni.h ++++ b/dpdk/lib/hash/rte_thash_x86_gfni.h +@@ -87,8 +87,10 @@ __rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple, + const __m512i shift_8 = _mm512_set1_epi8(8); + __m512i xor_acc = _mm512_setzero_si512(); + __m512i perm_bytes = _mm512_setzero_si512(); +- __m512i vals, matrixes, tuple_bytes, tuple_bytes_2; +- __mmask64 load_mask, permute_mask, permute_mask_2; ++ __m512i vals, matrixes, tuple_bytes_2; ++ __m512i tuple_bytes = _mm512_setzero_si512(); ++ __mmask64 load_mask, permute_mask_2; ++ __mmask64 permute_mask = 0; + int chunk_len = 0, i = 0; + uint8_t mtrx_msk; + const int prepend = 3; diff --git a/dpdk/lib/ipsec/esp_outb.c b/dpdk/lib/ipsec/esp_outb.c -index 672e56aba0..28bd58e3c7 100644 +index 672e56aba0..969eff5a6b 100644 --- a/dpdk/lib/ipsec/esp_outb.c +++ b/dpdk/lib/ipsec/esp_outb.c -@@ -197,7 +197,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, +@@ -197,9 +197,9 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, /* if UDP encap is enabled update the dgram_len */ if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) { struct rte_udp_hdr *udph = (struct rte_udp_hdr *) - (ph - sizeof(struct rte_udp_hdr)); + (ph + sa->hdr_len - sizeof(struct rte_udp_hdr)); udph->dgram_len = rte_cpu_to_be_16(mb->pkt_len - sqh_len - - sa->hdr_l3_off - sa->hdr_len); +- sa->hdr_l3_off - sa->hdr_len); ++ sa->hdr_len + sizeof(struct rte_udp_hdr)); } + + /* update original and new ip header fields */ +@@ -220,8 +220,10 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, + /* pad length */ + pdlen -= sizeof(*espt); + ++ RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes)); ++ + /* copy padding data */ +- rte_memcpy(pt, esp_pad_bytes, pdlen); ++ rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes))); + + /* update esp trailer */ + espt = (struct rte_esp_tail *)(pt + pdlen); +@@ -417,8 +419,10 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, + /* pad length */ + pdlen -= sizeof(*espt); + ++ RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes)); ++ + /* copy padding data */ +- rte_memcpy(pt, esp_pad_bytes, pdlen); ++ rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes))); + + /* update esp trailer */ + espt = (struct rte_esp_tail *)(pt + pdlen); diff --git a/dpdk/lib/ipsec/ipsec_telemetry.c b/dpdk/lib/ipsec/ipsec_telemetry.c index b8b08404b6..9a91e47122 100644 --- a/dpdk/lib/ipsec/ipsec_telemetry.c @@ -49304,10 +90741,19 @@ index 60ab297710..62c2bd7217 100644 return NULL; } diff --git a/dpdk/lib/ipsec/rte_ipsec_sad.h b/dpdk/lib/ipsec/rte_ipsec_sad.h -index b65d295831..a3ae57df7e 100644 +index b65d295831..dbd89901af 100644 --- a/dpdk/lib/ipsec/rte_ipsec_sad.h +++ b/dpdk/lib/ipsec/rte_ipsec_sad.h -@@ -153,7 +153,7 @@ rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad); +@@ -139,8 +139,6 @@ rte_ipsec_sad_find_existing(const char *name); + * + * @param sad + * pointer to the SAD object +- * @return +- * None + */ + void + 
rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad); +@@ -153,7 +151,7 @@ rte_ipsec_sad_destroy(struct rte_ipsec_sad *sad); * @param keys * Array of keys to be looked up in the SAD * @param sa @@ -49317,10 +90763,10 @@ index b65d295831..a3ae57df7e 100644 * will be NULL * @param n diff --git a/dpdk/lib/ipsec/sa.c b/dpdk/lib/ipsec/sa.c -index 1e51482c92..c921699390 100644 +index 1e51482c92..ccf40f0371 100644 --- a/dpdk/lib/ipsec/sa.c +++ b/dpdk/lib/ipsec/sa.c -@@ -362,13 +362,13 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm) +@@ -362,19 +362,19 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm) memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len); @@ -49337,6 +90783,43 @@ index 1e51482c92..c921699390 100644 udph->dgram_cksum = 0; } + /* update l2_len and l3_len fields for outbound mbuf */ + sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off, +- sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0); ++ prm->tun.hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0); + + esp_outb_init(sa, sa->hdr_len, prm->ipsec_xform.esn.value); + } +diff --git a/dpdk/lib/kni/rte_kni.c b/dpdk/lib/kni/rte_kni.c +index fc8f0e7b5a..149894c152 100644 +--- a/dpdk/lib/kni/rte_kni.c ++++ b/dpdk/lib/kni/rte_kni.c +@@ -635,8 +635,8 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) + { + unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); + +- /* If buffers removed, allocate mbufs and then put them into alloc_q */ +- if (ret) ++ /* If buffers removed or alloc_q is empty, allocate mbufs and then put them into alloc_q */ ++ if (ret || (kni_fifo_count(kni->alloc_q) == 0)) + kni_allocate_mbufs(kni); + + return ret; +diff --git a/dpdk/lib/kni/rte_kni.h b/dpdk/lib/kni/rte_kni.h +index b0eaf46104..62fa4c792b 100644 +--- a/dpdk/lib/kni/rte_kni.h ++++ b/dpdk/lib/kni/rte_kni.h +@@ -66,8 +66,8 @@ struct rte_kni_conf { + uint32_t core_id; /* Core ID to bind kernel thread on */ + uint16_t group_id; /* Group ID */ + unsigned mbuf_size; /* mbuf size */ +- struct rte_pci_addr addr; /* depreciated */ +- struct rte_pci_id id; /* depreciated */ ++ struct rte_pci_addr addr; /* deprecated */ ++ struct rte_pci_id id; /* deprecated */ + + __extension__ + uint8_t force_bind : 1; /* Flag to bind kernel thread */ diff --git a/dpdk/lib/kni/rte_kni_common.h b/dpdk/lib/kni/rte_kni_common.h index b547ea5501..8d3ee0fa4f 100644 --- a/dpdk/lib/kni/rte_kni_common.h @@ -49361,6 +90844,57 @@ index b547ea5501..8d3ee0fa4f 100644 +#endif + #endif /* _RTE_KNI_COMMON_H_ */ +diff --git a/dpdk/lib/kvargs/rte_kvargs.h b/dpdk/lib/kvargs/rte_kvargs.h +index 359a9f5b09..4900b750bc 100644 +--- a/dpdk/lib/kvargs/rte_kvargs.h ++++ b/dpdk/lib/kvargs/rte_kvargs.h +@@ -36,7 +36,19 @@ extern "C" { + /** separator character used between key and value */ + #define RTE_KVARGS_KV_DELIM "=" + +-/** Type of callback function used by rte_kvargs_process() */ ++/** ++ * Callback prototype used by rte_kvargs_process(). ++ * ++ * @param key ++ * The key to consider, it will not be NULL. ++ * @param value ++ * The value corresponding to the key, it may be NULL (e.g. only with key) ++ * @param opaque ++ * An opaque pointer coming from the caller. ++ * @return ++ * - >=0 handle key success. ++ * - <0 on error. 
++ */ + typedef int (*arg_handler_t)(const char *key, const char *value, void *opaque); + + /** A key/value association */ +diff --git a/dpdk/lib/lpm/rte_lpm.h b/dpdk/lib/lpm/rte_lpm.h +index 5eb14c1748..d0ba57ae71 100644 +--- a/dpdk/lib/lpm/rte_lpm.h ++++ b/dpdk/lib/lpm/rte_lpm.h +@@ -183,8 +183,6 @@ rte_lpm_find_existing(const char *name); + * + * @param lpm + * LPM object handle +- * @return +- * None + */ + void + rte_lpm_free(struct rte_lpm *lpm); +diff --git a/dpdk/lib/lpm/rte_lpm6.h b/dpdk/lib/lpm/rte_lpm6.h +index f96f3372e5..94fa69e320 100644 +--- a/dpdk/lib/lpm/rte_lpm6.h ++++ b/dpdk/lib/lpm/rte_lpm6.h +@@ -73,8 +73,6 @@ rte_lpm6_find_existing(const char *name); + * + * @param lpm + * LPM object handle +- * @return +- * None + */ + void + rte_lpm6_free(struct rte_lpm6 *lpm); diff --git a/dpdk/lib/lpm/rte_lpm_altivec.h b/dpdk/lib/lpm/rte_lpm_altivec.h index 4fbc1b595d..bab8929495 100644 --- a/dpdk/lib/lpm/rte_lpm_altivec.h @@ -49405,8 +90939,36 @@ index 604d77bbda..dce900f28f 100644 if (m->ol_flags & (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_TX_VLAN)) fprintf(f, ", vlan_tci=%u", m->vlan_tci); +diff --git a/dpdk/lib/mbuf/rte_mbuf.h b/dpdk/lib/mbuf/rte_mbuf.h +index dedf83c38d..30d7937349 100644 +--- a/dpdk/lib/mbuf/rte_mbuf.h ++++ b/dpdk/lib/mbuf/rte_mbuf.h +@@ -666,7 +666,6 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg); + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - cache size provided is too large, or priv_size is not aligned. + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists +@@ -708,7 +707,6 @@ rte_pktmbuf_pool_create(const char *name, unsigned n, + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - cache size provided is too large, or priv_size is not aligned. + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists +@@ -762,7 +760,6 @@ struct rte_pktmbuf_extmem { + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - cache size provided is too large, or priv_size is not aligned. + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists diff --git a/dpdk/lib/mbuf/rte_mbuf_core.h b/dpdk/lib/mbuf/rte_mbuf_core.h -index 321a419c71..3d6ddd6773 100644 +index 321a419c71..9ac9ea2af3 100644 --- a/dpdk/lib/mbuf/rte_mbuf_core.h +++ b/dpdk/lib/mbuf/rte_mbuf_core.h @@ -8,7 +8,7 @@ @@ -49418,6 +90980,316 @@ index 321a419c71..3d6ddd6773 100644 * packet offload flags and some related macros. * For majority of DPDK entities, it is not recommended to include * this file directly, use include <rte_mbuf.h> instead. 
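
A hypothetical handler matching the arg_handler_t contract documented in the rte_kvargs.h hunk above (the key name and the max_queues variable are illustrative only): return zero or more once the key is handled, negative on error.

/* Illustrative arg_handler_t; requires <errno.h>, <stdint.h>,
 * <stdlib.h> and <rte_kvargs.h>. */
static int
handle_max_queues(const char *key __rte_unused, const char *value,
		void *opaque)
{
	uint16_t *max_queues = opaque;
	char *end = NULL;
	unsigned long v;

	if (value == NULL)	/* key supplied without "=value" */
		return -EINVAL;
	v = strtoul(value, &end, 0);
	if (end == NULL || *end != '\0' || v > UINT16_MAX)
		return -EINVAL;
	*max_queues = (uint16_t)v;
	return 0;
}

It would be wired up with rte_kvargs_process(kvlist, "max_queues", handle_max_queues, &max_queues).
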
+@@ -686,8 +686,8 @@ struct rte_mbuf { + * @see rte_event_eth_tx_adapter_txq_set() + */ + } txadapter; /**< Eventdev ethdev Tx adapter */ +- /**< User defined tags. See rte_distributor_process() */ + uint32_t usr; ++ /**< User defined tags. See rte_distributor_process() */ + } hash; /**< hash information */ + }; + +diff --git a/dpdk/lib/mbuf/rte_mbuf_ptype.h b/dpdk/lib/mbuf/rte_mbuf_ptype.h +index 17a2dd3576..f2276e2909 100644 +--- a/dpdk/lib/mbuf/rte_mbuf_ptype.h ++++ b/dpdk/lib/mbuf/rte_mbuf_ptype.h +@@ -419,10 +419,10 @@ extern "C" { + * + * Packet format: + * <'ether type'=0x0800 +- * | 'version'=4, 'protocol'=51> ++ * | 'version'=4, 'protocol'=50> + * or, + * <'ether type'=0x86DD +- * | 'version'=6, 'next header'=51> ++ * | 'version'=6, 'next header'=50> + */ + #define RTE_PTYPE_TUNNEL_ESP 0x00009000 + /** +diff --git a/dpdk/lib/mempool/rte_mempool.c b/dpdk/lib/mempool/rte_mempool.c +index c5a699b1d6..4dbff7fbc4 100644 +--- a/dpdk/lib/mempool/rte_mempool.c ++++ b/dpdk/lib/mempool/rte_mempool.c +@@ -4,6 +4,7 @@ + */ + + #include <stdbool.h> ++#include <stdlib.h> + #include <stdio.h> + #include <string.h> + #include <stdint.h> +@@ -43,12 +44,10 @@ static struct rte_tailq_elem rte_mempool_tailq = { + }; + EAL_REGISTER_TAILQ(rte_mempool_tailq) + +-TAILQ_HEAD(mempool_callback_list, rte_tailq_entry); ++TAILQ_HEAD(mempool_callback_tailq, mempool_callback_data); + +-static struct rte_tailq_elem callback_tailq = { +- .name = "RTE_MEMPOOL_CALLBACK", +-}; +-EAL_REGISTER_TAILQ(callback_tailq) ++static struct mempool_callback_tailq callback_tailq = ++ TAILQ_HEAD_INITIALIZER(callback_tailq); + + /* Invoke all registered mempool event callbacks. */ + static void +@@ -917,6 +916,22 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, + STAILQ_INIT(&mp->elt_list); + STAILQ_INIT(&mp->mem_list); + ++ /* ++ * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to ++ * set the correct index into the table of ops structs. ++ */ ++ if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) ++ ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); ++ else if (flags & RTE_MEMPOOL_F_SP_PUT) ++ ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); ++ else if (flags & RTE_MEMPOOL_F_SC_GET) ++ ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); ++ else ++ ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); ++ ++ if (ret) ++ goto exit_unlock; ++ + /* + * local_cache pointer is set even if cache_size is zero. + * The local_cache points to just past the elt_pa[] array. +@@ -957,7 +972,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) + { +- int ret; + struct rte_mempool *mp; + + mp = rte_mempool_create_empty(name, n, elt_size, cache_size, +@@ -965,22 +979,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + if (mp == NULL) + return NULL; + +- /* +- * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to +- * set the correct index into the table of ops structs. 
+- */ +- if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET)) +- ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); +- else if (flags & RTE_MEMPOOL_F_SP_PUT) +- ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); +- else if (flags & RTE_MEMPOOL_F_SC_GET) +- ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); +- else +- ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); +- +- if (ret) +- goto fail; +- + /* call the mempool priv initializer */ + if (mp_init) + mp_init(mp, mp_init_arg); +@@ -1379,6 +1377,7 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *), + } + + struct mempool_callback_data { ++ TAILQ_ENTRY(mempool_callback_data) callbacks; + rte_mempool_event_callback *func; + void *user_data; + }; +@@ -1387,14 +1386,11 @@ static void + mempool_event_callback_invoke(enum rte_mempool_event event, + struct rte_mempool *mp) + { +- struct mempool_callback_list *list; +- struct rte_tailq_entry *te; ++ struct mempool_callback_data *cb; + void *tmp_te; + + rte_mcfg_tailq_read_lock(); +- list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list); +- RTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) { +- struct mempool_callback_data *cb = te->data; ++ RTE_TAILQ_FOREACH_SAFE(cb, &callback_tailq, callbacks, tmp_te) { + rte_mcfg_tailq_read_unlock(); + cb->func(event, mp, cb->user_data); + rte_mcfg_tailq_read_lock(); +@@ -1406,10 +1402,7 @@ int + rte_mempool_event_callback_register(rte_mempool_event_callback *func, + void *user_data) + { +- struct mempool_callback_list *list; +- struct rte_tailq_entry *te = NULL; + struct mempool_callback_data *cb; +- void *tmp_te; + int ret; + + if (func == NULL) { +@@ -1418,36 +1411,23 @@ rte_mempool_event_callback_register(rte_mempool_event_callback *func, + } + + rte_mcfg_tailq_write_lock(); +- list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list); +- RTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) { +- cb = te->data; ++ TAILQ_FOREACH(cb, &callback_tailq, callbacks) { + if (cb->func == func && cb->user_data == user_data) { + ret = -EEXIST; + goto exit; + } + } + +- te = rte_zmalloc("mempool_cb_tail_entry", sizeof(*te), 0); +- if (te == NULL) { +- RTE_LOG(ERR, MEMPOOL, +- "Cannot allocate event callback tailq entry!\n"); +- ret = -ENOMEM; +- goto exit; +- } +- +- cb = rte_malloc("mempool_cb_data", sizeof(*cb), 0); ++ cb = calloc(1, sizeof(*cb)); + if (cb == NULL) { +- RTE_LOG(ERR, MEMPOOL, +- "Cannot allocate event callback!\n"); +- rte_free(te); ++ RTE_LOG(ERR, MEMPOOL, "Cannot allocate event callback!\n"); + ret = -ENOMEM; + goto exit; + } + + cb->func = func; + cb->user_data = user_data; +- te->data = cb; +- TAILQ_INSERT_TAIL(list, te, next); ++ TAILQ_INSERT_TAIL(&callback_tailq, cb, callbacks); + ret = 0; + + exit: +@@ -1460,27 +1440,21 @@ int + rte_mempool_event_callback_unregister(rte_mempool_event_callback *func, + void *user_data) + { +- struct mempool_callback_list *list; +- struct rte_tailq_entry *te = NULL; + struct mempool_callback_data *cb; + int ret = -ENOENT; + + rte_mcfg_tailq_write_lock(); +- list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list); +- TAILQ_FOREACH(te, list, next) { +- cb = te->data; ++ TAILQ_FOREACH(cb, &callback_tailq, callbacks) { + if (cb->func == func && cb->user_data == user_data) { +- TAILQ_REMOVE(list, te, next); ++ TAILQ_REMOVE(&callback_tailq, cb, callbacks); + ret = 0; + break; + } + } + rte_mcfg_tailq_write_unlock(); + +- if (ret == 0) { +- rte_free(te); +- rte_free(cb); +- } ++ if (ret == 0) ++ free(cb); + rte_errno = -ret; + return 
ret; + } +@@ -1517,27 +1491,27 @@ mempool_info_cb(struct rte_mempool *mp, void *arg) + return; + + rte_tel_data_add_dict_string(info->d, "name", mp->name); +- rte_tel_data_add_dict_int(info->d, "pool_id", mp->pool_id); +- rte_tel_data_add_dict_int(info->d, "flags", mp->flags); ++ rte_tel_data_add_dict_u64(info->d, "pool_id", mp->pool_id); ++ rte_tel_data_add_dict_u64(info->d, "flags", mp->flags); + rte_tel_data_add_dict_int(info->d, "socket_id", mp->socket_id); +- rte_tel_data_add_dict_int(info->d, "size", mp->size); +- rte_tel_data_add_dict_int(info->d, "cache_size", mp->cache_size); +- rte_tel_data_add_dict_int(info->d, "elt_size", mp->elt_size); +- rte_tel_data_add_dict_int(info->d, "header_size", mp->header_size); +- rte_tel_data_add_dict_int(info->d, "trailer_size", mp->trailer_size); +- rte_tel_data_add_dict_int(info->d, "private_data_size", ++ rte_tel_data_add_dict_u64(info->d, "size", mp->size); ++ rte_tel_data_add_dict_u64(info->d, "cache_size", mp->cache_size); ++ rte_tel_data_add_dict_u64(info->d, "elt_size", mp->elt_size); ++ rte_tel_data_add_dict_u64(info->d, "header_size", mp->header_size); ++ rte_tel_data_add_dict_u64(info->d, "trailer_size", mp->trailer_size); ++ rte_tel_data_add_dict_u64(info->d, "private_data_size", + mp->private_data_size); + rte_tel_data_add_dict_int(info->d, "ops_index", mp->ops_index); +- rte_tel_data_add_dict_int(info->d, "populated_size", ++ rte_tel_data_add_dict_u64(info->d, "populated_size", + mp->populated_size); + + mz = mp->mz; + rte_tel_data_add_dict_string(info->d, "mz_name", mz->name); +- rte_tel_data_add_dict_int(info->d, "mz_len", mz->len); +- rte_tel_data_add_dict_int(info->d, "mz_hugepage_sz", ++ rte_tel_data_add_dict_u64(info->d, "mz_len", mz->len); ++ rte_tel_data_add_dict_u64(info->d, "mz_hugepage_sz", + mz->hugepage_sz); + rte_tel_data_add_dict_int(info->d, "mz_socket_id", mz->socket_id); +- rte_tel_data_add_dict_int(info->d, "mz_flags", mz->flags); ++ rte_tel_data_add_dict_u64(info->d, "mz_flags", mz->flags); + } + + static int +diff --git a/dpdk/lib/mempool/rte_mempool.h b/dpdk/lib/mempool/rte_mempool.h +index 1e7a3c1527..9c27820b7a 100644 +--- a/dpdk/lib/mempool/rte_mempool.h ++++ b/dpdk/lib/mempool/rte_mempool.h +@@ -440,13 +440,19 @@ typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp); + typedef void (*rte_mempool_free_t)(struct rte_mempool *mp); + + /** +- * Enqueue an object into the external pool. ++ * Enqueue 'n' objects into the external pool. ++ * @return ++ * - 0: Success ++ * - <0: Error + */ + typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp, + void * const *obj_table, unsigned int n); + + /** +- * Dequeue an object from the external pool. ++ * Dequeue 'n' objects from the external pool. ++ * @return ++ * - 0: Success ++ * - <0: Error + */ + typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp, + void **obj_table, unsigned int n); +@@ -1041,7 +1047,6 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. 
Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - cache size provided is too large or an unknown flag was passed + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists +@@ -1452,7 +1457,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj) + * @param cache + * A pointer to a mempool cache structure. May be NULL if not needed. + * @return +- * - >=0: Success; number of objects supplied. ++ * - 0: Success. + * - <0: Error; code of ring dequeue function. + */ + static __rte_always_inline int +@@ -1850,6 +1855,8 @@ typedef void (rte_mempool_event_callback)( + * Register a callback function invoked on mempool life cycle event. + * The function will be invoked in the process + * that performs an action which triggers the callback. ++ * Registration is process-private, ++ * i.e. each process must manage callbacks on its own if needed. + * + * @param func + * Callback function. diff --git a/dpdk/lib/meson.build b/dpdk/lib/meson.build index 018976df17..fbaa6ef7c2 100644 --- a/dpdk/lib/meson.build @@ -49431,6 +91303,50 @@ index 018976df17..fbaa6ef7c2 100644 # NOTE: for speed of meson runs, the dependencies in the subdirectories # sometimes skip deps that would be implied by others, e.g. if mempool is # given as a dep, no need to mention ring. This is especially true for the +diff --git a/dpdk/lib/meter/rte_meter.h b/dpdk/lib/meter/rte_meter.h +index 62c8c1ecc2..c6a25208b7 100644 +--- a/dpdk/lib/meter/rte_meter.h ++++ b/dpdk/lib/meter/rte_meter.h +@@ -128,9 +128,6 @@ int + rte_meter_trtcm_profile_config(struct rte_meter_trtcm_profile *p, + struct rte_meter_trtcm_params *params); + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC 4115 profile configuration + * + * @param p +@@ -174,9 +171,6 @@ rte_meter_trtcm_config(struct rte_meter_trtcm *m, + struct rte_meter_trtcm_profile *p); + + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC 4115 configuration per metered traffic flow + * + * @param m +@@ -277,9 +271,6 @@ rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, + enum rte_color pkt_color); + + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC4115 color blind traffic metering + * + * @param m +@@ -301,9 +292,6 @@ rte_meter_trtcm_rfc4115_color_blind_check( + uint32_t pkt_len); + + /** +- * @warning +- * @b EXPERIMENTAL: this API may change without prior notice +- * + * trTCM RFC4115 color aware traffic metering + * + * @param m diff --git a/dpdk/lib/metrics/rte_metrics_telemetry.h b/dpdk/lib/metrics/rte_metrics_telemetry.h index 2b6eb1ccc8..09b14d9336 100644 --- a/dpdk/lib/metrics/rte_metrics_telemetry.h @@ -49507,6 +91423,39 @@ index dca940c2c5..9849872366 100644 #endif uint8_t data[0]; /**< variable length data fields */ } __rte_packed; +diff --git a/dpdk/lib/net/rte_ip.h b/dpdk/lib/net/rte_ip.h +index c575250852..615e5b125e 100644 +--- a/dpdk/lib/net/rte_ip.h ++++ b/dpdk/lib/net/rte_ip.h +@@ -154,18 +154,21 @@ rte_ipv4_hdr_len(const struct rte_ipv4_hdr *ipv4_hdr) + static inline uint32_t + __rte_raw_cksum(const void *buf, size_t len, uint32_t sum) + { +- /* extend strict-aliasing rules */ +- typedef uint16_t __attribute__((__may_alias__)) u16_p; +- const u16_p *u16_buf = (const u16_p *)buf; +- const u16_p 
*end = u16_buf + len / sizeof(*u16_buf); ++ const void *end; + +- for (; u16_buf != end; ++u16_buf) +- sum += *u16_buf; ++ for (end = RTE_PTR_ADD(buf, RTE_ALIGN_FLOOR(len, sizeof(uint16_t))); ++ buf != end; buf = RTE_PTR_ADD(buf, sizeof(uint16_t))) { ++ uint16_t v; ++ ++ memcpy(&v, buf, sizeof(uint16_t)); ++ sum += v; ++ } + + /* if length is odd, keeping it byte order independent */ + if (unlikely(len % 2)) { + uint16_t left = 0; +- *(unsigned char *)&left = *(const unsigned char *)end; ++ ++ memcpy(&left, end, 1); + sum += left; + } + diff --git a/dpdk/lib/net/rte_l2tpv2.h b/dpdk/lib/net/rte_l2tpv2.h index b90e36cf12..1f3ad3f03c 100644 --- a/dpdk/lib/net/rte_l2tpv2.h @@ -49563,8 +91512,21 @@ index b90e36cf12..1f3ad3f03c 100644 */ struct rte_l2tpv2_msg_without_offset { rte_be16_t length; /**< length(16) */ +diff --git a/dpdk/lib/node/ethdev_ctrl.c b/dpdk/lib/node/ethdev_ctrl.c +index 13b8b705f0..8a7429faa3 100644 +--- a/dpdk/lib/node/ethdev_ctrl.c ++++ b/dpdk/lib/node/ethdev_ctrl.c +@@ -77,6 +77,8 @@ rte_node_eth_config(struct rte_node_ethdev_config *conf, uint16_t nb_confs, + + /* Add it to list of ethdev rx nodes for lookup */ + elem = malloc(sizeof(ethdev_rx_node_elem_t)); ++ if (elem == NULL) ++ return -ENOMEM; + memset(elem, 0, sizeof(ethdev_rx_node_elem_t)); + elem->ctx.port_id = port_id; + elem->ctx.queue_id = j; diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c -index 03edabe73e..0caf3d31f8 100644 +index 03edabe73e..e914b7b031 100644 --- a/dpdk/lib/pcapng/rte_pcapng.c +++ b/dpdk/lib/pcapng/rte_pcapng.c @@ -20,6 +20,7 @@ @@ -49655,6 +91617,134 @@ index 03edabe73e..0caf3d31f8 100644 speed = link.link_speed * PCAPNG_MBPS_SPEED; if (rte_eth_macaddr_get(port, &macaddr) < 0) +@@ -515,33 +552,16 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + return NULL; + } + +-/* Count how many segments are in this array of mbufs */ +-static unsigned int +-mbuf_burst_segs(struct rte_mbuf *pkts[], unsigned int n) +-{ +- unsigned int i, iovcnt; +- +- for (iovcnt = 0, i = 0; i < n; i++) { +- const struct rte_mbuf *m = pkts[i]; +- +- __rte_mbuf_sanity_check(m, 1); +- +- iovcnt += m->nb_segs; +- } +- return iovcnt; +-} +- + /* Write pre-formatted packets to file. */ + ssize_t + rte_pcapng_write_packets(rte_pcapng_t *self, + struct rte_mbuf *pkts[], uint16_t nb_pkts) + { +- int iovcnt = mbuf_burst_segs(pkts, nb_pkts); +- struct iovec iov[iovcnt]; +- unsigned int i, cnt; +- ssize_t ret; ++ struct iovec iov[IOV_MAX]; ++ unsigned int i, cnt = 0; ++ ssize_t ret, total = 0; + +- for (i = cnt = 0; i < nb_pkts; i++) { ++ for (i = 0; i < nb_pkts; i++) { + struct rte_mbuf *m = pkts[i]; + struct pcapng_enhance_packet_block *epb; + +@@ -553,6 +573,20 @@ rte_pcapng_write_packets(rte_pcapng_t *self, + return -1; + } + ++ /* ++ * Handle case of highly fragmented and large burst size ++ * Note: this assumes that max segments per mbuf < IOV_MAX ++ */ ++ if (unlikely(cnt + m->nb_segs >= IOV_MAX)) { ++ ret = writev(self->outfd, iov, cnt); ++ if (unlikely(ret < 0)) { ++ rte_errno = errno; ++ return -1; ++ } ++ total += ret; ++ cnt = 0; ++ } ++ + /* + * The DPDK port is recorded during pcapng_copy. + * Map that to PCAPNG interface in file. 
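
The __rte_raw_cksum hunk in rte_ip.h above replaces the may_alias pointer walk with memcpy-based loads; a plain-C equivalent of that rule (a sketch for reference, not the patched function itself) follows. Going through memcpy keeps the accumulation correct for arbitrarily aligned buffers without violating strict-aliasing rules.

/* Alignment- and aliasing-safe 16-bit accumulation; requires
 * <stdint.h> and <string.h>. */
static uint32_t
raw_sum16(const void *buf, size_t len, uint32_t sum)
{
	const char *p = buf;
	size_t i;

	for (i = 0; i + sizeof(uint16_t) <= len; i += sizeof(uint16_t)) {
		uint16_t v;

		memcpy(&v, p + i, sizeof(v));	/* safe unaligned load */
		sum += v;
	}
	if (len & 1) {	/* odd trailing byte, byte-order independent */
		uint16_t left = 0;

		memcpy(&left, p + i, 1);
		sum += left;
	}
	return sum;
}
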
+@@ -565,10 +599,12 @@ rte_pcapng_write_packets(rte_pcapng_t *self, + } while ((m = m->next)); + } + +- ret = writev(self->outfd, iov, iovcnt); +- if (unlikely(ret < 0)) ++ ret = writev(self->outfd, iov, cnt); ++ if (unlikely(ret < 0)) { + rte_errno = errno; +- return ret; ++ return -1; ++ } ++ return total + ret; + } + + /* Create new pcapng writer handle */ +diff --git a/dpdk/lib/pci/rte_pci.h b/dpdk/lib/pci/rte_pci.h +index 71cbd441c7..60f641fa61 100644 +--- a/dpdk/lib/pci/rte_pci.h ++++ b/dpdk/lib/pci/rte_pci.h +@@ -105,8 +105,7 @@ struct rte_pci_addr { + + /** + * Utility function to write a pci device name, this device name can later be +- * used to retrieve the corresponding rte_pci_addr using eal_parse_pci_* +- * BDF helpers. ++ * used to retrieve the corresponding rte_pci_addr using rte_pci_addr_parse(). + * + * @param addr + * The PCI Bus-Device-Function address +diff --git a/dpdk/lib/pdump/rte_pdump.c b/dpdk/lib/pdump/rte_pdump.c +index af450695ec..3b3618bc3a 100644 +--- a/dpdk/lib/pdump/rte_pdump.c ++++ b/dpdk/lib/pdump/rte_pdump.c +@@ -133,7 +133,7 @@ pdump_copy(uint16_t port_id, uint16_t queue, + + __atomic_fetch_add(&stats->accepted, d_pkts, __ATOMIC_RELAXED); + +- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL); ++ ring_enq = rte_ring_enqueue_burst(ring, (void *)&dup_bufs[0], d_pkts, NULL); + if (unlikely(ring_enq < d_pkts)) { + unsigned int drops = d_pkts - ring_enq; + +@@ -536,6 +536,12 @@ pdump_prepare_client_request(const char *device, uint16_t queue, + struct pdump_request *req = (struct pdump_request *)mp_req.param; + struct pdump_response *resp; + ++ if (rte_eal_process_type() == RTE_PROC_PRIMARY) { ++ PDUMP_LOG(ERR, ++ "pdump enable/disable not allowed in primary process\n"); ++ return -EINVAL; ++ } ++ + memset(req, 0, sizeof(*req)); + + req->ver = (flags & RTE_PDUMP_FLAG_PCAPNG) ? 
V2 : V1; +@@ -557,9 +563,10 @@ pdump_prepare_client_request(const char *device, uint16_t queue, + if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) { + mp_rep = &mp_reply.msgs[0]; + resp = (struct pdump_response *)mp_rep->param; +- rte_errno = resp->err_value; +- if (!resp->err_value) ++ if (resp->err_value == 0) + ret = 0; ++ else ++ rte_errno = -resp->err_value; + free(mp_reply.msgs); + } + diff --git a/dpdk/lib/pipeline/rte_swx_ctl.c b/dpdk/lib/pipeline/rte_swx_ctl.c index 1c908e3e3f..f52ccffd75 100644 --- a/dpdk/lib/pipeline/rte_swx_ctl.c @@ -49831,10 +91921,26 @@ index 46d05823e1..82e62e70a7 100644 */ uint64_t *n_pkts_action; diff --git a/dpdk/lib/pipeline/rte_swx_pipeline.c b/dpdk/lib/pipeline/rte_swx_pipeline.c -index 2145ca0a42..8d5073cf19 100644 +index 2145ca0a42..25b5b0955d 100644 --- a/dpdk/lib/pipeline/rte_swx_pipeline.c +++ b/dpdk/lib/pipeline/rte_swx_pipeline.c -@@ -8531,7 +8531,7 @@ table_state_build(struct rte_swx_pipeline *p) +@@ -7635,6 +7635,7 @@ table_build_free(struct rte_swx_pipeline *p) + free(p->table_stats[i].n_pkts_action); + + free(p->table_stats); ++ p->table_stats = NULL; + } + } + +@@ -8443,6 +8444,7 @@ learner_build_free(struct rte_swx_pipeline *p) + free(p->learner_stats[i].n_pkts_action); + + free(p->learner_stats); ++ p->learner_stats = NULL; + } + } + +@@ -8531,7 +8533,7 @@ table_state_build(struct rte_swx_pipeline *p) struct selector *s; struct learner *l; @@ -49950,6 +92056,125 @@ index 6afd310e4e..25185a791c 100644 * if ((pi->turbo_available) && (pi->curr_idx <= 1)) */ /* Max may have changed, so call to max function */ +diff --git a/dpdk/lib/power/rte_power.h b/dpdk/lib/power/rte_power.h +index c5759afa39..829a22380d 100644 +--- a/dpdk/lib/power/rte_power.h ++++ b/dpdk/lib/power/rte_power.h +@@ -171,14 +171,6 @@ typedef int (*rte_power_freq_change_t)(unsigned int lcore_id); + * Scale up the frequency of a specific lcore according to the available + * frequencies. + * Review each environments specific documentation for usage. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 1 on success with frequency changed. +- * - 0 on success without frequency changed. +- * - Negative on error. + */ + extern rte_power_freq_change_t rte_power_freq_up; + +@@ -186,30 +178,13 @@ extern rte_power_freq_change_t rte_power_freq_up; + * Scale down the frequency of a specific lcore according to the available + * frequencies. + * Review each environments specific documentation for usage. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 1 on success with frequency changed. +- * - 0 on success without frequency changed. +- * - Negative on error. + */ +- + extern rte_power_freq_change_t rte_power_freq_down; + + /** + * Scale up the frequency of a specific lcore to the highest according to the + * available frequencies. + * Review each environments specific documentation for usage. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 1 on success with frequency changed. +- * - 0 on success without frequency changed. +- * - Negative on error. + */ + extern rte_power_freq_change_t rte_power_freq_max; + +@@ -217,54 +192,24 @@ extern rte_power_freq_change_t rte_power_freq_max; + * Scale down the frequency of a specific lcore to the lowest according to the + * available frequencies. + * Review each environments specific documentation for usage.. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 1 on success with frequency changed. +- * - 0 on success without frequency changed. +- * - Negative on error. 
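The rte_power.h hunks here deduplicate documentation that was repeated on every rte_power_freq_* symbol: the contract (1 = frequency changed, 0 = success without a change, negative = error) now lives once on the shared rte_power_freq_change_t typedef. A hedged sketch of that function-pointer dispatch style follows; the backend names are invented, not the real ACPI/P-state implementations.

#include <stdio.h>

/* one typedef carries the contract for every frequency-change entry point */
typedef int (*freq_change_fn)(unsigned int lcore_id);

static int acpi_freq_up(unsigned int lcore_id)   { (void)lcore_id; return 1; }
static int pstate_freq_up(unsigned int lcore_id) { (void)lcore_id; return 0; }

static freq_change_fn power_freq_up;             /* bound when the env is chosen */

int main(void)
{
    power_freq_up = acpi_freq_up;                /* e.g. an ACPI-style backend */
    printf("acpi: %d\n", power_freq_up(0));      /* 1: frequency changed */
    power_freq_up = pstate_freq_up;
    printf("pstate: %d\n", power_freq_up(0));    /* 0: nothing to change */
    return 0;
}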
+ */ + extern rte_power_freq_change_t rte_power_freq_min; + + /** + * Query the Turbo Boost status of a specific lcore. + * Review each environments specific documentation for usage.. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 1 Turbo Boost is enabled for this lcore. +- * - 0 Turbo Boost is disabled for this lcore. +- * - Negative on error. + */ + extern rte_power_freq_change_t rte_power_turbo_status; + + /** + * Enable Turbo Boost for this lcore. + * Review each environments specific documentation for usage.. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 0 on success. +- * - Negative on error. + */ + extern rte_power_freq_change_t rte_power_freq_enable_turbo; + + /** + * Disable Turbo Boost for this lcore. + * Review each environments specific documentation for usage.. +- * +- * @param lcore_id +- * lcore id. +- * +- * @return +- * - 0 on success. +- * - Negative on error. + */ + extern rte_power_freq_change_t rte_power_freq_disable_turbo; + +diff --git a/dpdk/lib/rawdev/rte_rawdev.c b/dpdk/lib/rawdev/rte_rawdev.c +index a6134e76ea..c06ed8b9c7 100644 +--- a/dpdk/lib/rawdev/rte_rawdev.c ++++ b/dpdk/lib/rawdev/rte_rawdev.c +@@ -494,8 +494,7 @@ rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id) + uint16_t dev_id; + + if (rte_rawdev_pmd_get_named_dev(name) != NULL) { +- RTE_RDEV_ERR("Event device with name %s already allocated!", +- name); ++ RTE_RDEV_ERR("Raw device with name %s already allocated!", name); + return NULL; + } + diff --git a/dpdk/lib/regexdev/rte_regexdev.h b/dpdk/lib/regexdev/rte_regexdev.h index 86f0b231b0..513ce5b67c 100644 --- a/dpdk/lib/regexdev/rte_regexdev.h @@ -50060,6 +92285,184 @@ index 8db9b17018..3c6e9fffa1 100644 + rte_regexdev_register; + rte_regexdev_unregister; }; +diff --git a/dpdk/lib/reorder/rte_reorder.c b/dpdk/lib/reorder/rte_reorder.c +index 9445853b79..5d4fab17ff 100644 +--- a/dpdk/lib/reorder/rte_reorder.c ++++ b/dpdk/lib/reorder/rte_reorder.c +@@ -61,6 +61,11 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, + { + const unsigned int min_bufsize = sizeof(*b) + + (2 * size * sizeof(struct rte_mbuf *)); ++ static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = { ++ .name = RTE_REORDER_SEQN_DYNFIELD_NAME, ++ .size = sizeof(rte_reorder_seqn_t), ++ .align = __alignof__(rte_reorder_seqn_t), ++ }; + + if (b == NULL) { + RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:" +@@ -87,6 +92,15 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, + return NULL; + } + ++ rte_reorder_seqn_dynfield_offset = rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc); ++ if (rte_reorder_seqn_dynfield_offset < 0) { ++ RTE_LOG(ERR, REORDER, ++ "Failed to register mbuf field for reorder sequence number, rte_errno: %i\n", ++ rte_errno); ++ rte_errno = ENOMEM; ++ return NULL; ++ } ++ + memset(b, 0, bufsize); + strlcpy(b->name, name, sizeof(b->name)); + b->memsize = bufsize; +@@ -99,21 +113,45 @@ rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, + return b; + } + ++/* ++ * Insert new entry into global list. ++ * Returns pointer to already inserted entry if such exists, or to newly inserted one. 
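rte_reorder_entry_insert() above closes a create/create race by doing the duplicate scan and the insertion under a single hold of the tailq writer lock and handing back whichever entry won. A minimal pthread-based sketch of the same insert-unless-exists idiom, with invented names:

#include <pthread.h>
#include <string.h>

struct entry { const char *name; struct entry *next; };

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry *
entry_insert(struct entry *new_e)
{
    struct entry *e;

    pthread_mutex_lock(&list_lock);
    for (e = head; e != NULL; e = e->next)
        if (strcmp(e->name, new_e->name) == 0)
            break;                               /* lost the race: reuse winner */
    if (e == NULL) {
        new_e->next = head;                      /* no duplicate: we win */
        head = new_e;
        e = new_e;
    }
    pthread_mutex_unlock(&list_lock);
    return e;                                    /* caller frees new_e if e != new_e */
}

As in rte_reorder_create() after this change, the loser of the race releases its freshly built object and returns the already-registered one.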
++ */ ++static struct rte_tailq_entry * ++rte_reorder_entry_insert(struct rte_tailq_entry *new_te) ++{ ++ struct rte_reorder_list *reorder_list; ++ struct rte_reorder_buffer *b, *nb; ++ struct rte_tailq_entry *te; ++ ++ rte_mcfg_tailq_write_lock(); ++ ++ reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list); ++ /* guarantee there's no existing */ ++ TAILQ_FOREACH(te, reorder_list, next) { ++ b = (struct rte_reorder_buffer *) te->data; ++ nb = (struct rte_reorder_buffer *) new_te->data; ++ if (strncmp(nb->name, b->name, RTE_REORDER_NAMESIZE) == 0) ++ break; ++ } ++ ++ if (te == NULL) { ++ TAILQ_INSERT_TAIL(reorder_list, new_te, next); ++ te = new_te; ++ } ++ ++ rte_mcfg_tailq_write_unlock(); ++ ++ return te; ++} ++ + struct rte_reorder_buffer* + rte_reorder_create(const char *name, unsigned socket_id, unsigned int size) + { + struct rte_reorder_buffer *b = NULL; +- struct rte_tailq_entry *te; +- struct rte_reorder_list *reorder_list; ++ struct rte_tailq_entry *te, *te_inserted; + const unsigned int bufsize = sizeof(struct rte_reorder_buffer) + + (2 * size * sizeof(struct rte_mbuf *)); +- static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = { +- .name = RTE_REORDER_SEQN_DYNFIELD_NAME, +- .size = sizeof(rte_reorder_seqn_t), +- .align = __alignof__(rte_reorder_seqn_t), +- }; +- +- reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list); + + /* Check user arguments. */ + if (!rte_is_power_of_2(size)) { +@@ -129,32 +167,12 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size) + return NULL; + } + +- rte_reorder_seqn_dynfield_offset = +- rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc); +- if (rte_reorder_seqn_dynfield_offset < 0) { +- RTE_LOG(ERR, REORDER, "Failed to register mbuf field for reorder sequence number\n"); +- rte_errno = ENOMEM; +- return NULL; +- } +- +- rte_mcfg_tailq_write_lock(); +- +- /* guarantee there's no existing */ +- TAILQ_FOREACH(te, reorder_list, next) { +- b = (struct rte_reorder_buffer *) te->data; +- if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0) +- break; +- } +- if (te != NULL) +- goto exit; +- + /* allocate tailq entry */ + te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n"); + rte_errno = ENOMEM; +- b = NULL; +- goto exit; ++ return NULL; + } + + /* Allocate memory to store the reorder buffer structure. 
*/ +@@ -163,14 +181,23 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size) + RTE_LOG(ERR, REORDER, "Memzone allocation failed\n"); + rte_errno = ENOMEM; + rte_free(te); ++ return NULL; + } else { +- rte_reorder_init(b, bufsize, name, size); ++ if (rte_reorder_init(b, bufsize, name, size) == NULL) { ++ rte_free(b); ++ rte_free(te); ++ return NULL; ++ } + te->data = (void *)b; +- TAILQ_INSERT_TAIL(reorder_list, te, next); + } + +-exit: +- rte_mcfg_tailq_write_unlock(); ++ te_inserted = rte_reorder_entry_insert(te); ++ if (te_inserted != te) { ++ rte_free(b); ++ rte_free(te); ++ return te_inserted->data; ++ } ++ + return b; + } + +@@ -392,6 +419,7 @@ rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs, + /* Try to fetch requested number of mbufs from ready buffer */ + while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) { + mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail]; ++ ready_buf->entries[ready_buf->tail] = NULL; + ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask; + } + +diff --git a/dpdk/lib/reorder/rte_reorder.h b/dpdk/lib/reorder/rte_reorder.h +index 9de0240374..0620f36026 100644 +--- a/dpdk/lib/reorder/rte_reorder.h ++++ b/dpdk/lib/reorder/rte_reorder.h +@@ -81,6 +81,7 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size); + * The initialized reorder buffer instance, or NULL on error + * On error case, rte_errno will be set appropriately: + * - EINVAL - invalid parameters ++ * - ENOMEM - not enough memory to register dynamic field + */ + struct rte_reorder_buffer * + rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize, +@@ -115,8 +116,6 @@ rte_reorder_reset(struct rte_reorder_buffer *b); + * + * @param b + * reorder buffer instance +- * @return +- * None + */ + void + rte_reorder_free(struct rte_reorder_buffer *b); diff --git a/dpdk/lib/rib/rte_rib.c b/dpdk/lib/rib/rte_rib.c index 6c29e1c49a..1a4b10d728 100644 --- a/dpdk/lib/rib/rte_rib.c @@ -50073,8 +92476,21 @@ index 6c29e1c49a..1a4b10d728 100644 return (ip & (1 << (31 - node->depth))) ? node->right : node->left; } +diff --git a/dpdk/lib/rib/rte_rib.h b/dpdk/lib/rib/rte_rib.h +index bebb30f7d7..c9ee211e34 100644 +--- a/dpdk/lib/rib/rte_rib.h ++++ b/dpdk/lib/rib/rte_rib.h +@@ -265,8 +265,6 @@ rte_rib_find_existing(const char *name); + * + * @param rib + * RIB object handle +- * @return +- * None + */ + void + rte_rib_free(struct rte_rib *rib); diff --git a/dpdk/lib/rib/rte_rib6.h b/dpdk/lib/rib/rte_rib6.h -index 6f532265c6..d52b0b05cc 100644 +index 6f532265c6..74398988c3 100644 --- a/dpdk/lib/rib/rte_rib6.h +++ b/dpdk/lib/rib/rte_rib6.h @@ -40,12 +40,12 @@ struct rte_rib6_node; @@ -50101,8 +92517,17 @@ index 6f532265c6..d52b0b05cc 100644 * @return * Pointer to RIB object on success * NULL otherwise with rte_errno indicating reason for failure. 
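The one-line rte_reorder_drain() fix above clears each slot as the mbuf is handed out, so the ready ring never retains a stale pointer to a buffer the caller now owns. The same idiom on a generic power-of-two ring, names invented:

#include <stddef.h>

struct cirbuf {
    void **entries;
    unsigned int head, tail, mask;               /* size is a power of two */
};

static unsigned int
cirbuf_drain(struct cirbuf *b, void **out, unsigned int max)
{
    unsigned int n = 0;

    while (n < max && b->tail != b->head) {
        out[n++] = b->entries[b->tail];
        b->entries[b->tail] = NULL;              /* the fix: drop our reference */
        b->tail = (b->tail + 1) & b->mask;
    }
    return n;
}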
+@@ -320,8 +320,6 @@ rte_rib6_find_existing(const char *name); + * + * @param rib + * RIB object handle +- * @return +- * None + */ + void + rte_rib6_free(struct rte_rib6 *rib); diff --git a/dpdk/lib/ring/rte_ring.c b/dpdk/lib/ring/rte_ring.c -index f17bd966be..6a94a038c4 100644 +index f17bd966be..40d29c1bc5 100644 --- a/dpdk/lib/ring/rte_ring.c +++ b/dpdk/lib/ring/rte_ring.c @@ -75,7 +75,7 @@ rte_ring_get_memsize_elem(unsigned int esize, unsigned int count) @@ -50123,8 +92548,70 @@ index f17bd966be..6a94a038c4 100644 return NULL; } +@@ -341,11 +341,6 @@ rte_ring_free(struct rte_ring *r) + return; + } + +- if (rte_memzone_free(r->memzone) != 0) { +- RTE_LOG(ERR, RING, "Cannot free memory\n"); +- return; +- } +- + ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list); + rte_mcfg_tailq_write_lock(); + +@@ -364,6 +359,9 @@ rte_ring_free(struct rte_ring *r) + + rte_mcfg_tailq_write_unlock(); + ++ if (rte_memzone_free(r->memzone) != 0) ++ RTE_LOG(ERR, RING, "Cannot free memory\n"); ++ + rte_free(te); + } + +diff --git a/dpdk/lib/ring/rte_ring.h b/dpdk/lib/ring/rte_ring.h +index da17ed6d7c..6f040dba55 100644 +--- a/dpdk/lib/ring/rte_ring.h ++++ b/dpdk/lib/ring/rte_ring.h +@@ -66,10 +66,9 @@ ssize_t rte_ring_get_memsize(unsigned int count); + * object table. It is advised to use rte_ring_get_memsize() to get the + * appropriate size. + * +- * The ring size is set to *count*, which must be a power of two. Water +- * marking is disabled by default. The real usable ring size is +- * *count-1* instead of *count* to differentiate a free ring from an +- * empty ring. ++ * The ring size is set to *count*, which must be a power of two. ++ * The real usable ring size is *count-1* instead of *count* to ++ * differentiate a full ring from an empty ring. + * + * The ring is not added in RTE_TAILQ_RING global list. Indeed, the + * memory given by the caller may not be shareable among dpdk +@@ -119,10 +118,9 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count, + * This function uses ``memzone_reserve()`` to allocate memory. Then it + * calls rte_ring_init() to initialize an empty ring. + * +- * The new ring size is set to *count*, which must be a power of +- * two. Water marking is disabled by default. The real usable ring size +- * is *count-1* instead of *count* to differentiate a free ring from an +- * empty ring. ++ * The new ring size is set to *count*, which must be a power of two. ++ * The real usable ring size is *count-1* instead of *count* to ++ * differentiate a full ring from an empty ring. + * + * The ring is added in RTE_TAILQ_RING list. + * +@@ -164,7 +162,6 @@ int rte_ring_init(struct rte_ring *r, const char *name, unsigned int count, + * On success, the pointer to the new allocated ring. NULL on error with + * rte_errno set appropriately. Possible errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - count provided is not a power of 2 + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists diff --git a/dpdk/lib/ring/rte_ring_core.h b/dpdk/lib/ring/rte_ring_core.h -index 46ad584f9c..1252ca9546 100644 +index 46ad584f9c..82b237091b 100644 --- a/dpdk/lib/ring/rte_ring_core.h +++ b/dpdk/lib/ring/rte_ring_core.h @@ -12,7 +12,7 @@ @@ -50136,6 +92623,85 @@ index 46ad584f9c..1252ca9546 100644 * init flags and some related macros. 
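The corrected rte_ring.h comments above state that a ring created with *count* slots stores at most *count - 1* objects so that a full ring can be told apart from an empty one. The toy program below shows why that slot is sacrificed when head and tail are compared only through the mask; it is illustrative code, not the DPDK implementation, which keeps free-running 32-bit indexes as the rte_ring_core.h hunk that follows describes.

#include <stdbool.h>
#include <stdio.h>

#define COUNT 8u                                 /* must be a power of two */
#define MASK  (COUNT - 1)

static unsigned int head, tail;                  /* masked on every update */

static bool ring_empty(void) { return head == tail; }
static bool ring_full(void)  { return ((head + 1) & MASK) == tail; }

int main(void)
{
    unsigned int stored = 0;

    while (!ring_full()) {                       /* stops after COUNT - 1 items */
        head = (head + 1) & MASK;
        stored++;
    }
    /* head == tail would be ambiguous between empty and full, so "full"
     * is defined as one free slot remaining: capacity is 7 of 8 here. */
    printf("capacity: %u of %u slots (empty=%d)\n", stored, COUNT, ring_empty());
    return 0;
}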
* For majority of DPDK entities, it is not recommended to include * this file directly, use include <rte_ring.h> or <rte_ring_elem.h> +@@ -111,8 +111,8 @@ struct rte_ring_hts_headtail { + * An RTE ring structure. + * + * The producer and the consumer have a head and a tail index. The particularity +- * of these index is that they are not between 0 and size(ring). These indexes +- * are between 0 and 2^32, and we mask their value when we access the ring[] ++ * of these index is that they are not between 0 and size(ring)-1. These indexes ++ * are between 0 and 2^32 -1, and we mask their value when we access the ring[] + * field. Thanks to this assumption, we can do subtractions between 2 index + * values in a modulo-32bit base: that's why the overflow of the indexes is not + * a problem. +diff --git a/dpdk/lib/ring/rte_ring_elem.h b/dpdk/lib/ring/rte_ring_elem.h +index 4bd016c110..91f2b5ff2a 100644 +--- a/dpdk/lib/ring/rte_ring_elem.h ++++ b/dpdk/lib/ring/rte_ring_elem.h +@@ -96,7 +96,6 @@ ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count); + * On success, the pointer to the new allocated ring. NULL on error with + * rte_errno set appropriately. Possible errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure +- * - E_RTE_SECONDARY - function was called from a secondary process instance + * - EINVAL - esize is not a multiple of 4 or count provided is not a + * power of 2. + * - ENOSPC - the maximum number of memzones has already been allocated +diff --git a/dpdk/lib/ring/rte_ring_elem_pvt.h b/dpdk/lib/ring/rte_ring_elem_pvt.h +index 275ec55393..99786cca95 100644 +--- a/dpdk/lib/ring/rte_ring_elem_pvt.h ++++ b/dpdk/lib/ring/rte_ring_elem_pvt.h +@@ -10,6 +10,12 @@ + #ifndef _RTE_RING_ELEM_PVT_H_ + #define _RTE_RING_ELEM_PVT_H_ + ++#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000) ++#pragma GCC diagnostic push ++#pragma GCC diagnostic ignored "-Wstringop-overflow" ++#pragma GCC diagnostic ignored "-Wstringop-overread" ++#endif ++ + static __rte_always_inline void + __rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size, + uint32_t idx, const void *obj_table, uint32_t n) +@@ -188,12 +194,12 @@ __rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size, + } + + static __rte_always_inline void +-__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head, ++__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head, + void *obj_table, uint32_t n) + { + unsigned int i; + const uint32_t size = r->size; +- uint32_t idx = prod_head & r->mask; ++ uint32_t idx = cons_head & r->mask; + uint64_t *ring = (uint64_t *)&r[1]; + unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table; + if (likely(idx + n < size)) { +@@ -221,12 +227,12 @@ __rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t prod_head, + } + + static __rte_always_inline void +-__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t prod_head, ++__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head, + void *obj_table, uint32_t n) + { + unsigned int i; + const uint32_t size = r->size; +- uint32_t idx = prod_head & r->mask; ++ uint32_t idx = cons_head & r->mask; + rte_int128_t *ring = (rte_int128_t *)&r[1]; + rte_int128_t *obj = (rte_int128_t *)obj_table; + if (likely(idx + n < size)) { +@@ -382,4 +388,8 @@ __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table, + return n; + } + ++#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000) ++#pragma GCC diagnostic pop ++#endif ++ + #endif /* 
_RTE_RING_ELEM_PVT_H_ */ diff --git a/dpdk/lib/sched/rte_pie.c b/dpdk/lib/sched/rte_pie.c index 934e9aee50..79db6e96b1 100644 --- a/dpdk/lib/sched/rte_pie.c @@ -50241,10 +92807,20 @@ index 36273cac64..f5843dab1b 100644 * Based on new queue average and RED configuration parameters * gives verdict whether to enqueue or drop the packet. diff --git a/dpdk/lib/sched/rte_sched.c b/dpdk/lib/sched/rte_sched.c -index ed44808f7b..62b3d2e315 100644 +index ed44808f7b..b9494ec31e 100644 --- a/dpdk/lib/sched/rte_sched.c +++ b/dpdk/lib/sched/rte_sched.c -@@ -239,7 +239,7 @@ struct rte_sched_port { +@@ -214,6 +214,9 @@ struct rte_sched_subport { + uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE]; + uint32_t qsize_sum; + ++ /* TC oversubscription activation */ ++ int tc_ov_enabled; ++ + struct rte_sched_pipe *pipe; + struct rte_sched_queue *queue; + struct rte_sched_queue_extra *queue_extra; +@@ -239,7 +242,7 @@ struct rte_sched_port { int socket; /* Timing */ @@ -50253,6 +92829,15 @@ index ed44808f7b..62b3d2e315 100644 uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */ uint64_t time; /* Current NIC TX time measured in bytes */ struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */ +@@ -1243,8 +1246,6 @@ rte_sched_subport_config(struct rte_sched_port *port, + + n_subports++; + +- subport_profile_id = 0; +- + /* Port */ + port->subports[subport_id] = s; + diff --git a/dpdk/lib/sched/rte_sched.h b/dpdk/lib/sched/rte_sched.h index 484dbdcc3d..3c625ba169 100644 --- a/dpdk/lib/sched/rte_sched.h @@ -50267,7 +92852,7 @@ index 484dbdcc3d..3c625ba169 100644 * @param port * Handle to port scheduler instance diff --git a/dpdk/lib/security/rte_security.h b/dpdk/lib/security/rte_security.h -index 1228b6c8b1..1a15e95267 100644 +index 1228b6c8b1..71af1bf3e3 100644 --- a/dpdk/lib/security/rte_security.h +++ b/dpdk/lib/security/rte_security.h @@ -301,9 +301,9 @@ struct rte_security_ipsec_lifetime { @@ -50282,6 +92867,46 @@ index 1228b6c8b1..1a15e95267 100644 }; /** +@@ -453,6 +453,7 @@ struct rte_security_docsis_xform { + /** + * Security session action type. + */ ++/* Enumeration of rte_security_session_action_type 8<*/ + enum rte_security_session_action_type { + RTE_SECURITY_ACTION_TYPE_NONE, + /**< No security actions */ +@@ -473,8 +474,10 @@ enum rte_security_session_action_type { + * protocol is processed synchronously by a CPU. + */ + }; ++/* >8 End enumeration of rte_security_session_action_type. */ + + /** Security session protocol definition */ ++/* Enumeration of rte_security_session_protocol 8<*/ + enum rte_security_session_protocol { + RTE_SECURITY_PROTOCOL_IPSEC = 1, + /**< IPsec Protocol */ +@@ -485,10 +488,12 @@ enum rte_security_session_protocol { + RTE_SECURITY_PROTOCOL_DOCSIS, + /**< DOCSIS Protocol */ + }; ++/* >8 End enumeration of rte_security_session_protocol. */ + + /** + * Security session configuration + */ ++/* Structure rte_security_session_conf 8< */ + struct rte_security_session_conf { + enum rte_security_session_action_type action_type; + /**< Type of action to be performed on the session */ +@@ -507,6 +512,7 @@ struct rte_security_session_conf { + void *userdata; + /**< Application specific userdata to be saved with session */ + }; ++/* >8 End of structure rte_security_session_conf. 
*/ + + struct rte_security_session { + void *sess_private_data; diff --git a/dpdk/lib/stack/meson.build b/dpdk/lib/stack/meson.build index 2f53f49677..18177a742f 100644 --- a/dpdk/lib/stack/meson.build @@ -50305,6 +92930,19 @@ index f93e5f3f95..c1383c2e57 100644 * read operations: before a read operation with the source data likely not in * the CPU cache, the source data prefetch is issued and the table lookup * operation is postponed in favor of some other unrelated work, which the CPU +diff --git a/dpdk/lib/table/rte_swx_table_selector.c b/dpdk/lib/table/rte_swx_table_selector.c +index 541ebc2213..5d8e013286 100644 +--- a/dpdk/lib/table/rte_swx_table_selector.c ++++ b/dpdk/lib/table/rte_swx_table_selector.c +@@ -233,7 +233,7 @@ table_params_copy(struct table *t, struct rte_swx_table_selector_params *params) + t->params.n_members_per_group_max = rte_align32pow2(params->n_members_per_group_max); + + for (i = 0; i < 32; i++) +- if (params->n_members_per_group_max == 1U << i) ++ if (t->params.n_members_per_group_max == 1U << i) + t->n_members_per_group_max_log2 = i; + + /* t->params.selector_mask */ diff --git a/dpdk/lib/table/rte_swx_table_selector.h b/dpdk/lib/table/rte_swx_table_selector.h index 62988d2856..05863cc90b 100644 --- a/dpdk/lib/table/rte_swx_table_selector.h @@ -50411,17 +93049,27 @@ index c4c35cc06a..a962ec2f68 100644 k0 = k[0] & m[0]; diff --git a/dpdk/lib/telemetry/rte_telemetry.h b/dpdk/lib/telemetry/rte_telemetry.h -index 7bca8a9a49..3372b32f38 100644 +index 7bca8a9a49..0f24579235 100644 --- a/dpdk/lib/telemetry/rte_telemetry.h +++ b/dpdk/lib/telemetry/rte_telemetry.h -@@ -9,6 +9,10 @@ - #ifndef _RTE_TELEMETRY_H_ - #define _RTE_TELEMETRY_H_ +@@ -2,13 +2,17 @@ + * Copyright(c) 2018 Intel Corporation + */ ++#ifndef _RTE_TELEMETRY_H_ ++#define _RTE_TELEMETRY_H_ ++ +#ifdef __cplusplus +extern "C" { +#endif + + #include <stdint.h> + + #include <rte_compat.h> + +-#ifndef _RTE_TELEMETRY_H_ +-#define _RTE_TELEMETRY_H_ +- /** Maximum length for string used in object. */ #define RTE_TEL_MAX_STRING_LEN 128 /** Maximum length of string. 
*/ @@ -50435,10 +93083,82 @@ index 7bca8a9a49..3372b32f38 100644 + #endif diff --git a/dpdk/lib/telemetry/telemetry.c b/dpdk/lib/telemetry/telemetry.c -index a7483167d4..e5ccfe47f7 100644 +index a7483167d4..e73f4a593e 100644 --- a/dpdk/lib/telemetry/telemetry.c +++ b/dpdk/lib/telemetry/telemetry.c -@@ -534,7 +534,7 @@ telemetry_legacy_init(void) +@@ -197,7 +197,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) + break; + case RTE_TEL_CONTAINER: + { +- char temp[buf_len]; ++ char *temp = malloc(buf_len); ++ if (temp == NULL) ++ break; ++ *temp = '\0'; /* ensure valid string */ ++ + const struct container *cont = + &v->value.container; + if (container_to_json(cont->data, +@@ -208,6 +212,7 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) + v->name, temp); + if (!cont->keep) + rte_tel_data_free(cont->data); ++ free(temp); + break; + } + } +@@ -264,7 +269,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + break; + case RTE_TEL_CONTAINER: + { +- char temp[buf_len]; ++ char *temp = malloc(buf_len); ++ if (temp == NULL) ++ break; ++ *temp = '\0'; /* ensure valid string */ ++ + const struct container *cont = + &v->value.container; + if (container_to_json(cont->data, +@@ -275,6 +284,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + v->name, temp); + if (!cont->keep) + rte_tel_data_free(cont->data); ++ free(temp); + } + } + } +@@ -306,7 +316,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + buf_len, used, + d->data.array[i].u64val); + else if (d->type == RTE_TEL_ARRAY_CONTAINER) { +- char temp[buf_len]; ++ char *temp = malloc(buf_len); ++ if (temp == NULL) ++ break; ++ *temp = '\0'; /* ensure valid string */ ++ + const struct container *rec_data = + &d->data.array[i].container; + if (container_to_json(rec_data->data, +@@ -316,6 +330,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + buf_len, used, temp); + if (!rec_data->keep) + rte_tel_data_free(rec_data->data); ++ free(temp); + } + used += prefix_used; + used += strlcat(out_buf + used, "}", sizeof(out_buf) - used); +@@ -328,7 +343,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) + static void + perform_command(telemetry_cb fn, const char *cmd, const char *param, int s) + { +- struct rte_tel_data data; ++ struct rte_tel_data data = {0}; + + int ret = fn(cmd, param, &data); + if (ret < 0) { +@@ -534,7 +549,7 @@ telemetry_legacy_init(void) } rc = pthread_create(&t_old, NULL, socket_listener, &v1_socket); if (rc != 0) { @@ -50460,6 +93180,60 @@ index f02a12f5b0..db70690274 100644 * This function is not for use for values larger than given buffer length. 
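The telemetry.c hunks above replace `char temp[buf_len]` VLAs with checked heap allocations: the buffers are large, the JSON rendering recurses through nested containers, and a VLA can neither bound its stack usage nor report allocation failure. A sketch of the replacement pattern; render_nested() is an invented stand-in for container_to_json():

#include <stdio.h>
#include <stdlib.h>

static int
render_nested(char *out, size_t out_len, const char *inner, size_t buf_len)
{
    char *temp = malloc(buf_len);                /* was: char temp[buf_len]; */

    if (temp == NULL)
        return -1;                               /* a VLA could not report this */
    temp[0] = '\0';                              /* valid string on every path,
                                                  * mirroring the patch */
    snprintf(temp, buf_len, "{\"inner\":\"%s\"}", inner);
    snprintf(out, out_len, "{\"outer\":%s}", temp);
    free(temp);
    return 0;
}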
*/ __rte_format_printf(3, 4) +diff --git a/dpdk/lib/timer/rte_timer.c b/dpdk/lib/timer/rte_timer.c +index 6d19ce469b..98c1941cb1 100644 +--- a/dpdk/lib/timer/rte_timer.c ++++ b/dpdk/lib/timer/rte_timer.c +@@ -587,7 +587,7 @@ rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks, + } + + static int +-__rte_timer_stop(struct rte_timer *tim, int local_is_locked, ++__rte_timer_stop(struct rte_timer *tim, + struct rte_timer_data *timer_data) + { + union rte_timer_status prev_status, status; +@@ -609,7 +609,7 @@ __rte_timer_stop(struct rte_timer *tim, int local_is_locked, + + /* remove it from list */ + if (prev_status.state == RTE_TIMER_PENDING) { +- timer_del(tim, prev_status, local_is_locked, priv_timer); ++ timer_del(tim, prev_status, 0, priv_timer); + __TIMER_STAT_ADD(priv_timer, pending, -1); + } + +@@ -638,7 +638,7 @@ rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim) + + TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL); + +- return __rte_timer_stop(tim, 0, timer_data); ++ return __rte_timer_stop(tim, timer_data); + } + + /* loop until rte_timer_stop() succeed */ +@@ -994,21 +994,16 @@ rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores, + walk_lcore = walk_lcores[i]; + priv_timer = &timer_data->priv_timer[walk_lcore]; + +- rte_spinlock_lock(&priv_timer->list_lock); +- + for (tim = priv_timer->pending_head.sl_next[0]; + tim != NULL; + tim = next_tim) { + next_tim = tim->sl_next[0]; + +- /* Call timer_stop with lock held */ +- __rte_timer_stop(tim, 1, timer_data); ++ __rte_timer_stop(tim, timer_data); + + if (f) + f(tim, f_arg); + } +- +- rte_spinlock_unlock(&priv_timer->list_lock); + } + + return 0; diff --git a/dpdk/lib/vhost/rte_vdpa.h b/dpdk/lib/vhost/rte_vdpa.h index 1437f400bf..6ac85d1bbf 100644 --- a/dpdk/lib/vhost/rte_vdpa.h @@ -50486,7 +93260,7 @@ index 1437f400bf..6ac85d1bbf 100644 + #endif /* _RTE_VDPA_H_ */ diff --git a/dpdk/lib/vhost/rte_vhost.h b/dpdk/lib/vhost/rte_vhost.h -index b454c05868..2acb31df2d 100644 +index b454c05868..1582617423 100644 --- a/dpdk/lib/vhost/rte_vhost.h +++ b/dpdk/lib/vhost/rte_vhost.h @@ -21,10 +21,12 @@ @@ -50502,8 +93276,30 @@ index b454c05868..2acb31df2d 100644 #define RTE_VHOST_USER_CLIENT (1ULL << 0) #define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1) +@@ -916,6 +918,21 @@ rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx, + */ + int rte_vhost_vring_call(int vid, uint16_t vring_idx); + ++/** ++ * Notify the guest that used descriptors have been added to the vring. This ++ * function acts as a memory barrier. This function will return -EAGAIN when ++ * vq's access lock is held by other thread, user should try again later. ++ * ++ * @param vid ++ * vhost device ID ++ * @param vring_idx ++ * vring index ++ * @return ++ * 0 on success, -1 on failure, -EAGAIN for another retry ++ */ ++__rte_experimental ++int rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx); ++ + /** + * Get vhost RX queue avail count. 
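rte_vhost_vring_call_nonblock(), declared above, differs from rte_vhost_vring_call() only in how the virtqueue access lock is taken: trylock plus -EAGAIN instead of blocking. A pthread-mutex sketch of the pair follows; the real code uses an rte_spinlock and additionally rechecks vq->access_ok under the lock, as the vhost.c hunks further down show.

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t vq_lock = PTHREAD_MUTEX_INITIALIZER;

static void kick_guest(void) { /* eventfd_write(...) in the real code */ }

int vring_call(void)
{
    pthread_mutex_lock(&vq_lock);                /* waits if contended */
    kick_guest();
    pthread_mutex_unlock(&vq_lock);
    return 0;
}

int vring_call_nonblock(void)
{
    if (pthread_mutex_trylock(&vq_lock) != 0)
        return -EAGAIN;                          /* lock held: caller retries */
    kick_guest();
    pthread_mutex_unlock(&vq_lock);
    return 0;
}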
+ * diff --git a/dpdk/lib/vhost/rte_vhost_async.h b/dpdk/lib/vhost/rte_vhost_async.h -index a87ea6ba37..d20152ca7a 100644 +index a87ea6ba37..159b9b0889 100644 --- a/dpdk/lib/vhost/rte_vhost_async.h +++ b/dpdk/lib/vhost/rte_vhost_async.h @@ -5,6 +5,10 @@ @@ -50517,6 +93313,45 @@ index a87ea6ba37..d20152ca7a 100644 #include "rte_vhost.h" /** +@@ -43,15 +47,15 @@ struct rte_vhost_async_channel_ops { + /** + * instruct async engines to perform copies for a batch of packets + * +- * @param vid ++ * vid + * id of vhost device to perform data copies +- * @param queue_id ++ * queue_id + * queue id to perform data copies +- * @param iov_iter ++ * iov_iter + * an array of IOV iterators +- * @param opaque_data ++ * opaque_data + * opaque data pair sending to DMA engine +- * @param count ++ * count + * number of elements in the "descs" array + * @return + * number of IOV iterators processed, negative value means error +@@ -62,13 +66,13 @@ struct rte_vhost_async_channel_ops { + uint16_t count); + /** + * check copy-completed packets from the async engine +- * @param vid ++ * vid + * id of vhost device to check copy completion +- * @param queue_id ++ * queue_id + * queue id to check copy completion +- * @param opaque_data ++ * opaque_data + * buffer to receive the opaque data pair from DMA engine +- * @param max_packets ++ * max_packets + * max number of packets could be completed + * @return + * number of async descs completed, negative value means error @@ -242,4 +246,8 @@ __rte_experimental uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id, struct rte_mbuf **pkts, uint16_t count); @@ -50551,10 +93386,26 @@ index f54d731139..b49e389579 100644 + #endif /**< _VHOST_CRYPTO_H_ */ diff --git a/dpdk/lib/vhost/socket.c b/dpdk/lib/vhost/socket.c -index 82963c1e6d..33f54a779b 100644 +index 82963c1e6d..a34aebd50c 100644 --- a/dpdk/lib/vhost/socket.c +++ b/dpdk/lib/vhost/socket.c -@@ -501,7 +501,7 @@ vhost_user_reconnect_init(void) +@@ -128,10 +128,12 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds, + return ret; + } + +- if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) { ++ if (msgh.msg_flags & MSG_TRUNC) + VHOST_LOG_CONFIG(ERR, "truncated msg\n"); +- return -1; +- } ++ ++ /* MSG_CTRUNC may be caused by LSM misconfiguration */ ++ if (msgh.msg_flags & MSG_CTRUNC) ++ VHOST_LOG_CONFIG(ERR, "truncated control data (fd %d)\n", sockfd); + + for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL; + cmsg = CMSG_NXTHDR(&msgh, cmsg)) { +@@ -501,7 +503,7 @@ vhost_user_reconnect_init(void) ret = pthread_mutex_init(&reconn_list.mutex, NULL); if (ret < 0) { @@ -50563,7 +93414,7 @@ index 82963c1e6d..33f54a779b 100644 return ret; } TAILQ_INIT(&reconn_list.head); -@@ -509,10 +509,10 @@ vhost_user_reconnect_init(void) +@@ -509,10 +511,10 @@ vhost_user_reconnect_init(void) ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL, vhost_user_client_reconnect, NULL); if (ret != 0) { @@ -50576,7 +93427,7 @@ index 82963c1e6d..33f54a779b 100644 } } -@@ -1147,7 +1147,7 @@ rte_vhost_driver_start(const char *path) +@@ -1147,7 +1149,7 @@ rte_vhost_driver_start(const char *path) &vhost_user.fdset); if (ret != 0) { VHOST_LOG_CONFIG(ERR, @@ -50610,10 +93461,14 @@ index fc2d6acedd..7ba9e28e57 100644 + #endif /* _VDPA_DRIVER_H_ */ diff --git a/dpdk/lib/vhost/version.map b/dpdk/lib/vhost/version.map -index a7ef7f1976..0f315ed2a5 100644 +index a7ef7f1976..2cd6ea8a29 100644 --- a/dpdk/lib/vhost/version.map +++ b/dpdk/lib/vhost/version.map -@@ -87,7 +87,7 @@ EXPERIMENTAL { +@@ -84,10 +84,11 @@ 
EXPERIMENTAL { + + # added in 21.11 + rte_vhost_get_monitor_addr; ++ rte_vhost_vring_call_nonblock; }; INTERNAL { @@ -50623,26 +93478,135 @@ index a7ef7f1976..0f315ed2a5 100644 rte_vdpa_register_device; rte_vdpa_relay_vring_used; diff --git a/dpdk/lib/vhost/vhost.c b/dpdk/lib/vhost/vhost.c -index 13a9bb9dd1..24f94495c6 100644 +index 13a9bb9dd1..95940d542c 100644 --- a/dpdk/lib/vhost/vhost.c +++ b/dpdk/lib/vhost/vhost.c -@@ -1299,11 +1299,15 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx) +@@ -1287,6 +1287,7 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx) + { + struct virtio_net *dev; + struct vhost_virtqueue *vq; ++ int ret = 0; + + dev = get_device(vid); + if (!dev) +@@ -1299,12 +1300,59 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx) if (!vq) return -1; + rte_spinlock_lock(&vq->access_lock); + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ if (vq_is_packed(dev)) vhost_vring_call_packed(dev, vq); else vhost_vring_call_split(dev, vq); +- return 0; ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + - return 0; ++ return ret; ++} ++ ++int ++rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx) ++{ ++ struct virtio_net *dev; ++ struct vhost_virtqueue *vq; ++ int ret = 0; ++ ++ dev = get_device(vid); ++ if (!dev) ++ return -1; ++ ++ if (vring_idx >= VHOST_MAX_VRING) ++ return -1; ++ ++ vq = dev->virtqueue[vring_idx]; ++ if (!vq) ++ return -1; ++ ++ if (!rte_spinlock_trylock(&vq->access_lock)) ++ return -EAGAIN; ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ ++ if (vq_is_packed(dev)) ++ vhost_vring_call_packed(dev, vq); ++ else ++ vhost_vring_call_split(dev, vq); ++ ++out_unlock: ++ rte_spinlock_unlock(&vq->access_lock); ++ ++ return ret; } -@@ -1779,26 +1783,22 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) + uint16_t +@@ -1327,7 +1375,10 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id) + + rte_spinlock_lock(&vq->access_lock); + +- if (unlikely(!vq->enabled || vq->avail == NULL)) ++ if (unlikely(!vq->access_ok)) ++ goto out; ++ ++ if (unlikely(!vq->enabled)) + goto out; + + ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx; +@@ -1419,9 +1470,15 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable) + + rte_spinlock_lock(&vq->access_lock); + ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + vq->notif_enable = enable; + ret = vhost_enable_guest_notification(dev, vq, enable); + ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + + return ret; +@@ -1481,7 +1538,10 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid) + + rte_spinlock_lock(&vq->access_lock); + +- if (unlikely(!vq->enabled || vq->avail == NULL)) ++ if (unlikely(!vq->access_ok)) ++ goto out; ++ ++ if (unlikely(!vq->enabled)) + goto out; + + ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx; +@@ -1722,7 +1782,15 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id, + return -1; + + rte_spinlock_lock(&vq->access_lock); ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + ret = async_channel_register(vid, queue_id, ops); ++ ++out_unlock: + rte_spinlock_unlock(&vq->access_lock); + + return ret; +@@ -1779,26 +1847,28 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) if (vq == NULL) return ret; @@ -50659,6 +93623,11 @@ index 13a9bb9dd1..24f94495c6 100644 } - if (vq->async->pkts_inflight_n) { ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (!vq->async) { + ret = 
0; + } else if (vq->async->pkts_inflight_n) { @@ -50673,10 +93642,11 @@ index 13a9bb9dd1..24f94495c6 100644 - vhost_free_async_mem(vq); -out: ++out_unlock: rte_spinlock_unlock(&vq->access_lock); return ret; -@@ -1853,16 +1853,15 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id) +@@ -1853,16 +1923,21 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id) if (vq == NULL) return ret; @@ -50690,14 +93660,54 @@ index 13a9bb9dd1..24f94495c6 100644 } - ret = vq->async->pkts_inflight_n; ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (vq->async) + ret = vq->async->pkts_inflight_n; + ++out_unlock: rte_spinlock_unlock(&vq->access_lock); return ret; +@@ -1874,6 +1949,7 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + { + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq; ++ int ret = 0; + + if (dev == NULL) + return -1; +@@ -1884,6 +1960,13 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + if (vq == NULL) + return -1; + ++ rte_spinlock_lock(&vq->access_lock); ++ ++ if (unlikely(!vq->access_ok)) { ++ ret = -1; ++ goto out_unlock; ++ } ++ + if (vq_is_packed(dev)) { + struct vring_packed_desc *desc; + desc = vq->desc_packed; +@@ -1903,7 +1986,10 @@ rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + pmc->match = 0; + } + +- return 0; ++out_unlock: ++ rte_spinlock_unlock(&vq->access_lock); ++ ++ return ret; + } + + RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO); diff --git a/dpdk/lib/vhost/vhost.h b/dpdk/lib/vhost/vhost.h -index 7085e0885c..d4586f3341 100644 +index 7085e0885c..b9c387882c 100644 --- a/dpdk/lib/vhost/vhost.h +++ b/dpdk/lib/vhost/vhost.h @@ -354,7 +354,8 @@ struct vring_packed_desc_event { @@ -50772,6 +93782,40 @@ index 7085e0885c..d4586f3341 100644 } } } +@@ -671,7 +686,10 @@ hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len) + static __rte_always_inline struct virtio_net * + get_device(int vid) + { +- struct virtio_net *dev = vhost_devices[vid]; ++ struct virtio_net *dev = NULL; ++ ++ if (likely(vid >= 0 && vid < MAX_VHOST_DEVICE)) ++ dev = vhost_devices[vid]; + + if (unlikely(!dev)) { + VHOST_LOG_CONFIG(ERR, +@@ -768,9 +786,9 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq) + vhost_used_event(vq), + old, new); + +- if ((vhost_need_event(vhost_used_event(vq), new, old) && +- (vq->callfd >= 0)) || +- unlikely(!signalled_used_valid)) { ++ if ((vhost_need_event(vhost_used_event(vq), new, old) || ++ unlikely(!signalled_used_valid)) && ++ vq->callfd >= 0) { + eventfd_write(vq->callfd, (eventfd_t) 1); + if (dev->notify_ops->guest_notified) + dev->notify_ops->guest_notified(dev->vid); +@@ -833,7 +851,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) + if (vhost_need_event(off, new, old)) + kick = true; + kick: +- if (kick) { ++ if (kick && vq->callfd >= 0) { + eventfd_write(vq->callfd, (eventfd_t)1); + if (dev->notify_ops->guest_notified) + dev->notify_ops->guest_notified(dev->vid); diff --git a/dpdk/lib/vhost/vhost_crypto.c b/dpdk/lib/vhost/vhost_crypto.c index 926b5c0bd9..7d1d6a1861 100644 --- a/dpdk/lib/vhost/vhost_crypto.c @@ -50952,7 +93996,7 @@ index 926b5c0bd9..7d1d6a1861 100644 goto error_exit; } diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c -index a781346c4d..2b45e35d4a 100644 +index a781346c4d..bf489cb13b 100644 --- a/dpdk/lib/vhost/vhost_user.c +++ b/dpdk/lib/vhost/vhost_user.c @@ -143,57 +143,59 @@ get_blk_size(int fd) @@ -51232,6 +94276,24 @@ index a781346c4d..2b45e35d4a 100644 
fd = msg->fds[0]; if (msg->size != sizeof(msg->payload.inflight) || fd < 0) { VHOST_LOG_CONFIG(ERR, +@@ -1841,7 +1847,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused, + + if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) + close(msg->fds[0]); +- VHOST_LOG_CONFIG(INFO, "not implemented\n"); ++ VHOST_LOG_CONFIG(DEBUG, "not implemented\n"); + + return RTE_VHOST_MSG_RESULT_OK; + } +@@ -2372,7 +2378,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, + return RTE_VHOST_MSG_RESULT_ERR; + + close(msg->fds[0]); +- VHOST_LOG_CONFIG(INFO, "not implemented.\n"); ++ VHOST_LOG_CONFIG(DEBUG, "not implemented.\n"); + + return RTE_VHOST_MSG_RESULT_OK; + } @@ -2566,8 +2572,12 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg, vhost_user_iotlb_cache_insert(vq, imsg->iova, vva, len, imsg->perm); @@ -51259,7 +94321,53 @@ index a781346c4d..2b45e35d4a 100644 } break; default: -@@ -2873,6 +2886,9 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, +@@ -2783,30 +2796,37 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg) + + ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE, + msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num); +- if (ret <= 0) { +- return ret; +- } else if (ret != VHOST_USER_HDR_SIZE) { ++ if (ret <= 0) ++ goto out; ++ ++ if (ret != VHOST_USER_HDR_SIZE) { + VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n"); +- close_msg_fds(msg); +- return -1; ++ ret = -1; ++ goto out; + } + + if (msg->size) { + if (msg->size > sizeof(msg->payload)) { + VHOST_LOG_CONFIG(ERR, + "invalid msg size: %d\n", msg->size); +- return -1; ++ ret = -1; ++ goto out; + } + ret = read(sockfd, &msg->payload, msg->size); + if (ret <= 0) +- return ret; ++ goto out; + if (ret != (int)msg->size) { + VHOST_LOG_CONFIG(ERR, + "read control message failed\n"); +- return -1; ++ ret = -1; ++ goto out; + } + } + ++out: ++ if (ret <= 0) ++ close_msg_fds(msg); ++ + return ret; + } + +@@ -2873,6 +2893,9 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, case VHOST_USER_SET_VRING_ADDR: vring_idx = msg->payload.addr.index; break; @@ -51269,7 +94377,7 @@ index a781346c4d..2b45e35d4a 100644 default: return 0; } -@@ -2961,7 +2977,6 @@ vhost_user_msg_handler(int vid, int fd) +@@ -2961,7 +2984,6 @@ vhost_user_msg_handler(int vid, int fd) return -1; } @@ -51277,7 +94385,7 @@ index a781346c4d..2b45e35d4a 100644 request = msg.request.master; if (request > VHOST_USER_NONE && request < VHOST_USER_MAX && vhost_message_str[request]) { -@@ -3103,9 +3118,11 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3103,9 +3125,11 @@ vhost_user_msg_handler(int vid, int fd) } else if (ret == RTE_VHOST_MSG_RESULT_ERR) { VHOST_LOG_CONFIG(ERR, "vhost message handling failed.\n"); @@ -51290,7 +94398,7 @@ index a781346c4d..2b45e35d4a 100644 for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; bool cur_ready = vq_is_ready(dev, vq); -@@ -3116,10 +3133,11 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3116,10 +3140,11 @@ vhost_user_msg_handler(int vid, int fd) } } @@ -51303,7 +94411,7 @@ index a781346c4d..2b45e35d4a 100644 goto out; /* -@@ -3146,7 +3164,7 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3146,7 +3171,7 @@ vhost_user_msg_handler(int vid, int fd) } out: @@ -51313,7 +94421,7 @@ index a781346c4d..2b45e35d4a 100644 static int process_slave_message_reply(struct virtio_net *dev, diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c -index b3d954aab4..bf4d75b4bd 100644 +index 
b3d954aab4..b211799687 100644 --- a/dpdk/lib/vhost/virtio_net.c +++ b/dpdk/lib/vhost/virtio_net.c @@ -415,6 +415,16 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) @@ -51333,6 +94441,15 @@ index b3d954aab4..bf4d75b4bd 100644 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len; +@@ -589,7 +599,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + */ + static inline int + reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +- uint32_t size, struct buf_vector *buf_vec, ++ uint64_t size, struct buf_vector *buf_vec, + uint16_t *num_buffers, uint16_t avail_head, + uint16_t *nr_vec) + { @@ -870,20 +880,21 @@ async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq, struct vhost_async *async = vq->async; uint64_t mapped_len; @@ -51360,7 +94477,72 @@ index b3d954aab4..bf4d75b4bd 100644 return -1; cpy_len -= (uint32_t)mapped_len; -@@ -1900,16 +1911,22 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id, +@@ -1058,7 +1069,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev, + uint16_t buf_id = 0; + uint32_t len = 0; + uint16_t desc_count; +- uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf); ++ uint64_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf); + uint16_t num_buffers = 0; + uint32_t buffer_len[vq->size]; + uint16_t buffer_buf_id[vq->size]; +@@ -1126,7 +1137,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); + + for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { +- uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; ++ uint64_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; + uint16_t nr_vec = 0; + + if (unlikely(reserve_avail_buf_split(dev, vq, +@@ -1235,6 +1246,12 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev, + sizeof(struct virtio_net_hdr_mrg_rxbuf); + } + ++ if (rxvq_is_mergeable(dev)) { ++ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { ++ ASSIGN_UNLESS_EQUAL(hdrs[i]->num_buffers, 1); ++ } ++ } ++ + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr); + +@@ -1474,7 +1491,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, + async_iter_reset(async); + + for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { +- uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; ++ uint64_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; + uint16_t nr_vec = 0; + + if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec, +@@ -1564,7 +1581,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev, + uint16_t buf_id = 0; + uint32_t len = 0; + uint16_t desc_count = 0; +- uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf); ++ uint64_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf); + uint32_t buffer_len[vq->size]; + uint16_t buffer_buf_id[vq->size]; + uint16_t buffer_desc_count[vq->size]; +@@ -1661,7 +1678,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, + struct rte_mbuf **pkts, uint32_t count) + { + uint32_t pkt_idx = 0; +- uint32_t remained = count; + int32_t n_xfer; + uint16_t num_buffers; + uint16_t num_descs; +@@ -1687,7 +1703,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, + pkts_info[slot_idx].mbuf = pkts[pkt_idx]; + + pkt_idx++; +- remained--; + vq_inc_last_avail_packed(vq, num_descs); + } while (pkt_idx < count); + +@@ -1900,16 +1915,22 @@ 
rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id, vq = dev->virtqueue[queue_id]; @@ -51386,7 +94568,7 @@ index b3d954aab4..bf4d75b4bd 100644 rte_spinlock_unlock(&vq->access_lock); return n_pkts_cpl; -@@ -2305,25 +2322,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2305,25 +2326,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t buf_avail, buf_offset; uint64_t buf_addr, buf_len; uint32_t mbuf_avail, mbuf_offset; @@ -51419,7 +94601,7 @@ index b3d954aab4..bf4d75b4bd 100644 /* * No luck, the virtio-net header doesn't fit * in a contiguous virtual area. -@@ -2331,34 +2345,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2331,34 +2349,22 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec); hdr = &tmp_hdr; } else { @@ -51464,7 +94646,7 @@ index b3d954aab4..bf4d75b4bd 100644 PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset), (uint32_t)buf_avail, 0); -@@ -2551,6 +2553,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2551,6 +2557,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, update_shadow_used_ring_split(vq, head_idx, 0); @@ -51479,7 +94661,7 @@ index b3d954aab4..bf4d75b4bd 100644 err = virtio_dev_pktmbuf_prep(dev, pkts[i], buf_len); if (unlikely(err)) { /* -@@ -2754,6 +2764,11 @@ vhost_dequeue_single_packed(struct virtio_net *dev, +@@ -2754,6 +2768,11 @@ vhost_dequeue_single_packed(struct virtio_net *dev, VHOST_ACCESS_RO) < 0)) return -1; @@ -51492,10 +94674,15 @@ index b3d954aab4..bf4d75b4bd 100644 if (!allocerr_warned) { VHOST_LOG_DATA(ERR, diff --git a/dpdk/meson.build b/dpdk/meson.build -index 12cb6e0e83..21dc51f00d 100644 +index 12cb6e0e83..19234656fb 100644 --- a/dpdk/meson.build +++ b/dpdk/meson.build -@@ -5,7 +5,7 @@ project('DPDK', 'C', +@@ -1,11 +1,11 @@ + # SPDX-License-Identifier: BSD-3-Clause + # Copyright(c) 2017-2019 Intel Corporation + +-project('DPDK', 'C', ++project('DPDK', 'c', # Get version number from file. # Fallback to "more" for Windows compatibility. version: run_command(find_program('cat', 'more'), @@ -51504,6 +94691,15 @@ index 12cb6e0e83..21dc51f00d 100644 license: 'BSD', default_options: ['buildtype=release', 'default_library=static'], meson_version: '>= 0.49.2' +@@ -16,7 +16,7 @@ developer_mode = false + if get_option('developer_mode').auto() + if meson.version().version_compare('>=0.53') # fs module available + fs = import('fs') +- developer_mode = fs.is_dir('.git') ++ developer_mode = fs.exists('.git') + endif + else + developer_mode = get_option('developer_mode').enabled() @@ -27,6 +27,8 @@ endif # set up some global vars for compiler, platform, configuration, etc. @@ -52390,6 +95586,74 @@ index a23cdc4ade..087656729a 100644 # _server IDL OVSIDL_BUILT += lib/ovsdb-server-idl.c lib/ovsdb-server-idl.h lib/ovsdb-server-idl.ovsidl +diff --git a/lib/backtrace.h b/lib/backtrace.h +index 5708bf9c68..1a5b879944 100644 +--- a/lib/backtrace.h ++++ b/lib/backtrace.h +@@ -26,7 +26,7 @@ + #endif + + /* log_backtrace() will save the backtrace of a running program +- * into the log at the DEBUG level. ++ * into the log at the ERROR level. 
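The copy_desc_to_mbuf() change above takes a direct pointer to the virtio-net header only when the header is contiguous in the first descriptor buffer, and otherwise gathers it into a stack copy first. A simplified sketch of that access pattern: struct hdr and struct buf are invented stand-ins, the chain is assumed (as the caller guarantees) to hold at least a full header, and the direct-pointer path additionally assumes adequate alignment.

#include <stdint.h>
#include <string.h>

struct hdr { uint8_t flags; uint8_t gso_type; uint16_t hdr_len; };
struct buf { const uint8_t *addr; size_t len; };

/* gather sizeof(struct hdr) bytes from a possibly scattered buffer chain */
static void
copy_hdr_from_bufs(struct hdr *dst, const struct buf *bufs, size_t n)
{
    size_t need = sizeof *dst, off = 0;

    for (size_t i = 0; i < n && need > 0; i++) {
        size_t take = bufs[i].len < need ? bufs[i].len : need;

        memcpy((uint8_t *)dst + off, bufs[i].addr, take);
        off += take;
        need -= take;
    }
}

const struct hdr *
get_hdr(const struct buf *bufs, size_t n, struct hdr *tmp)
{
    if (n > 0 && bufs[0].len >= sizeof(struct hdr))
        return (const struct hdr *)bufs[0].addr; /* contiguous: no copy */
    copy_hdr_from_bufs(tmp, bufs, n);            /* scattered: assemble */
    return tmp;
}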
+ * + * To use it, insert the following code to where backtrace is + * desired: +diff --git a/lib/bfd.c b/lib/bfd.c +index 9698576d07..9af258917b 100644 +--- a/lib/bfd.c ++++ b/lib/bfd.c +@@ -586,7 +586,6 @@ bfd_put_packet(struct bfd *bfd, struct dp_packet *p, + { + long long int min_tx, min_rx; + struct udp_header *udp; +- struct eth_header *eth; + struct ip_header *ip; + struct msg *msg; + +@@ -605,15 +604,13 @@ bfd_put_packet(struct bfd *bfd, struct dp_packet *p, + * set. */ + ovs_assert(!(bfd->flags & FLAG_POLL) || !(bfd->flags & FLAG_FINAL)); + +- dp_packet_reserve(p, 2); /* Properly align after the ethernet header. */ +- eth = dp_packet_put_uninit(p, sizeof *eth); +- eth->eth_src = eth_addr_is_zero(bfd->local_eth_src) +- ? eth_src : bfd->local_eth_src; +- eth->eth_dst = eth_addr_is_zero(bfd->local_eth_dst) +- ? eth_addr_bfd : bfd->local_eth_dst; +- eth->eth_type = htons(ETH_TYPE_IP); ++ ip = eth_compose(p, ++ eth_addr_is_zero(bfd->local_eth_dst) ++ ? eth_addr_bfd : bfd->local_eth_dst, ++ eth_addr_is_zero(bfd->local_eth_src) ++ ? eth_src : bfd->local_eth_src, ++ ETH_TYPE_IP, sizeof *ip + sizeof *udp + sizeof *msg); + +- ip = dp_packet_put_zeros(p, sizeof *ip); + ip->ip_ihl_ver = IP_IHL_VER(5, 4); + ip->ip_tot_len = htons(sizeof *ip + sizeof *udp + sizeof *msg); + ip->ip_ttl = MAXTTL; +@@ -621,15 +618,17 @@ bfd_put_packet(struct bfd *bfd, struct dp_packet *p, + ip->ip_proto = IPPROTO_UDP; + put_16aligned_be32(&ip->ip_src, bfd->ip_src); + put_16aligned_be32(&ip->ip_dst, bfd->ip_dst); +- /* Checksum has already been zeroed by put_zeros call. */ ++ /* Checksum has already been zeroed by eth_compose call. */ + ip->ip_csum = csum(ip, sizeof *ip); ++ dp_packet_set_l4(p, ip + 1); + +- udp = dp_packet_put_zeros(p, sizeof *udp); ++ udp = dp_packet_l4(p); + udp->udp_src = htons(bfd->udp_src); + udp->udp_dst = htons(BFD_DEST_PORT); + udp->udp_len = htons(sizeof *udp + sizeof *msg); ++ /* Checksum already zero from eth_compose. 
*/ + +- msg = dp_packet_put_uninit(p, sizeof *msg); ++ msg = (struct msg *)(udp + 1); + msg->vers_diag = (BFD_VERSION << 5) | bfd->diag; + msg->flags = (bfd->state & STATE_MASK) | bfd->flags; + diff --git a/lib/cfm.c b/lib/cfm.c index cc43e70e31..c3742f3de2 100644 --- a/lib/cfm.c @@ -55867,6 +99131,34 @@ index 720c73d940..27a829656b 100644 } return json_array_create(elems, array->n); } +diff --git a/lib/jsonrpc.c b/lib/jsonrpc.c +index c8ce5362e1..3db5f76e28 100644 +--- a/lib/jsonrpc.c ++++ b/lib/jsonrpc.c +@@ -221,19 +221,19 @@ jsonrpc_log_msg(const struct jsonrpc *rpc, const char *title, + } + if (msg->params) { + ds_put_cstr(&s, ", params="); +- json_to_ds(msg->params, 0, &s); ++ json_to_ds(msg->params, JSSF_SORT, &s); + } + if (msg->result) { + ds_put_cstr(&s, ", result="); +- json_to_ds(msg->result, 0, &s); ++ json_to_ds(msg->result, JSSF_SORT, &s); + } + if (msg->error) { + ds_put_cstr(&s, ", error="); +- json_to_ds(msg->error, 0, &s); ++ json_to_ds(msg->error, JSSF_SORT, &s); + } + if (msg->id) { + ds_put_cstr(&s, ", id="); +- json_to_ds(msg->id, 0, &s); ++ json_to_ds(msg->id, JSSF_SORT, &s); + } + VLOG_DBG("%s: %s %s%s", rpc->name, title, + jsonrpc_msg_type_to_string(msg->type), ds_cstr(&s)); diff --git a/lib/lacp.c b/lib/lacp.c index 89d711225f..3252f17ebf 100644 --- a/lib/lacp.c @@ -56344,7 +99636,7 @@ index 785cda4c27..dd317ea52e 100644 hmap_remove(&map->by_number, &node->number_node); free(node->name); diff --git a/lib/netdev-afxdp.c b/lib/netdev-afxdp.c -index 482400d8d1..ca3f2431ea 100644 +index 482400d8d1..4d57efa5ce 100644 --- a/lib/netdev-afxdp.c +++ b/lib/netdev-afxdp.c @@ -235,11 +235,11 @@ netdev_afxdp_cleanup_unused_pool(struct unused_pool *pool) @@ -56361,6 +99653,29 @@ index 482400d8d1..ca3f2431ea 100644 count = umem_pool_count(&pool->umem_info->mpool); ovs_assert(count + pool->lost_in_rings <= NUM_FRAMES); +@@ -868,9 +868,22 @@ netdev_afxdp_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch, + OVS_XDP_HEADROOM); + dp_packet_set_size(packet, len); + ++#if __GNUC__ >= 11 && !__clang__ ++ /* GCC 11+ generates a false-positive warning about free() being ++ * called on DPBUF_AFXDP packet, but it is an imposisible code path. ++ * Disabling a warning to avoid build failures. ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108187 */ ++#pragma GCC diagnostic push ++#pragma GCC diagnostic ignored "-Wfree-nonheap-object" ++#endif ++ + /* Add packet into batch, increase batch->count. */ + dp_packet_batch_add(batch, packet); + ++#if __GNUC__ && !__clang__ ++#pragma GCC diagnostic pop ++#endif ++ + idx_rx++; + } + /* Release the RX queue. */ diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index b6b29c75e3..dbdd540d09 100644 --- a/lib/netdev-dpdk.c @@ -56795,6 +100110,224 @@ index b6b29c75e3..dbdd540d09 100644 hmap_remove(&policer->queues, &queue->hmap_node); free(queue); } +diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c +index 72cb954711..1c61535068 100644 +--- a/lib/netdev-dummy.c ++++ b/lib/netdev-dummy.c +@@ -39,6 +39,7 @@ + #include "pcap-file.h" + #include "openvswitch/poll-loop.h" + #include "openvswitch/shash.h" ++#include "ovs-router.h" + #include "sset.h" + #include "stream.h" + #include "unaligned.h" +@@ -136,8 +137,7 @@ struct netdev_dummy { + + struct pcap_file *tx_pcap, *rxq_pcap OVS_GUARDED; + +- struct in_addr address, netmask; +- struct in6_addr ipv6, ipv6_mask; ++ struct ovs_list addrs OVS_GUARDED; + struct ovs_list rxes OVS_GUARDED; /* List of child "netdev_rxq_dummy"s. 
*/ + + struct hmap offloaded_flows OVS_GUARDED; +@@ -161,6 +161,12 @@ struct netdev_rxq_dummy { + struct seq *seq; /* Reports newly queued packets. */ + }; + ++struct netdev_addr_dummy { ++ struct in6_addr address; ++ struct in6_addr netmask; ++ struct ovs_list node; /* In netdev_dummy's "addrs" list. */ ++}; ++ + static unixctl_cb_func netdev_dummy_set_admin_state; + static int netdev_dummy_construct(struct netdev *); + static void netdev_dummy_queue_packet(struct netdev_dummy *, +@@ -169,6 +175,7 @@ static void netdev_dummy_queue_packet(struct netdev_dummy *, + static void dummy_packet_stream_close(struct dummy_packet_stream *); + + static void pkt_list_delete(struct ovs_list *); ++static void addr_list_delete(struct ovs_list *); + + static bool + is_dummy_class(const struct netdev_class *class) +@@ -720,6 +727,7 @@ netdev_dummy_construct(struct netdev *netdev_) + dummy_packet_conn_init(&netdev->conn); + + ovs_list_init(&netdev->rxes); ++ ovs_list_init(&netdev->addrs); + hmap_init(&netdev->offloaded_flows); + ovs_mutex_unlock(&netdev->mutex); + +@@ -756,6 +764,7 @@ netdev_dummy_destruct(struct netdev *netdev_) + free(off_flow); + } + hmap_destroy(&netdev->offloaded_flows); ++ addr_list_delete(&netdev->addrs); + + ovs_mutex_unlock(&netdev->mutex); + ovs_mutex_destroy(&netdev->mutex); +@@ -803,32 +812,24 @@ netdev_dummy_get_addr_list(const struct netdev *netdev_, struct in6_addr **paddr + struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); + int cnt = 0, i = 0, err = 0; + struct in6_addr *addr, *mask; ++ struct netdev_addr_dummy *addr_dummy; + + ovs_mutex_lock(&netdev->mutex); +- if (netdev->address.s_addr != INADDR_ANY) { +- cnt++; +- } + +- if (ipv6_addr_is_set(&netdev->ipv6)) { +- cnt++; +- } ++ cnt = ovs_list_size(&netdev->addrs); + if (!cnt) { + err = EADDRNOTAVAIL; + goto out; + } + addr = xmalloc(sizeof *addr * cnt); + mask = xmalloc(sizeof *mask * cnt); +- if (netdev->address.s_addr != INADDR_ANY) { +- in6_addr_set_mapped_ipv4(&addr[i], netdev->address.s_addr); +- in6_addr_set_mapped_ipv4(&mask[i], netdev->netmask.s_addr); +- i++; +- } + +- if (ipv6_addr_is_set(&netdev->ipv6)) { +- memcpy(&addr[i], &netdev->ipv6, sizeof *addr); +- memcpy(&mask[i], &netdev->ipv6_mask, sizeof *mask); ++ LIST_FOR_EACH (addr_dummy, node, &netdev->addrs) { ++ memcpy(&addr[i], &addr_dummy->address, sizeof *addr); ++ memcpy(&mask[i], &addr_dummy->netmask, sizeof *mask); + i++; + } ++ + if (paddr) { + *paddr = addr; + *pmask = mask; +@@ -844,14 +845,16 @@ out: + } + + static int +-netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address, ++netdev_dummy_add_in4(struct netdev *netdev_, struct in_addr address, + struct in_addr netmask) + { + struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); ++ struct netdev_addr_dummy *addr_dummy = xmalloc(sizeof *addr_dummy); + + ovs_mutex_lock(&netdev->mutex); +- netdev->address = address; +- netdev->netmask = netmask; ++ in6_addr_set_mapped_ipv4(&addr_dummy->address, address.s_addr); ++ in6_addr_set_mapped_ipv4(&addr_dummy->netmask, netmask.s_addr); ++ ovs_list_push_back(&netdev->addrs, &addr_dummy->node); + netdev_change_seq_changed(netdev_); + ovs_mutex_unlock(&netdev->mutex); + +@@ -859,14 +862,16 @@ netdev_dummy_set_in4(struct netdev *netdev_, struct in_addr address, + } + + static int +-netdev_dummy_set_in6(struct netdev *netdev_, struct in6_addr *in6, ++netdev_dummy_add_in6(struct netdev *netdev_, struct in6_addr *in6, + struct in6_addr *mask) + { + struct netdev_dummy *netdev = netdev_dummy_cast(netdev_); ++ struct netdev_addr_dummy 
*addr_dummy = xmalloc(sizeof *addr_dummy); + + ovs_mutex_lock(&netdev->mutex); +- netdev->ipv6 = *in6; +- netdev->ipv6_mask = *mask; ++ addr_dummy->address = *in6; ++ addr_dummy->netmask = *mask; ++ ovs_list_push_back(&netdev->addrs, &addr_dummy->node); + netdev_change_seq_changed(netdev_); + ovs_mutex_unlock(&netdev->mutex); + +@@ -1178,7 +1183,10 @@ netdev_dummy_send(struct netdev *netdev, int qid, + dummy_packet_conn_send(&dev->conn, buffer, size); + + /* Reply to ARP requests for 'dev''s assigned IP address. */ +- if (dev->address.s_addr) { ++ struct netdev_addr_dummy *addr_dummy; ++ LIST_FOR_EACH (addr_dummy, node, &dev->addrs) { ++ ovs_be32 address = in6_addr_get_mapped_ipv4(&addr_dummy->address); ++ + struct dp_packet dp; + struct flow flow; + +@@ -1186,11 +1194,12 @@ netdev_dummy_send(struct netdev *netdev, int qid, + flow_extract(&dp, &flow); + if (flow.dl_type == htons(ETH_TYPE_ARP) + && flow.nw_proto == ARP_OP_REQUEST +- && flow.nw_dst == dev->address.s_addr) { ++ && flow.nw_dst == address) { + struct dp_packet *reply = dp_packet_new(0); + compose_arp(reply, ARP_OP_REPLY, dev->hwaddr, flow.dl_src, + false, flow.nw_dst, flow.nw_src); + netdev_dummy_queue_packet(dev, reply, NULL, 0); ++ break; + } + } + +@@ -1677,6 +1686,16 @@ pkt_list_delete(struct ovs_list *l) + } + } + ++static void ++addr_list_delete(struct ovs_list *l) ++{ ++ struct netdev_addr_dummy *addr_dummy; ++ ++ LIST_FOR_EACH_POP (addr_dummy, node, l) { ++ free(addr_dummy); ++ } ++} ++ + static struct dp_packet * + eth_from_packet(const char *s) + { +@@ -2005,11 +2024,20 @@ netdev_dummy_ip4addr(struct unixctl_conn *conn, int argc OVS_UNUSED, + + if (netdev && is_dummy_class(netdev->netdev_class)) { + struct in_addr ip, mask; ++ struct in6_addr ip6; ++ uint32_t plen; + char *error; + +- error = ip_parse_masked(argv[2], &ip.s_addr, &mask.s_addr); ++ error = ip_parse_cidr(argv[2], &ip.s_addr, &plen); + if (!error) { +- netdev_dummy_set_in4(netdev, ip, mask); ++ mask.s_addr = be32_prefix_mask(plen); ++ netdev_dummy_add_in4(netdev, ip, mask); ++ ++ /* Insert local route entry for the new address. */ ++ in6_addr_set_mapped_ipv4(&ip6, ip.s_addr); ++ ovs_router_force_insert(0, &ip6, plen + 96, true, argv[1], ++ &in6addr_any); ++ + unixctl_command_reply(conn, "OK"); + } else { + unixctl_command_reply_error(conn, error); +@@ -2038,7 +2066,12 @@ netdev_dummy_ip6addr(struct unixctl_conn *conn, int argc OVS_UNUSED, + struct in6_addr mask; + + mask = ipv6_create_mask(plen); +- netdev_dummy_set_in6(netdev, &ip6, &mask); ++ netdev_dummy_add_in6(netdev, &ip6, &mask); ++ ++ /* Insert local route entry for the new address. 
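++         * ovs_router_force_insert() adds the route even while updates
++         * from the system routing table are disabled, which is the
++         * normal state in the unit tests.  A usage sketch (standard
++         * appctl command names assumed):
++         *
++         *   ovs-appctl netdev-dummy/ip6addr br0 2001:db8::1/64
++         *   ovs-appctl ovs/route/show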
*/ ++ ovs_router_force_insert(0, &ip6, plen, true, argv[1], ++ &in6addr_any); ++ + unixctl_command_reply(conn, "OK"); + } else { + unixctl_command_reply_error(conn, error); diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c index 620a451dec..80506ae2d9 100644 --- a/lib/netdev-linux.c @@ -57397,7 +100930,7 @@ index 94dc6a9b74..cecec071ca 100644 return 0; } diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c -index 9845e8d3fe..6fa27d1dda 100644 +index 9845e8d3fe..f1e9f2f26d 100644 --- a/lib/netdev-offload-tc.c +++ b/lib/netdev-offload-tc.c @@ -44,6 +44,7 @@ @@ -57674,12 +101207,12 @@ index 9845e8d3fe..6fa27d1dda 100644 + if (flower->mask.tunnel.tp_src) { + match_set_tun_tp_dst_masked(match, flower->key.tunnel.tp_src, + flower->mask.tunnel.tp_src); -+ } + } +- if (flower->key.tunnel.metadata.present.len) { + if (flower->mask.tunnel.tp_dst) { + match_set_tun_tp_dst_masked(match, flower->key.tunnel.tp_dst, + flower->mask.tunnel.tp_dst); - } -- if (flower->key.tunnel.metadata.present.len) { ++ } + + if (!strcmp(netdev_get_type(netdev), "geneve")) { flower_tun_opt_to_match(match, flower); @@ -57869,7 +101402,18 @@ index 9845e8d3fe..6fa27d1dda 100644 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: { action->encap.ipv6.ipv6_src = nl_attr_get_in6_addr(tun_attr); -@@ -1354,12 +1471,31 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action, +@@ -1341,7 +1458,9 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action, + } + break; + case OVS_TUNNEL_KEY_ATTR_TP_SRC: { +- action->encap.tp_src = nl_attr_get_be16(tun_attr); ++ /* There is no corresponding attribute in TC. */ ++ VLOG_DBG_RL(&rl, "unsupported tunnel key attribute TP_SRC"); ++ return EOPNOTSUPP; + } + break; + case OVS_TUNNEL_KEY_ATTR_TP_DST: { +@@ -1354,12 +1473,31 @@ parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action, action->encap.data.present.len = nl_attr_get_size(tun_attr); } break; @@ -57901,7 +101445,7 @@ index 9845e8d3fe..6fa27d1dda 100644 static int test_key_and_mask(struct match *match) { -@@ -1442,8 +1578,23 @@ test_key_and_mask(struct match *match) +@@ -1442,31 +1580,88 @@ test_key_and_mask(struct match *match) return EOPNOTSUPP; } @@ -57926,18 +101470,19 @@ index 9845e8d3fe..6fa27d1dda 100644 return EOPNOTSUPP; } -@@ -1452,18 +1603,51 @@ test_key_and_mask(struct match *match) + return 0; + } - static void +-static void ++static int flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, - const struct flow_tnl *tnl_mask) + struct flow_tnl *tnl_mask) { struct geneve_opt *opt, *opt_mask; - int len, cnt = 0; - -- memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv, -- tnl->metadata.present.len); +- int len, cnt = 0; ++ int tot_opt_len, len, cnt = 0; ++ + /* 'flower' always has an exact match on tunnel metadata length, so having + * it in a wrong format is not acceptable unless it is empty. */ + if (!(tnl->flags & FLOW_TNL_F_UDPIF)) { @@ -57952,11 +101497,13 @@ index 9845e8d3fe..6fa27d1dda 100644 + memset(&tnl_mask->metadata.present.map, 0, + sizeof tnl_mask->metadata.present.map); + } -+ return; ++ return 0; + } + + tnl_mask->flags &= ~FLOW_TNL_F_UDPIF; -+ + +- memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv, +- tnl->metadata.present.len); flower->key.tunnel.metadata.present.len = tnl->metadata.present.len; + /* Copying from the key and not from the mask, since in the 'flower' + * the length for a mask is not a mask, but the actual length. 
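+     * (Put differently: flower can only express an exact match on the
+     * total length of the tunnel options, never a partial one.)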
TC @@ -57966,7 +101513,7 @@ index 9845e8d3fe..6fa27d1dda 100644 + sizeof tnl_mask->metadata.present.len); + + if (!tnl->metadata.present.len) { -+ return; ++ return 0; + } + memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv, @@ -57980,17 +101527,29 @@ index 9845e8d3fe..6fa27d1dda 100644 + * also not masks, but actual lengths in the 'flower' structure. */ len = flower->key.tunnel.metadata.present.len; while (len) { ++ if (len < sizeof *opt) { ++ return EOPNOTSUPP; ++ } ++ opt = &flower->key.tunnel.metadata.opts.gnv[cnt]; -@@ -1474,8 +1658,6 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, - cnt += sizeof(struct geneve_opt) / 4 + opt->length; ++ tot_opt_len = sizeof *opt + opt->length * 4; ++ if (len < tot_opt_len) { ++ return EOPNOTSUPP; ++ } ++ + opt_mask = &flower->mask.tunnel.metadata.opts.gnv[cnt]; + + opt_mask->length = opt->length; +@@ -1475,7 +1670,7 @@ flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl, len -= sizeof(struct geneve_opt) + opt->length * 4; } -- + - flower->mask.tunnel.metadata.present.len = tnl->metadata.present.len; ++ return 0; } static void -@@ -1541,6 +1723,12 @@ parse_match_ct_state_to_flower(struct tc_flower *flower, struct match *match) +@@ -1541,6 +1736,12 @@ parse_match_ct_state_to_flower(struct tc_flower *flower, struct match *match) flower->key.ct_state &= ~(TCA_FLOWER_KEY_CT_FLAGS_NEW); flower->mask.ct_state &= ~(TCA_FLOWER_KEY_CT_FLAGS_NEW); } @@ -58003,7 +101562,7 @@ index 9845e8d3fe..6fa27d1dda 100644 } if (mask->ct_zone) { -@@ -1574,7 +1762,8 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1574,7 +1775,8 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, const struct flow *key = &match->flow; struct flow *mask = &match->wc.masks; const struct flow_tnl *tnl = &match->flow.tunnel; @@ -58013,7 +101572,7 @@ index 9845e8d3fe..6fa27d1dda 100644 struct tc_action *action; bool recirc_act = false; uint32_t block_id = 0; -@@ -1615,17 +1804,49 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1615,17 +1817,53 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, flower.key.tunnel.ttl = tnl->ip_ttl; flower.key.tunnel.tp_src = tnl->tp_src; flower.key.tunnel.tp_dst = tnl->tp_dst; @@ -58053,7 +101612,11 @@ index 9845e8d3fe..6fa27d1dda 100644 + tnl_mask->flags &= ~(FLOW_TNL_F_DONT_FRAGMENT | FLOW_TNL_F_CSUM); + + if (!strcmp(netdev_get_type(netdev), "geneve")) { -+ flower_match_to_tun_opt(&flower, tnl, tnl_mask); ++ err = flower_match_to_tun_opt(&flower, tnl, tnl_mask); ++ if (err) { ++ VLOG_WARN_RL(&warn_rl, "Unable to parse geneve options"); ++ return err; ++ } + } flower.tunnel = true; + } else { @@ -58065,7 +101628,7 @@ index 9845e8d3fe..6fa27d1dda 100644 flower.key.eth_type = key->dl_type; flower.mask.eth_type = mask->dl_type; -@@ -1638,7 +1859,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1638,7 +1876,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, if (mask->vlans[0].tpid && eth_type_vlan(key->vlans[0].tpid)) { flower.key.encap_eth_type[0] = flower.key.eth_type; @@ -58074,7 +101637,7 @@ index 9845e8d3fe..6fa27d1dda 100644 flower.key.eth_type = key->vlans[0].tpid; flower.mask.eth_type = mask->vlans[0].tpid; } -@@ -1734,7 +1955,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1734,7 +1972,7 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, memset(&mask->arp_tha, 0, sizeof mask->arp_tha); } @@ -58083,7 +101646,7 @@ index 
9845e8d3fe..6fa27d1dda 100644 flower.key.ip_proto = key->nw_proto; flower.mask.ip_proto = mask->nw_proto; mask->nw_proto = 0; -@@ -1841,7 +2062,25 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1841,7 +2079,25 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, VLOG_DBG_RL(&rl, "Can't find netdev for output port %d", port); return ENODEV; } @@ -58109,7 +101672,7 @@ index 9845e8d3fe..6fa27d1dda 100644 action->out.ingress = is_internal_port(netdev_get_type(outdev)); action->type = TC_ACT_OUTPUT; flower.action_count++; -@@ -1879,10 +2118,6 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1879,10 +2135,6 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, if (err) { return err; } @@ -58120,7 +101683,7 @@ index 9845e8d3fe..6fa27d1dda 100644 } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) { const struct nlattr *set = nl_attr_get(nla); const size_t set_len = nl_attr_get_size(nla); -@@ -1929,10 +2164,12 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1929,10 +2181,12 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, return EOPNOTSUPP; } @@ -58134,7 +101697,7 @@ index 9845e8d3fe..6fa27d1dda 100644 } prio = get_prio_for_tc_flower(&flower); -@@ -1950,8 +2187,9 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, +@@ -1950,8 +2204,9 @@ netdev_tc_flow_put(struct netdev *netdev, struct match *match, if (!err) { if (stats) { memset(stats, 0, sizeof *stats); @@ -58145,7 +101708,7 @@ index 9845e8d3fe..6fa27d1dda 100644 } return err; -@@ -1989,8 +2227,16 @@ netdev_tc_flow_get(struct netdev *netdev, +@@ -1989,8 +2244,16 @@ netdev_tc_flow_get(struct netdev *netdev, } in_port = netdev_ifindex_to_odp_port(id.ifindex); @@ -58163,7 +101726,7 @@ index 9845e8d3fe..6fa27d1dda 100644 match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX); match->flow.in_port.odp_port = in_port; match_set_recirc_id(match, id.chain); -@@ -2003,7 +2249,6 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED, +@@ -2003,7 +2266,6 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED, const ovs_u128 *ufid, struct dpif_flow_stats *stats) { @@ -58171,7 +101734,7 @@ index 9845e8d3fe..6fa27d1dda 100644 struct tcf_id id; int error; -@@ -2012,18 +2257,7 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED, +@@ -2012,18 +2274,7 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED, return error; } @@ -58191,7 +101754,7 @@ index 9845e8d3fe..6fa27d1dda 100644 } static int -@@ -2077,13 +2311,13 @@ probe_multi_mask_per_prio(int ifindex) +@@ -2077,13 +2328,13 @@ probe_multi_mask_per_prio(int ifindex) id2 = tc_make_tcf_id(ifindex, block_id, prio, TC_INGRESS); error = tc_replace_flower(&id2, &flower); @@ -58207,7 +101770,7 @@ index 9845e8d3fe..6fa27d1dda 100644 multi_mask_per_prio = true; VLOG_INFO("probe tc: multiple masks on single tc prio is supported."); -@@ -2135,7 +2369,7 @@ probe_ct_state_support(int ifindex) +@@ -2135,7 +2386,7 @@ probe_ct_state_support(int ifindex) goto out_del; } @@ -58216,7 +101779,7 @@ index 9845e8d3fe..6fa27d1dda 100644 ct_state_support = OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | OVS_CS_F_TRACKED | -@@ -2149,7 +2383,7 @@ probe_ct_state_support(int ifindex) +@@ -2149,7 +2400,7 @@ probe_ct_state_support(int ifindex) goto out_del; } @@ -58225,7 +101788,7 @@ index 9845e8d3fe..6fa27d1dda 100644 /* Test for ct_state INVALID support */ memset(&flower, 0, sizeof flower); -@@ -2160,7 +2394,7 @@ probe_ct_state_support(int ifindex) +@@ -2160,7 +2411,7 @@ probe_ct_state_support(int ifindex) goto 
out; } @@ -58234,7 +101797,7 @@ index 9845e8d3fe..6fa27d1dda 100644 ct_state_support |= OVS_CS_F_INVALID; /* Test for ct_state REPLY support */ -@@ -2176,7 +2410,7 @@ probe_ct_state_support(int ifindex) +@@ -2176,7 +2427,7 @@ probe_ct_state_support(int ifindex) ct_state_support |= OVS_CS_F_REPLY_DIR; out_del: @@ -58243,7 +101806,7 @@ index 9845e8d3fe..6fa27d1dda 100644 out: tc_add_del_qdisc(ifindex, false, 0, TC_INGRESS); VLOG_INFO("probe tc: supported ovs ct_state bits: 0x%x", ct_state_support); -@@ -2251,7 +2485,7 @@ netdev_tc_init_flow_api(struct netdev *netdev) +@@ -2251,7 +2502,7 @@ netdev_tc_init_flow_api(struct netdev *netdev) /* fallback here if delete chains fail */ if (!get_chain_supported) { @@ -58252,7 +101815,7 @@ index 9845e8d3fe..6fa27d1dda 100644 } /* make sure there is no ingress/egress qdisc */ -@@ -2270,8 +2504,9 @@ netdev_tc_init_flow_api(struct netdev *netdev) +@@ -2270,8 +2521,9 @@ netdev_tc_init_flow_api(struct netdev *netdev) error = tc_add_del_qdisc(ifindex, true, block_id, hook); if (error && error != EEXIST) { @@ -59214,7 +102777,7 @@ index b97470743e..e9050c31ba 100644 /* Prepending attributes. */ void *nl_msg_push_unspec_uninit(struct ofpbuf *, uint16_t type, size_t); diff --git a/lib/odp-util.c b/lib/odp-util.c -index 9a705cffa3..fac4cf3a8c 100644 +index 9a705cffa3..aa49fbebb7 100644 --- a/lib/odp-util.c +++ b/lib/odp-util.c @@ -1003,7 +1003,7 @@ format_odp_conntrack_action(struct ds *ds, const struct nlattr *attr) @@ -59287,7 +102850,54 @@ index 9a705cffa3..fac4cf3a8c 100644 } ofpbuf_clear(&ofp); } -@@ -7161,11 +7173,6 @@ parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], +@@ -6390,12 +6402,10 @@ odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms, + icmpv6_key->icmpv6_code = ntohs(data->tp_dst); + + if (is_nd(flow, NULL) +- /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, ICMP +- * type and code are 8 bits wide. Therefore, an exact match +- * looks like htons(0xff), not htons(0xffff). See +- * xlate_wc_finish() for details. */ +- && (!export_mask || (data->tp_src == htons(0xff) +- && data->tp_dst == htons(0xff)))) { ++ /* Even though 'tp_src' is 16 bits wide, ICMP type is 8 bits ++ * wide. Therefore, an exact match looks like htons(0xff), ++ * not htons(0xffff). See xlate_wc_finish() for details. */ ++ && (!export_mask || data->tp_src == htons(0xff))) { + struct ovs_key_nd *nd_key; + nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND, + sizeof *nd_key); +@@ -7110,20 +7120,17 @@ parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], + flow->arp_sha = nd_key->nd_sll; + flow->arp_tha = nd_key->nd_tll; + if (is_mask) { +- /* Even though 'tp_src' and 'tp_dst' are 16 bits wide, +- * ICMP type and code are 8 bits wide. Therefore, an +- * exact match looks like htons(0xff), not +- * htons(0xffff). See xlate_wc_finish() for details. +- * */ ++ /* Even though 'tp_src' is 16 bits wide, ICMP type ++ * is 8 bits wide. Therefore, an exact match looks ++ * like htons(0xff), not htons(0xffff). See ++ * xlate_wc_finish() for details. 
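++                  * (The ICMP type occupies only the low-order byte of
++                  * the big-endian 16-bit field, hence the 0x00ff mask.)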
*/ + if (!is_all_zeros(nd_key, sizeof *nd_key) && +- (flow->tp_src != htons(0xff) || +- flow->tp_dst != htons(0xff))) { ++ flow->tp_src != htons(0xff)) { + odp_parse_error(&rl, errorp, +- "ICMP (src,dst) masks should be " +- "(0xff,0xff) but are actually " +- "(%#"PRIx16",%#"PRIx16")", +- ntohs(flow->tp_src), +- ntohs(flow->tp_dst)); ++ "ICMP src mask should be " ++ "(0xff) but is actually " ++ "(%#"PRIx16")", ++ ntohs(flow->tp_src)); + return ODP_FIT_ERROR; + } else { + *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND; +@@ -7161,11 +7168,6 @@ parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], } } } @@ -59299,7 +102909,7 @@ index 9a705cffa3..fac4cf3a8c 100644 } if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) { if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) { -@@ -7188,7 +7195,8 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], +@@ -7188,7 +7190,8 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], uint64_t present_attrs, int out_of_range_attr, uint64_t expected_attrs, struct flow *flow, const struct nlattr *key, size_t key_len, @@ -59309,7 +102919,7 @@ index 9a705cffa3..fac4cf3a8c 100644 { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); bool is_mask = src_flow != flow; -@@ -7196,9 +7204,11 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], +@@ -7196,9 +7199,11 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], const struct nlattr *encap; enum odp_key_fitness encap_fitness; enum odp_key_fitness fitness = ODP_FIT_ERROR; @@ -59322,7 +102932,7 @@ index 9a705cffa3..fac4cf3a8c 100644 (is_mask ? (src_flow->vlans[encaps].tci & htons(VLAN_CFI)) != 0 : eth_type_vlan(flow->dl_type))) { -@@ -7259,6 +7269,14 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], +@@ -7259,6 +7264,14 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], } expected_attrs = 0; @@ -59337,7 +102947,7 @@ index 9a705cffa3..fac4cf3a8c 100644 if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow, src_flow, errorp)) { return ODP_FIT_ERROR; -@@ -7281,7 +7299,7 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], +@@ -7281,7 +7294,7 @@ parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1], static enum odp_key_fitness odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len, struct flow *flow, const struct flow *src_flow, @@ -59346,7 +102956,7 @@ index 9a705cffa3..fac4cf3a8c 100644 { /* New "struct flow" fields that are visible to the datapath (including all * data fields) should be translated from equivalent datapath flow fields -@@ -7308,6 +7326,14 @@ odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len, +@@ -7308,6 +7321,14 @@ odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len, } expected_attrs = 0; @@ -59361,7 +102971,7 @@ index 9a705cffa3..fac4cf3a8c 100644 /* Metadata. 
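     * (recirculation ID, dp_hash, priority, skb mark and the like are
     * parsed before the packet header attributes).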
*/ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) { flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]); -@@ -7431,7 +7457,7 @@ odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len, +@@ -7431,7 +7452,7 @@ odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len, : eth_type_vlan(src_flow->dl_type)) { fitness = parse_8021q_onward(attrs, present_attrs, out_of_range_attr, expected_attrs, flow, key, key_len, @@ -59370,7 +102980,7 @@ index 9a705cffa3..fac4cf3a8c 100644 } else { if (is_mask) { /* A missing VLAN mask means exact match on vlan_tci 0 (== no -@@ -7497,7 +7523,7 @@ enum odp_key_fitness +@@ -7497,7 +7518,7 @@ enum odp_key_fitness odp_flow_key_to_flow(const struct nlattr *key, size_t key_len, struct flow *flow, char **errorp) { @@ -59379,7 +102989,7 @@ index 9a705cffa3..fac4cf3a8c 100644 } /* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key' -@@ -7509,14 +7535,16 @@ odp_flow_key_to_flow(const struct nlattr *key, size_t key_len, +@@ -7509,14 +7530,16 @@ odp_flow_key_to_flow(const struct nlattr *key, size_t key_len, * If 'errorp' is nonnull, this function uses it for detailed error reports: if * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in * '*errorp', otherwise NULL. */ @@ -59401,7 +103011,7 @@ index 9a705cffa3..fac4cf3a8c 100644 } else { if (errorp) { *errorp = NULL; -@@ -7530,6 +7558,15 @@ odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len, +@@ -7530,6 +7553,15 @@ odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len, } } @@ -59417,7 +103027,7 @@ index 9a705cffa3..fac4cf3a8c 100644 /* Converts the netlink formated key/mask to match. * Fails if odp_flow_key_from_key/mask and odp_flow_key_key/mask * disagree on the acceptable form of flow */ -@@ -7540,12 +7577,15 @@ parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len, +@@ -7540,12 +7572,15 @@ parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len, { enum odp_key_fitness fitness; @@ -59438,7 +103048,7 @@ index 9a705cffa3..fac4cf3a8c 100644 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); if (!VLOG_DROP_ERR(&rl)) { -@@ -7553,20 +7593,18 @@ parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len, +@@ -7553,20 +7588,18 @@ parse_key_and_mask_to_match(const struct nlattr *key, size_t key_len, ds_init(&s); odp_flow_format(key, key_len, NULL, 0, NULL, &s, true); @@ -59629,6 +103239,19 @@ index 271105bdea..879275a7a3 100644 ofpbuf_prealloc_headroom(b, size); b->data = (char*)b->data - size; b->size += size; +diff --git a/lib/ovs-atomic.h b/lib/ovs-atomic.h +index 8fdce0cf80..62ee4e0682 100644 +--- a/lib/ovs-atomic.h ++++ b/lib/ovs-atomic.h +@@ -329,7 +329,7 @@ + #if __CHECKER__ + /* sparse doesn't understand some GCC extensions we use. 
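   * Likewise, the explicit __clang__ test below keeps a GCC build from
   * ever selecting the Clang-only atomics header.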
*/ + #include "ovs-atomic-pthreads.h" +- #elif __has_extension(c_atomic) ++ #elif __clang__ && __has_extension(c_atomic) + #include "ovs-atomic-clang.h" + #elif HAVE_ATOMIC && __cplusplus >= 201103L + #include "ovs-atomic-c++.h" diff --git a/lib/ovs-lldp.c b/lib/ovs-lldp.c index 162311fa45..2d13e971ed 100644 --- a/lib/ovs-lldp.c @@ -59840,7 +103463,7 @@ index ecc4c92010..8b397b7fb0 100644 + #endif /* ovs-rcu.h */ diff --git a/lib/ovs-router.c b/lib/ovs-router.c -index 09b81c6e5a..5d0fbd503e 100644 +index 09b81c6e5a..4080134533 100644 --- a/lib/ovs-router.c +++ b/lib/ovs-router.c @@ -164,9 +164,10 @@ static void rt_init_match(struct match *match, uint32_t mark, @@ -59871,11 +103494,39 @@ index 09b81c6e5a..5d0fbd503e 100644 } if (err) { struct ds ds = DS_EMPTY_INITIALIZER; +@@ -275,6 +278,19 @@ ovs_router_insert(uint32_t mark, const struct in6_addr *ip_dst, uint8_t plen, + } + } + ++/* The same as 'ovs_router_insert', but it adds the route even if updates ++ * from the system routing table are disabled. Used for unit tests. */ ++void ++ovs_router_force_insert(uint32_t mark, const struct in6_addr *ip_dst, ++ uint8_t plen, bool local, const char output_bridge[], ++ const struct in6_addr *gw) ++{ ++ uint8_t priority = local ? plen + 64 : plen; ++ ++ ovs_router_insert__(mark, priority, local, ip_dst, plen, ++ output_bridge, gw); ++} ++ + static void + rt_entry_delete__(const struct cls_rule *cr) + { diff --git a/lib/ovs-router.h b/lib/ovs-router.h -index 34ea163eef..d8ce3c00de 100644 +index 34ea163eef..53a3b5d7e9 100644 --- a/lib/ovs-router.h +++ b/lib/ovs-router.h -@@ -37,6 +37,10 @@ void ovs_router_flush(void); +@@ -33,10 +33,18 @@ void ovs_router_init(void); + void ovs_router_insert(uint32_t mark, const struct in6_addr *ip_dst, + uint8_t plen, bool local, + const char output_bridge[], const struct in6_addr *gw); ++void ovs_router_force_insert(uint32_t mark, const struct in6_addr *ip_dst, ++ uint8_t plen, bool local, ++ const char output_bridge[], ++ const struct in6_addr *gw); + void ovs_router_flush(void); void ovs_router_disable_system_routing_table(void); @@ -60088,10 +103739,18 @@ index 47115a7b85..ba5d179a65 100644 /* Transactions with named-uuid row names. 
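  * A "named-uuid" is a client-chosen placeholder that lets one
  * operation in a transaction refer to a row inserted by an earlier
  * operation, before the server has assigned the real UUID (RFC 7047).
  * Sketch of the wire format:
  *
  *   {"op": "insert", "table": "B", "row": {...}, "uuid-name": "r1"}
  *   ...
  *   "ports": ["named-uuid", "r1"]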
*/ struct json *ovsdb_datum_to_json_with_row_names(const struct ovsdb_datum *, diff --git a/lib/ovsdb-idl.c b/lib/ovsdb-idl.c -index c19128d55c..917868c54a 100644 +index c19128d55c..a06d78c0de 100644 --- a/lib/ovsdb-idl.c +++ b/lib/ovsdb-idl.c -@@ -389,25 +389,25 @@ ovsdb_idl_clear(struct ovsdb_idl *db) +@@ -177,6 +177,7 @@ static void ovsdb_idl_row_mark_backrefs_for_reparsing(struct ovsdb_idl_row *); + static void ovsdb_idl_row_track_change(struct ovsdb_idl_row *, + enum ovsdb_idl_change); + static void ovsdb_idl_row_untrack_change(struct ovsdb_idl_row *); ++static void ovsdb_idl_row_clear_changeseqno(struct ovsdb_idl_row *); + + static void ovsdb_idl_txn_abort_all(struct ovsdb_idl *); + static bool ovsdb_idl_txn_extract_mutations(struct ovsdb_idl_row *, +@@ -389,25 +390,25 @@ ovsdb_idl_clear(struct ovsdb_idl *db) */ for (size_t i = 0; i < db->class_->n_tables; i++) { struct ovsdb_idl_table *table = &db->tables[i]; @@ -60122,7 +103781,7 @@ index c19128d55c..917868c54a 100644 ovs_list_remove(&arc->src_node); ovs_list_remove(&arc->dst_node); free(arc); -@@ -1041,8 +1041,8 @@ ovsdb_idl_condition_destroy(struct ovsdb_idl_condition *cond) +@@ -1041,8 +1042,8 @@ ovsdb_idl_condition_destroy(struct ovsdb_idl_condition *cond) void ovsdb_idl_condition_clear(struct ovsdb_idl_condition *cond) { @@ -60133,7 +103792,7 @@ index c19128d55c..917868c54a 100644 hmap_remove(&cond->clauses, &clause->hmap_node); ovsdb_idl_clause_destroy(clause); } -@@ -1345,9 +1345,9 @@ ovsdb_idl_track_clear__(struct ovsdb_idl *idl, bool flush_all) +@@ -1345,14 +1346,15 @@ ovsdb_idl_track_clear__(struct ovsdb_idl *idl, bool flush_all) struct ovsdb_idl_table *table = &idl->tables[i]; if (!ovs_list_is_empty(&table->track_list)) { @@ -60145,7 +103804,13 @@ index c19128d55c..917868c54a 100644 if (row->updated) { free(row->updated); row->updated = NULL; -@@ -1480,9 +1480,9 @@ ovsdb_idl_parse_update(struct ovsdb_idl *idl, + } + ovsdb_idl_row_untrack_change(row); ++ ovsdb_idl_row_clear_changeseqno(row); + + if (ovsdb_idl_row_is_orphan(row)) { + ovsdb_idl_row_unparse(row); +@@ -1480,9 +1482,9 @@ ovsdb_idl_parse_update(struct ovsdb_idl *idl, static void ovsdb_idl_reparse_deleted(struct ovsdb_idl *db) { @@ -60157,7 +103822,15 @@ index c19128d55c..917868c54a 100644 ovsdb_idl_row_untrack_change(row); add_tracked_change_for_references(row); ovsdb_idl_row_reparse_backrefs(row); -@@ -1906,8 +1906,8 @@ ovsdb_idl_index_create2(struct ovsdb_idl *idl, +@@ -1572,6 +1574,7 @@ ovsdb_idl_process_update(struct ovsdb_idl_table *table, + ru->columns); + } else if (ovsdb_idl_row_is_orphan(row)) { + ovsdb_idl_row_untrack_change(row); ++ ovsdb_idl_row_clear_changeseqno(row); + ovsdb_idl_insert_row(row, ru->columns); + } else { + VLOG_ERR_RL(&semantic_rl, "cannot add existing row "UUID_FMT" to " +@@ -1906,8 +1909,8 @@ ovsdb_idl_index_create2(struct ovsdb_idl *idl, static void ovsdb_idl_destroy_indexes(struct ovsdb_idl_table *table) { @@ -60168,7 +103841,7 @@ index c19128d55c..917868c54a 100644 skiplist_destroy(index->skiplist, NULL); free(index->columns); free(index); -@@ -2145,12 +2145,12 @@ ovsdb_idl_row_clear_new(struct ovsdb_idl_row *row) +@@ -2145,12 +2148,12 @@ ovsdb_idl_row_clear_new(struct ovsdb_idl_row *row) static void ovsdb_idl_row_clear_arcs(struct ovsdb_idl_row *row, bool destroy_dsts) { @@ -60183,7 +103856,7 @@ index c19128d55c..917868c54a 100644 ovs_list_remove(&arc->dst_node); if (destroy_dsts && ovsdb_idl_row_is_orphan(arc->dst) -@@ -2166,7 +2166,7 @@ ovsdb_idl_row_clear_arcs(struct ovsdb_idl_row *row, bool destroy_dsts) +@@ -2166,7 
+2169,7 @@ ovsdb_idl_row_clear_arcs(struct ovsdb_idl_row *row, bool destroy_dsts) static void ovsdb_idl_row_reparse_backrefs(struct ovsdb_idl_row *row) { @@ -60192,7 +103865,7 @@ index c19128d55c..917868c54a 100644 /* This is trickier than it looks. ovsdb_idl_row_clear_arcs() will destroy * 'arc', so we need to use the "safe" variant of list traversal. However, -@@ -2178,7 +2178,7 @@ ovsdb_idl_row_reparse_backrefs(struct ovsdb_idl_row *row) +@@ -2178,7 +2181,7 @@ ovsdb_idl_row_reparse_backrefs(struct ovsdb_idl_row *row) * (If duplicate arcs were possible then we would need to make sure that * 'next' didn't also point into 'arc''s destination, but we forbid * duplicate arcs.) */ @@ -60201,7 +103874,25 @@ index c19128d55c..917868c54a 100644 struct ovsdb_idl_row *ref = arc->src; ovsdb_idl_row_unparse(ref); -@@ -2329,9 +2329,9 @@ ovsdb_idl_row_destroy_postprocess(struct ovsdb_idl *idl) +@@ -2223,11 +2226,15 @@ ovsdb_idl_row_untrack_change(struct ovsdb_idl_row *row) + return; + } + ++ ovs_list_remove(&row->track_node); ++ ovs_list_init(&row->track_node); ++} ++ ++static void ovsdb_idl_row_clear_changeseqno(struct ovsdb_idl_row *row) ++{ + row->change_seqno[OVSDB_IDL_CHANGE_INSERT] = + row->change_seqno[OVSDB_IDL_CHANGE_MODIFY] = + row->change_seqno[OVSDB_IDL_CHANGE_DELETE] = 0; +- ovs_list_remove(&row->track_node); +- ovs_list_init(&row->track_node); + } + + static struct ovsdb_idl_row * +@@ -2329,9 +2336,9 @@ ovsdb_idl_row_destroy_postprocess(struct ovsdb_idl *idl) struct ovsdb_idl_table *table = &idl->tables[i]; if (!ovs_list_is_empty(&table->track_list)) { @@ -60213,7 +103904,7 @@ index c19128d55c..917868c54a 100644 if (!ovsdb_idl_track_is_set(row->table)) { ovs_list_remove(&row->track_node); ovsdb_idl_row_unparse(row); -@@ -2367,6 +2367,10 @@ ovsdb_idl_insert_row(struct ovsdb_idl_row *row, const struct shash *data) +@@ -2367,6 +2374,10 @@ ovsdb_idl_insert_row(struct ovsdb_idl_row *row, const struct shash *data) static void ovsdb_idl_delete_row(struct ovsdb_idl_row *row) { @@ -60224,7 +103915,7 @@ index c19128d55c..917868c54a 100644 ovsdb_idl_remove_from_indexes(row); ovsdb_idl_row_clear_arcs(row, true); ovsdb_idl_row_destroy(row); -@@ -2729,7 +2733,7 @@ ovsdb_idl_txn_increment(struct ovsdb_idl_txn *txn, +@@ -2729,7 +2740,7 @@ ovsdb_idl_txn_increment(struct ovsdb_idl_txn *txn, void ovsdb_idl_txn_destroy(struct ovsdb_idl_txn *txn) { @@ -60233,7 +103924,7 @@ index c19128d55c..917868c54a 100644 if (txn->status == TXN_INCOMPLETE) { ovsdb_cs_forget_transaction(txn->idl->cs, txn->request_id); -@@ -2739,7 +2743,7 @@ ovsdb_idl_txn_destroy(struct ovsdb_idl_txn *txn) +@@ -2739,7 +2750,7 @@ ovsdb_idl_txn_destroy(struct ovsdb_idl_txn *txn) ovsdb_idl_txn_abort(txn); ds_destroy(&txn->comment); free(txn->error); @@ -60242,7 +103933,7 @@ index c19128d55c..917868c54a 100644 free(insert); } hmap_destroy(&txn->inserted_rows); -@@ -2824,7 +2828,7 @@ substitute_uuids(struct json *json, const struct ovsdb_idl_txn *txn) +@@ -2824,7 +2835,7 @@ substitute_uuids(struct json *json, const struct ovsdb_idl_txn *txn) static void ovsdb_idl_txn_disassemble(struct ovsdb_idl_txn *txn) { @@ -60251,7 +103942,7 @@ index c19128d55c..917868c54a 100644 /* This must happen early. Otherwise, ovsdb_idl_row_parse() will call an * ovsdb_idl_column's 'parse' function, which will call -@@ -2832,7 +2836,7 @@ ovsdb_idl_txn_disassemble(struct ovsdb_idl_txn *txn) +@@ -2832,7 +2843,7 @@ ovsdb_idl_txn_disassemble(struct ovsdb_idl_txn *txn) * transaction and fail to update the graph. 
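      * Clearing 'txn->idl->txn' first means any such write can no
      * longer reach this transaction while it is being torn down.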
*/ txn->idl->txn = NULL; @@ -60290,6 +103981,29 @@ index 62c4621181..321043282e 100644 set_op_destroy(set_op, type); } hmap_destroy(&list->hmap); +diff --git a/lib/ovsdb-types.h b/lib/ovsdb-types.h +index b9eb0928df..d2455fc977 100644 +--- a/lib/ovsdb-types.h ++++ b/lib/ovsdb-types.h +@@ -235,6 +235,18 @@ static inline bool ovsdb_type_is_map(const struct ovsdb_type *type) + return type->value.type != OVSDB_TYPE_VOID; + } + ++static inline bool ovsdb_type_has_strong_refs(const struct ovsdb_type *type) ++{ ++ return ovsdb_base_type_is_strong_ref(&type->key) ++ || ovsdb_base_type_is_strong_ref(&type->value); ++} ++ ++static inline bool ovsdb_type_has_weak_refs(const struct ovsdb_type *type) ++{ ++ return ovsdb_base_type_is_weak_ref(&type->key) ++ || ovsdb_base_type_is_weak_ref(&type->value); ++} ++ + #ifdef __cplusplus + } + #endif diff --git a/lib/packets.c b/lib/packets.c index d0fba81766..1dcd4a6fcd 100644 --- a/lib/packets.c @@ -60586,6 +104300,30 @@ index a929ddfd2d..89a0bcaf95 100644 if (deadline != LLONG_MAX) { long long int remaining = deadline - now; return MAX(0, MIN(INT_MAX, remaining)); +diff --git a/lib/rstp.c b/lib/rstp.c +index 7e351bf32f..f8c46e7f92 100644 +--- a/lib/rstp.c ++++ b/lib/rstp.c +@@ -50,7 +50,7 @@ + + VLOG_DEFINE_THIS_MODULE(rstp); + +-struct ovs_mutex rstp_mutex = OVS_MUTEX_INITIALIZER; ++struct ovs_mutex rstp_mutex; + + static struct ovs_list all_rstps__ = OVS_LIST_INITIALIZER(&all_rstps__); + static struct ovs_list *const all_rstps OVS_GUARDED_BY(rstp_mutex) = &all_rstps__; +@@ -248,6 +248,10 @@ void + rstp_init(void) + OVS_EXCLUDED(rstp_mutex) + { ++ /* We need a recursive mutex because rstp_send_bpdu() could loop back ++ * into the rstp module through a patch port. */ ++ ovs_mutex_init_recursive(&rstp_mutex); ++ + unixctl_command_register("rstp/tcn", "[bridge]", 0, 1, rstp_unixctl_tcn, + NULL); + unixctl_command_register("rstp/show", "[bridge]", 0, 1, rstp_unixctl_show, diff --git a/lib/seq.c b/lib/seq.c index 6581cb06ba..99e5bf8bd1 100644 --- a/lib/seq.c @@ -61900,10 +105638,20 @@ index adb2d3182a..c4ae6d355a 100644 id->prio = tc_get_major(tc->tcm_info); id->handle = tc->tcm_handle; diff --git a/lib/tc.h b/lib/tc.h -index a147ca461d..35068cbd89 100644 +index a147ca461d..03c7b8181b 100644 --- a/lib/tc.h +++ b/lib/tc.h -@@ -256,11 +256,23 @@ struct tc_action { +@@ -209,7 +209,8 @@ struct tc_action { + struct { + bool id_present; + ovs_be64 id; +- ovs_be16 tp_src; ++ /* ovs_be16 tp_src; Could have been here, but there is no ++ * TCA_TUNNEL_KEY_ENC_ attribute for it in the kernel. 
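++             * The kernel picks the encapsulation UDP source port by
++             * itself, typically from a flow hash, for ECMP entropy.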
*/ + ovs_be16 tp_dst; + uint8_t tos; + uint8_t ttl; +@@ -256,11 +257,23 @@ struct tc_action { bool force; bool commit; } ct; @@ -61927,7 +105675,7 @@ index a147ca461d..35068cbd89 100644 enum tc_offloaded_state { TC_OFFLOADED_STATE_UNDEFINED, TC_OFFLOADED_STATE_IN_HW, -@@ -307,7 +319,6 @@ static inline bool +@@ -307,7 +320,6 @@ static inline bool is_tcf_id_eq(struct tcf_id *id1, struct tcf_id *id2) { return id1->prio == id2->prio @@ -61935,7 +105683,7 @@ index a147ca461d..35068cbd89 100644 && id1->handle == id2->handle && id1->hook == id2->hook && id1->block_id == id2->block_id -@@ -330,15 +341,10 @@ struct tc_flower { +@@ -330,15 +342,10 @@ struct tc_flower { int action_count; struct tc_action actions[TCA_ACT_MAX_NUM]; @@ -61953,7 +105701,7 @@ index a147ca461d..35068cbd89 100644 uint32_t csum_update_flags; bool tunnel; -@@ -352,15 +358,9 @@ struct tc_flower { +@@ -352,15 +359,9 @@ struct tc_flower { enum tc_offload_policy tc_policy; }; @@ -62107,10 +105855,28 @@ index 69aed6722c..103357ee91 100644 } diff --git a/lib/vconn.c b/lib/vconn.c -index 7415e6291f..b556762277 100644 +index 7415e6291f..e9603432d2 100644 --- a/lib/vconn.c +++ b/lib/vconn.c -@@ -960,8 +960,8 @@ vconn_transact_multipart(struct vconn *vconn, +@@ -682,7 +682,6 @@ do_send(struct vconn *vconn, struct ofpbuf *msg) + + ofpmsg_update_length(msg); + if (!VLOG_IS_DBG_ENABLED()) { +- COVERAGE_INC(vconn_sent); + retval = (vconn->vclass->send)(vconn, msg); + } else { + char *s = ofp_to_string(msg->data, msg->size, NULL, NULL, 1); +@@ -693,6 +692,9 @@ do_send(struct vconn *vconn, struct ofpbuf *msg) + } + free(s); + } ++ if (!retval) { ++ COVERAGE_INC(vconn_sent); ++ } + return retval; + } + +@@ -960,8 +962,8 @@ vconn_transact_multipart(struct vconn *vconn, ovs_list_init(replies); /* Send all the requests. */ @@ -62265,13 +106031,15 @@ index 4c3bace6ef..09134feca0 100644 AC_DEFINE([HAVE_LD_AVX512_GOOD], [1], [Define to 1 if binutils correctly supports AVX512.]) diff --git a/ofproto/bond.c b/ofproto/bond.c -index cdfdf0b9d8..a7c859b909 100644 +index cdfdf0b9d8..1f3fd4f7b3 100644 --- a/ofproto/bond.c +++ b/ofproto/bond.c -@@ -185,10 +185,14 @@ static struct bond_member *choose_output_member(const struct bond *, +@@ -184,11 +184,15 @@ static struct bond_member *choose_output_member(const struct bond *, + struct flow_wildcards *, uint16_t vlan) OVS_REQ_RDLOCK(rwlock); - static void update_recirc_rules__(struct bond *); +-static void update_recirc_rules__(struct bond *); ++static void update_recirc_rules(struct bond *) OVS_REQ_WRLOCK(rwlock); +static bool bond_may_recirc(const struct bond *); +static void bond_update_post_recirc_rules__(struct bond *, bool force) + OVS_REQ_WRLOCK(rwlock); @@ -62283,8 +106051,34 @@ index cdfdf0b9d8..a7c859b909 100644 /* Attempts to parse 's' as the name of a bond balancing mode. If successful, * stores the mode in '*balance' and returns true. Otherwise returns false * without modifying '*balance'. */ -@@ -338,7 +342,7 @@ static void - update_recirc_rules__(struct bond *bond) +@@ -293,7 +297,10 @@ bond_unref(struct bond *bond) + } + free(bond->hash); + bond->hash = NULL; +- update_recirc_rules__(bond); ++ ++ ovs_rwlock_wrlock(&rwlock); ++ update_recirc_rules(bond); ++ ovs_rwlock_unlock(&rwlock); + + hmap_destroy(&bond->pr_rule_ops); + free(bond->primary); +@@ -325,20 +332,11 @@ add_pr_rule(struct bond *bond, const struct match *match, + hmap_insert(&bond->pr_rule_ops, &pr_op->hmap_node, hash); + } + +-/* This function should almost never be called directly. 
+- * 'update_recirc_rules()' should be called instead. Since +- * this function modifies 'bond->pr_rule_ops', it is only +- * safe when 'rwlock' is held. +- * +- * However, when the 'bond' is the only reference in the system, +- * calling this function avoid acquiring lock only to satisfy +- * lock annotation. Currently, only 'bond_unref()' calls +- * this function directly. */ + static void +-update_recirc_rules__(struct bond *bond) ++update_recirc_rules(struct bond *bond) OVS_REQ_WRLOCK(rwlock) { struct match match; - struct bond_pr_rule_op *pr_op, *next_op; @@ -62292,7 +106086,7 @@ index cdfdf0b9d8..a7c859b909 100644 uint64_t ofpacts_stub[128 / 8]; struct ofpbuf ofpacts; int i; -@@ -372,7 +376,7 @@ update_recirc_rules__(struct bond *bond) +@@ -372,7 +370,7 @@ update_recirc_rules__(struct bond *bond) ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); @@ -62301,7 +106095,36 @@ index cdfdf0b9d8..a7c859b909 100644 int error; switch (pr_op->op) { case ADD: -@@ -510,6 +514,12 @@ bond_reconfigure(struct bond *bond, const struct bond_settings *s) +@@ -401,6 +399,15 @@ update_recirc_rules__(struct bond *bond) + + VLOG_ERR("failed to remove post recirculation flow %s", err_s); + free(err_s); ++ } else if (bond->hash) { ++ /* If the flow deletion failed, a subsequent call to ++ * ofproto_dpif_add_internal_flow() would just modify the ++ * flow preserving its statistics. Therefore, only reset ++ * the entry's byte counter if it succeeds. */ ++ uint32_t hash = pr_op->match.flow.dp_hash & BOND_MASK; ++ struct bond_entry *entry = &bond->hash[hash]; ++ ++ entry->pr_tx_bytes = 0; + } + + hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node); +@@ -415,12 +422,6 @@ update_recirc_rules__(struct bond *bond) + ofpbuf_uninit(&ofpacts); + } + +-static void +-update_recirc_rules(struct bond *bond) +- OVS_REQ_RDLOCK(rwlock) +-{ +- update_recirc_rules__(bond); +-} + + /* Updates 'bond''s overall configuration to 's'. + * +@@ -510,6 +511,12 @@ bond_reconfigure(struct bond *bond, const struct bond_settings *s) bond_entry_reset(bond); } @@ -62314,7 +106137,7 @@ index cdfdf0b9d8..a7c859b909 100644 ovs_rwlock_unlock(&rwlock); return revalidate; } -@@ -723,6 +733,12 @@ bond_run(struct bond *bond, enum lacp_status lacp_status) +@@ -723,6 +730,12 @@ bond_run(struct bond *bond, enum lacp_status lacp_status) bond_choose_active_member(bond); } @@ -62327,7 +106150,7 @@ index cdfdf0b9d8..a7c859b909 100644 revalidate = bond->bond_revalidate; bond->bond_revalidate = false; ovs_rwlock_unlock(&rwlock); -@@ -876,7 +892,7 @@ bond_check_admissibility(struct bond *bond, const void *member_, +@@ -876,7 +889,7 @@ bond_check_admissibility(struct bond *bond, const void *member_, if (!member->enabled && member->may_enable) { VLOG_DBG_RL(&rl, "bond %s: member %s: " "main thread has not yet enabled member", @@ -62336,7 +106159,7 @@ index cdfdf0b9d8..a7c859b909 100644 } goto out; case LACP_CONFIGURED: -@@ -1038,7 +1054,7 @@ bond_may_recirc(const struct bond *bond) +@@ -1038,7 +1051,7 @@ bond_may_recirc(const struct bond *bond) } static void @@ -62345,7 +106168,7 @@ index cdfdf0b9d8..a7c859b909 100644 OVS_REQ_WRLOCK(rwlock) { struct bond_entry *e; -@@ -1086,6 +1102,19 @@ bond_update_post_recirc_rules(struct bond *bond, uint32_t *recirc_id, +@@ -1086,6 +1099,19 @@ bond_update_post_recirc_rules(struct bond *bond, uint32_t *recirc_id, } } @@ -62365,7 +106188,7 @@ index cdfdf0b9d8..a7c859b909 100644 /* Rebalancing. 
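  * The functions below periodically shift flow hashes between bond
  * members to even out the measured load.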
*/ -@@ -1258,7 +1287,7 @@ insert_bal(struct ovs_list *bals, struct bond_member *member) +@@ -1258,7 +1284,7 @@ insert_bal(struct ovs_list *bals, struct bond_member *member) break; } } @@ -63075,6 +106898,30 @@ index 1f42cd5275..75c0ab81ac 100644 struct dpif_ipfix *, const struct ofproto_ipfix_bridge_exporter_options *, const struct ofproto_ipfix_flow_exporter_options *, size_t); +diff --git a/ofproto/ofproto-dpif-monitor.c b/ofproto/ofproto-dpif-monitor.c +index bb0e490910..5132f9c952 100644 +--- a/ofproto/ofproto-dpif-monitor.c ++++ b/ofproto/ofproto-dpif-monitor.c +@@ -275,19 +275,16 @@ monitor_mport_run(struct mport *mport, struct dp_packet *packet) + long long int lldp_wake_time = LLONG_MAX; + + if (mport->cfm && cfm_should_send_ccm(mport->cfm)) { +- dp_packet_clear(packet); + cfm_compose_ccm(mport->cfm, packet, mport->hw_addr); + ofproto_dpif_send_packet(mport->ofport, false, packet); + } + if (mport->bfd && bfd_should_send_packet(mport->bfd)) { + bool oam; + +- dp_packet_clear(packet); + bfd_put_packet(mport->bfd, packet, mport->hw_addr, &oam); + ofproto_dpif_send_packet(mport->ofport, oam, packet); + } + if (mport->lldp && lldp_should_send_packet(mport->lldp)) { +- dp_packet_clear(packet); + lldp_put_packet(mport->lldp, packet, mport->hw_addr); + ofproto_dpif_send_packet(mport->ofport, false, packet); + } diff --git a/ofproto/ofproto-dpif-sflow.c b/ofproto/ofproto-dpif-sflow.c index 30e7caf54a..e8e1de920b 100644 --- a/ofproto/ofproto-dpif-sflow.c @@ -63103,7 +106950,7 @@ index 30e7caf54a..e8e1de920b 100644 } hmap_destroy(&ds->ports); diff --git a/ofproto/ofproto-dpif-trace.c b/ofproto/ofproto-dpif-trace.c -index 78a54c715d..109940ad2a 100644 +index 78a54c715d..b2e778707f 100644 --- a/ofproto/ofproto-dpif-trace.c +++ b/ofproto/ofproto-dpif-trace.c @@ -65,8 +65,8 @@ static void @@ -63117,6 +106964,41 @@ index 78a54c715d..109940ad2a 100644 ovs_list_remove(&node->node); oftrace_node_destroy(node); } +@@ -812,16 +812,34 @@ ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow, + struct ovs_list *next_ct_states, struct ds *output) + { + struct ovs_list recirc_queue = OVS_LIST_INITIALIZER(&recirc_queue); ++ int recirculations = 0; ++ + ofproto_trace__(ofproto, flow, packet, &recirc_queue, + ofpacts, ofpacts_len, output); + + struct oftrace_recirc_node *recirc_node; + LIST_FOR_EACH_POP (recirc_node, node, &recirc_queue) { ++ if (recirculations++ > 4096) { ++ ds_put_cstr(output, "\n\n"); ++ ds_put_char_multiple(output, '=', 79); ++ ds_put_cstr(output, "\nTrace reached the recirculation limit." ++ " Sopping the trace here."); ++ ds_put_format(output, ++ "\nQueued but not processed: %"PRIuSIZE ++ " recirculations.", ++ ovs_list_size(&recirc_queue) + 1); ++ oftrace_recirc_node_destroy(recirc_node); ++ break; ++ } + ofproto_trace_recirc_node(recirc_node, next_ct_states, output); + ofproto_trace__(ofproto, &recirc_node->flow, recirc_node->packet, + &recirc_queue, ofpacts, ofpacts_len, output); + oftrace_recirc_node_destroy(recirc_node); + } ++ /* Destroy remaining recirculation nodes, if any. 
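++     * (Anything still queued at this point was cut off by the
++     * recirculation limit above and must not be leaked.)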
*/ ++ LIST_FOR_EACH_POP (recirc_node, node, &recirc_queue) { ++ oftrace_recirc_node_destroy(recirc_node); ++ } + } + + void diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c index 57f94df544..4629406b3f 100644 --- a/ofproto/ofproto-dpif-upcall.c @@ -63455,7 +107337,7 @@ index 114aff8ea3..0fc6d2ea60 100644 enum xc_type type; union { diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c -index 578cbfe581..c0d1cfa94b 100644 +index 578cbfe581..d5e7a36c6f 100644 --- a/ofproto/ofproto-dpif-xlate.c +++ b/ofproto/ofproto-dpif-xlate.c @@ -66,6 +66,7 @@ @@ -63838,7 +107720,16 @@ index 578cbfe581..c0d1cfa94b 100644 dst_flow->packet_type = htonl(PT_ETH); dst_flow->nw_dst = src_flow->tunnel.ip_dst; dst_flow->nw_src = src_flow->tunnel.ip_src; -@@ -3654,14 +3788,27 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport, +@@ -3627,6 +3761,8 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport, + + if (flow->tunnel.ip_src) { + in6_addr_set_mapped_ipv4(&s_ip6, flow->tunnel.ip_src); ++ } else if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) { ++ s_ip6 = flow->tunnel.ipv6_src; + } + + err = tnl_route_lookup_flow(ctx, flow, &d_ip6, &s_ip6, &out_dev); +@@ -3654,14 +3790,27 @@ native_tunnel_output(struct xlate_ctx *ctx, const struct xport *xport, err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac); if (err) { @@ -63868,7 +107759,7 @@ index 578cbfe581..c0d1cfa94b 100644 } return err; } -@@ -3827,20 +3974,17 @@ static void +@@ -3827,20 +3976,17 @@ static void patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev, struct xport *out_dev, bool is_last_action) { @@ -63896,7 +107787,7 @@ index 578cbfe581..c0d1cfa94b 100644 flow->in_port.ofp_port = out_dev->ofp_port; flow->metadata = htonll(0); memset(&flow->tunnel, 0, sizeof flow->tunnel); -@@ -3879,14 +4023,15 @@ patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev, +@@ -3879,14 +4025,15 @@ patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev, } else { /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and * the learning action look at the packet, then drop it. */ @@ -63914,7 +107805,7 @@ index 578cbfe581..c0d1cfa94b 100644 ctx->odp_actions->size = old_size; /* Undo changes that may have been done for freezing. */ -@@ -3898,18 +4043,15 @@ patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev, +@@ -3898,18 +4045,15 @@ patch_port_output(struct xlate_ctx *ctx, const struct xport *in_dev, if (independent_mirrors) { ctx->mirrors = old_mirrors; } @@ -63937,7 +107828,7 @@ index 578cbfe581..c0d1cfa94b 100644 /* The out bridge popping MPLS should have no effect on the original * bridge. */ -@@ -4099,6 +4241,16 @@ xport_has_ip(const struct xport *xport) +@@ -4099,6 +4243,16 @@ xport_has_ip(const struct xport *xport) return n_in6 ? true : false; } @@ -63954,7 +107845,7 @@ index 578cbfe581..c0d1cfa94b 100644 static bool terminate_native_tunnel(struct xlate_ctx *ctx, const struct xport *xport, struct flow *flow, struct flow_wildcards *wc, -@@ -4119,9 +4271,7 @@ terminate_native_tunnel(struct xlate_ctx *ctx, const struct xport *xport, +@@ -4119,9 +4273,7 @@ terminate_native_tunnel(struct xlate_ctx *ctx, const struct xport *xport, /* If no tunnel port was found and it's about an ARP or ICMPv6 packet, * do tunnel neighbor snooping. 
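      * Snooped ARP replies and ND advertisements populate the tunnel
      * neighbor cache that native tunneling later consults for the
      * encapsulation MAC addresses.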
*/ if (*tnl_port == ODPP_NONE && @@ -63965,7 +107856,7 @@ index 578cbfe581..c0d1cfa94b 100644 tnl_neigh_snoop(flow, wc, ctx->xbridge->name, ctx->xin->allow_side_effects); } else if (*tnl_port != ODPP_NONE && -@@ -4151,7 +4301,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4151,7 +4303,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port); struct flow_wildcards *wc = ctx->wc; struct flow *flow = &ctx->xin->flow; @@ -63974,7 +107865,7 @@ index 578cbfe581..c0d1cfa94b 100644 union flow_vlan_hdr flow_vlans[FLOW_MAX_VLAN_HEADERS]; uint8_t flow_nw_tos; odp_port_t out_port, odp_port, odp_tnl_port; -@@ -4165,7 +4315,6 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4165,7 +4317,6 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, /* If 'struct flow' gets additional metadata, we'll need to zero it out * before traversing a patch port. */ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42); @@ -63982,7 +107873,7 @@ index 578cbfe581..c0d1cfa94b 100644 if (!check_output_prerequisites(ctx, xport, flow, check_stp)) { return; -@@ -4176,6 +4325,10 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4176,6 +4327,10 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, if (xport->pt_mode == NETDEV_PT_LEGACY_L3) { flow->packet_type = PACKET_TYPE_BE(OFPHTN_ETHERTYPE, ntohs(flow->dl_type)); @@ -63993,7 +107884,7 @@ index 578cbfe581..c0d1cfa94b 100644 } } -@@ -4205,7 +4358,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4205,7 +4360,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, * the Logical (tunnel) Port are not visible for any further * matches, while explicit set actions on tunnel metadata are. */ @@ -64002,7 +107893,7 @@ index 578cbfe581..c0d1cfa94b 100644 odp_port = tnl_port_send(xport->ofport, flow, ctx->wc); if (odp_port == ODPP_NONE) { xlate_report(ctx, OFT_WARN, "Tunneling decided against output"); -@@ -4236,7 +4389,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4236,7 +4391,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, tnl_type = tnl_port_get_type(xport->ofport); commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions, tnl_type); @@ -64011,7 +107902,7 @@ index 578cbfe581..c0d1cfa94b 100644 } } else { odp_port = xport->odp_port; -@@ -4280,7 +4433,8 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4280,7 +4435,8 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, /* Output to native tunnel port. 
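          * Native tunneling composes the encapsulation in userspace:
          *   1. route lookup for the remote IP (ovs-router),
          *   2. neighbor lookup for the next-hop MAC (tnl-neigh-cache),
          *   3. a tunnel push out the underlay bridge.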
*/ native_tunnel_output(ctx, xport, flow, odp_port, truncate, is_last_action); @@ -64021,7 +107912,7 @@ index 578cbfe581..c0d1cfa94b 100644 } else if (terminate_native_tunnel(ctx, xport, flow, wc, &odp_tnl_port)) { -@@ -4323,7 +4477,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4323,7 +4479,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, xport->xbundle)); } @@ -64030,7 +107921,7 @@ index 578cbfe581..c0d1cfa94b 100644 /* Restore flow */ memcpy(flow->vlans, flow_vlans, sizeof flow->vlans); flow->nw_tos = flow_nw_tos; -@@ -4331,6 +4485,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, +@@ -4331,6 +4487,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port, flow->dl_src = flow_dl_src; flow->packet_type = flow_packet_type; flow->dl_type = flow_dl_type; @@ -64038,7 +107929,7 @@ index 578cbfe581..c0d1cfa94b 100644 } static void -@@ -4678,7 +4833,7 @@ pick_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group) +@@ -4678,7 +4835,7 @@ pick_dp_hash_select_group(struct xlate_ctx *ctx, struct group_dpif *group) for (int i = 0; i <= hash_mask; i++) { struct ofputil_bucket *b = group->hash_map[(dp_hash + i) & hash_mask]; @@ -64047,7 +107938,7 @@ index 578cbfe581..c0d1cfa94b 100644 return b; } } -@@ -5120,6 +5275,7 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids) +@@ -5120,6 +5277,7 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids) } ctx->wc->masks.nw_ttl = 0xff; @@ -64055,7 +107946,7 @@ index 578cbfe581..c0d1cfa94b 100644 if (flow->nw_ttl > 1) { flow->nw_ttl--; return false; -@@ -5308,15 +5464,15 @@ xlate_output_reg_action(struct xlate_ctx *ctx, +@@ -5308,15 +5466,15 @@ xlate_output_reg_action(struct xlate_ctx *ctx, { uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow); if (port <= UINT16_MAX) { @@ -64076,7 +107967,7 @@ index 578cbfe581..c0d1cfa94b 100644 } else { xlate_report(ctx, OFT_WARN, "output port %"PRIu64" is out of range", port); -@@ -5525,8 +5681,16 @@ xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn) +@@ -5525,8 +5683,16 @@ xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn) if (!error) { bool success = true; if (ctx->xin->allow_side_effects) { @@ -64094,7 +107985,7 @@ index 578cbfe581..c0d1cfa94b 100644 } else if (learn->limit) { if (!ofm->temp_rule || ofm->temp_rule->state != RULE_INSERTED) { -@@ -5622,7 +5786,8 @@ xlate_sample_action(struct xlate_ctx *ctx, +@@ -5622,7 +5788,8 @@ xlate_sample_action(struct xlate_ctx *ctx, /* Scale the probability from 16-bit to 32-bit while representing * the same percentage. */ @@ -64104,7 +107995,7 @@ index 578cbfe581..c0d1cfa94b 100644 /* If ofp_port in flow sample action is equel to ofp_port, * this sample action is a input port action. 
*/ -@@ -5656,13 +5821,15 @@ xlate_sample_action(struct xlate_ctx *ctx, +@@ -5656,13 +5823,15 @@ xlate_sample_action(struct xlate_ctx *ctx, struct flow *flow = &ctx->xin->flow; tnl_port_send(xport->ofport, flow, ctx->wc); if (!ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) { @@ -64122,7 +108013,7 @@ index 578cbfe581..c0d1cfa94b 100644 } } else { xlate_report_error(ctx, -@@ -5772,21 +5939,12 @@ clone_xlate_actions(const struct ofpact *actions, size_t actions_len, +@@ -5772,21 +5941,12 @@ clone_xlate_actions(const struct ofpact *actions, size_t actions_len, struct xlate_ctx *ctx, bool is_last_action, bool group_bucket_action OVS_UNUSED) { @@ -64147,7 +108038,7 @@ index 578cbfe581..c0d1cfa94b 100644 do_xlate_actions(actions, actions_len, ctx, is_last_action, false); if (!ctx->freezing) { xlate_action_set(ctx); -@@ -5801,7 +5959,8 @@ clone_xlate_actions(const struct ofpact *actions, size_t actions_len, +@@ -5801,7 +5961,8 @@ clone_xlate_actions(const struct ofpact *actions, size_t actions_len, * avoid emitting those actions twice. Once inside * the clone, another time for the action after clone. */ xlate_commit_actions(ctx); @@ -64157,7 +108048,7 @@ index 578cbfe581..c0d1cfa94b 100644 bool old_was_mpls = ctx->was_mpls; bool old_conntracked = ctx->conntracked; -@@ -5858,14 +6017,10 @@ dp_clone_done: +@@ -5858,14 +6019,10 @@ dp_clone_done: ctx->was_mpls = old_was_mpls; /* Restore the 'base_flow' for the next action. */ @@ -64174,7 +108065,7 @@ index 578cbfe581..c0d1cfa94b 100644 } static void -@@ -6241,8 +6396,8 @@ compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc, +@@ -6241,8 +6398,8 @@ compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc, { uint16_t zone; if (ofc->zone_src.field) { @@ -64185,7 +108076,7 @@ index 578cbfe581..c0d1cfa94b 100644 zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow); if (ctx->xin->frozen_state) { -@@ -6252,12 +6407,13 @@ compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc, +@@ -6252,12 +6409,13 @@ compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc, * which will invalidate the megaflow with old the recirc_id. */ if (!mf_is_frozen_metadata(ofc->zone_src.field)) { @@ -64201,7 +108092,7 @@ index 578cbfe581..c0d1cfa94b 100644 } else { zone = ofc->zone_imm; } -@@ -6347,16 +6503,16 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, +@@ -6347,16 +6505,16 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, const struct ofpact *remaining_acts, size_t remaining_acts_len) { @@ -64222,7 +108113,7 @@ index 578cbfe581..c0d1cfa94b 100644 &ctx->xin->flow); /* If datapath doesn't support check_pkt_len action, then set the * SLOW_ACTION flag. If we don't set SLOW_ACTION, we -@@ -6366,22 +6522,17 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, +@@ -6366,22 +6524,17 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, * the packet length. This results in wrong actions being applied. 
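      * (SLOW_ACTION forces such packets up to userspace, where the
      * real packet length is known.)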
*/ ctx->xout->slow |= SLOW_ACTION; @@ -64250,7 +108141,7 @@ index 578cbfe581..c0d1cfa94b 100644 bool old_was_mpls = ctx->was_mpls; bool old_conntracked = ctx->conntracked; -@@ -6391,8 +6542,8 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, +@@ -6391,8 +6544,8 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, check_pkt_larger->pkt_len); size_t offset_attr = nl_msg_start_nested( ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER); @@ -64261,7 +108152,7 @@ index 578cbfe581..c0d1cfa94b 100644 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false); if (!ctx->freezing) { xlate_action_set(ctx); -@@ -6402,10 +6553,10 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, +@@ -6402,10 +6555,10 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, } nl_msg_end_nested(ctx->odp_actions, offset_attr); @@ -64274,7 +108165,7 @@ index 578cbfe581..c0d1cfa94b 100644 /* If the flow translation for the IF_GREATER case requires freezing, * then ctx->exit would be true. Reset to false so that we can -@@ -6416,8 +6567,8 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, +@@ -6416,8 +6569,8 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, offset_attr = nl_msg_start_nested( ctx->odp_actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL); @@ -64285,7 +108176,7 @@ index 578cbfe581..c0d1cfa94b 100644 do_xlate_actions(remaining_acts, remaining_acts_len, ctx, true, false); if (!ctx->freezing) { xlate_action_set(ctx); -@@ -6428,15 +6579,12 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, +@@ -6428,15 +6581,12 @@ xlate_check_pkt_larger(struct xlate_ctx *ctx, nl_msg_end_nested(ctx->odp_actions, offset_attr); nl_msg_end_nested(ctx->odp_actions, offset); @@ -64304,7 +108195,7 @@ index 578cbfe581..c0d1cfa94b 100644 } static void -@@ -6887,6 +7035,107 @@ xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx, +@@ -6887,6 +7037,107 @@ xlate_ofpact_unroll_xlate(struct xlate_ctx *ctx, "cookie=%#"PRIx64, a->rule_table_id, a->rule_cookie); } @@ -64412,7 +108303,7 @@ index 578cbfe581..c0d1cfa94b 100644 static void do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, struct xlate_ctx *ctx, bool is_last_action, -@@ -6928,6 +7177,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -6928,6 +7179,8 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, break; } @@ -64421,7 +108312,7 @@ index 578cbfe581..c0d1cfa94b 100644 if (OVS_UNLIKELY(ctx->xin->trace)) { struct ds s = DS_EMPTY_INITIALIZER; struct ofpact_format_params fp = { .s = &s }; -@@ -7027,6 +7278,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -7027,6 +7280,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, case OFPACT_SET_IPV4_SRC: if (flow->dl_type == htons(ETH_TYPE_IP)) { memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src); @@ -64429,7 +108320,7 @@ index 578cbfe581..c0d1cfa94b 100644 flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4; } break; -@@ -7034,12 +7286,14 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -7034,12 +7288,14 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, case OFPACT_SET_IPV4_DST: if (flow->dl_type == htons(ETH_TYPE_IP)) { memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst); @@ -64444,7 +108335,7 @@ index 578cbfe581..c0d1cfa94b 100644 wc->masks.nw_tos |= IP_DSCP_MASK; flow->nw_tos &= ~IP_DSCP_MASK; flow->nw_tos |= ofpact_get_SET_IP_DSCP(a)->dscp; -@@ -7048,6 +7302,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -7048,6 +7304,7 @@ do_xlate_actions(const struct ofpact 
*ofpacts, size_t ofpacts_len, case OFPACT_SET_IP_ECN: if (is_ip_any(flow)) { @@ -64452,7 +108343,7 @@ index 578cbfe581..c0d1cfa94b 100644 wc->masks.nw_tos |= IP_ECN_MASK; flow->nw_tos &= ~IP_ECN_MASK; flow->nw_tos |= ofpact_get_SET_IP_ECN(a)->ecn; -@@ -7056,6 +7311,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -7056,6 +7313,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, case OFPACT_SET_IP_TTL: if (is_ip_any(flow)) { @@ -64460,7 +108351,7 @@ index 578cbfe581..c0d1cfa94b 100644 wc->masks.nw_ttl = 0xff; flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl; } -@@ -7123,6 +7379,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -7123,6 +7381,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, /* Set the field only if the packet actually has it. */ if (mf_are_prereqs_ok(mf, flow, wc)) { @@ -64468,7 +108359,7 @@ index 578cbfe581..c0d1cfa94b 100644 mf_mask_field_masked(mf, ofpact_set_field_mask(set_field), wc); mf_set_flow_value_masked(mf, set_field->value, ofpact_set_field_mask(set_field), -@@ -7179,6 +7436,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, +@@ -7179,6 +7438,7 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len, case OFPACT_DEC_TTL: wc->masks.nw_ttl = 0xff; @@ -64476,7 +108367,7 @@ index 578cbfe581..c0d1cfa94b 100644 if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) { return; } -@@ -7609,6 +7867,43 @@ xlate_wc_finish(struct xlate_ctx *ctx) +@@ -7609,6 +7869,43 @@ xlate_wc_finish(struct xlate_ctx *ctx) ctx->wc->masks.vlans[i].tci = 0; } } @@ -64520,7 +108411,7 @@ index 578cbfe581..c0d1cfa94b 100644 } /* Translates the flow, actions, or rule in 'xin' into datapath actions in -@@ -7784,6 +8079,12 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) +@@ -7784,6 +8081,12 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout) goto exit; } @@ -64533,7 +108424,7 @@ index 578cbfe581..c0d1cfa94b 100644 /* Tunnel metadata in udpif format must be normalized before translation. */ if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) { const struct tun_table *tun_tab = ofproto_get_tun_tab( -@@ -8030,6 +8331,10 @@ exit: +@@ -8030,6 +8333,10 @@ exit: if (xin->odp_actions) { ofpbuf_clear(xin->odp_actions); } @@ -65367,6 +109258,18 @@ index b0262da2df..fa7973ac72 100644 #ifdef __cplusplus } #endif +diff --git a/ofproto/tunnel.c b/ofproto/tunnel.c +index 3455ed233b..80ddee78ac 100644 +--- a/ofproto/tunnel.c ++++ b/ofproto/tunnel.c +@@ -432,6 +432,7 @@ tnl_port_send(const struct ofport_dpif *ofport, struct flow *flow, + flow->tunnel.ipv6_dst = in6addr_any; + } + } ++ flow->tunnel.tp_src = 0; /* Do not carry from a previous tunnel. 
*/ + flow->tunnel.tp_dst = cfg->dst_port; + if (!cfg->out_key_flow) { + flow->tunnel.tun_id = cfg->out_key; diff --git a/ovsdb/.gitignore b/ovsdb/.gitignore index fbcefafc6e..a4f9d38f11 100644 --- a/ovsdb/.gitignore @@ -65499,7 +109402,7 @@ index 9f44007d97..b1ef2774ea 100644 break; } diff --git a/ovsdb/jsonrpc-server.c b/ovsdb/jsonrpc-server.c -index 351c39d8aa..17868f5b72 100644 +index 351c39d8aa..5d10f54293 100644 --- a/ovsdb/jsonrpc-server.c +++ b/ovsdb/jsonrpc-server.c @@ -197,9 +197,9 @@ ovsdb_jsonrpc_server_remove_db(struct ovsdb_jsonrpc_server *svr, @@ -65514,7 +109417,15 @@ index 351c39d8aa..17868f5b72 100644 ovsdb_jsonrpc_server_del_remote(node); } shash_destroy(&svr->remotes); -@@ -227,9 +227,9 @@ void +@@ -215,6 +215,7 @@ ovsdb_jsonrpc_default_options(const char *target) + options->probe_interval = (stream_or_pstream_needs_probes(target) + ? RECONNECT_DEFAULT_PROBE_INTERVAL + : 0); ++ options->dscp = DSCP_DEFAULT; + return options; + } + +@@ -227,9 +228,9 @@ void ovsdb_jsonrpc_server_set_remotes(struct ovsdb_jsonrpc_server *svr, const struct shash *new_remotes) { @@ -65526,7 +109437,7 @@ index 351c39d8aa..17868f5b72 100644 struct ovsdb_jsonrpc_remote *remote = node->data; struct ovsdb_jsonrpc_options *options = shash_find_data(new_remotes, node->name); -@@ -267,25 +267,36 @@ ovsdb_jsonrpc_server_add_remote(struct ovsdb_jsonrpc_server *svr, +@@ -267,25 +268,36 @@ ovsdb_jsonrpc_server_add_remote(struct ovsdb_jsonrpc_server *svr, int error; error = jsonrpc_pstream_open(name, &listener, options->dscp); @@ -65579,7 +109490,7 @@ index 351c39d8aa..17868f5b72 100644 } static void -@@ -585,9 +596,9 @@ ovsdb_jsonrpc_session_set_options(struct ovsdb_jsonrpc_session *session, +@@ -585,9 +597,9 @@ ovsdb_jsonrpc_session_set_options(struct ovsdb_jsonrpc_session *session, static void ovsdb_jsonrpc_session_run_all(struct ovsdb_jsonrpc_remote *remote) { @@ -65591,7 +109502,7 @@ index 351c39d8aa..17868f5b72 100644 int error = ovsdb_jsonrpc_session_run(s); if (error) { ovsdb_jsonrpc_session_close(s); -@@ -642,9 +653,9 @@ ovsdb_jsonrpc_session_get_memory_usage_all( +@@ -642,9 +654,9 @@ ovsdb_jsonrpc_session_get_memory_usage_all( static void ovsdb_jsonrpc_session_close_all(struct ovsdb_jsonrpc_remote *remote) { @@ -65603,7 +109514,7 @@ index 351c39d8aa..17868f5b72 100644 ovsdb_jsonrpc_session_close(s); } } -@@ -660,9 +671,9 @@ static void +@@ -660,9 +672,9 @@ static void ovsdb_jsonrpc_session_reconnect_all(struct ovsdb_jsonrpc_remote *remote, bool force, const char *comment) { @@ -65615,7 +109526,7 @@ index 351c39d8aa..17868f5b72 100644 if (force || !s->db_change_aware) { jsonrpc_session_force_reconnect(s->js); if (comment && jsonrpc_session_is_connected(s->js)) { -@@ -909,9 +920,9 @@ error: +@@ -909,9 +921,9 @@ error: static void ovsdb_jsonrpc_session_unlock_all(struct ovsdb_jsonrpc_session *s) { @@ -65627,7 +109538,7 @@ index 351c39d8aa..17868f5b72 100644 ovsdb_jsonrpc_session_unlock__(waiter); } } -@@ -1198,8 +1209,8 @@ static void +@@ -1198,8 +1210,8 @@ static void ovsdb_jsonrpc_trigger_remove__(struct ovsdb_jsonrpc_session *s, struct ovsdb *db) { @@ -65638,7 +109549,7 @@ index 351c39d8aa..17868f5b72 100644 if (!db || t->trigger.db == db) { ovsdb_jsonrpc_trigger_complete(t); } -@@ -1226,8 +1237,8 @@ ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *s) +@@ -1226,8 +1238,8 @@ ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *s) static void ovsdb_jsonrpc_trigger_complete_done(struct ovsdb_jsonrpc_session *s) { @@ -65649,7 +109560,7 @@ index 351c39d8aa..17868f5b72 
100644 ovsdb_jsonrpc_trigger_complete(trigger); } } -@@ -1688,8 +1699,8 @@ ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s, +@@ -1688,8 +1700,8 @@ ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s, { ovs_assert(db); @@ -65660,7 +109571,7 @@ index 351c39d8aa..17868f5b72 100644 if (m->db == db) { ovsdb_jsonrpc_monitor_destroy(m, true); } -@@ -1700,9 +1711,9 @@ ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s, +@@ -1700,9 +1712,9 @@ ovsdb_jsonrpc_monitor_preremove_db(struct ovsdb_jsonrpc_session *s, static void ovsdb_jsonrpc_monitor_remove_all(struct ovsdb_jsonrpc_session *s) { @@ -67277,7 +111188,7 @@ index d15f2f1d6d..963e937957 100644 } } diff --git a/ovsdb/transaction.c b/ovsdb/transaction.c -index db86d847c3..8eafefa4bf 100644 +index db86d847c3..fe33ff4835 100644 --- a/ovsdb/transaction.c +++ b/ovsdb/transaction.c @@ -159,15 +159,15 @@ ovsdb_txn_row_abort(struct ovsdb_txn *txn OVS_UNUSED, @@ -67299,7 +111210,17 @@ index db86d847c3..8eafefa4bf 100644 ovs_list_remove(&weak->src_node); ovs_list_init(&weak->src_node); if (hmap_node_is_null(&weak->dst_node)) { -@@ -508,11 +508,11 @@ static struct ovsdb_error * +@@ -314,7 +314,8 @@ update_row_ref_count(struct ovsdb_txn *txn, struct ovsdb_txn_row *r) + const struct ovsdb_column *column = node->data; + struct ovsdb_error *error; + +- if (bitmap_is_set(r->changed, column->index)) { ++ if (bitmap_is_set(r->changed, column->index) ++ && ovsdb_type_has_strong_refs(&column->type)) { + if (r->old && !r->new) { + error = ovsdb_txn_adjust_row_refs( + txn, r->old, column, +@@ -508,11 +509,11 @@ static struct ovsdb_error * ovsdb_txn_update_weak_refs(struct ovsdb_txn *txn OVS_UNUSED, struct ovsdb_txn_row *txn_row) { @@ -67313,7 +111234,7 @@ index db86d847c3..8eafefa4bf 100644 dst_row = CONST_CAST(struct ovsdb_row *, ovsdb_table_get_row(weak->dst_table, &weak->dst)); if (dst_row) { -@@ -529,7 +529,7 @@ ovsdb_txn_update_weak_refs(struct ovsdb_txn *txn OVS_UNUSED, +@@ -529,7 +530,7 @@ ovsdb_txn_update_weak_refs(struct ovsdb_txn *txn OVS_UNUSED, } /* Insert the weak references added in the new version of the row. 
*/ @@ -67322,7 +111243,7 @@ index db86d847c3..8eafefa4bf 100644 dst_row = CONST_CAST(struct ovsdb_row *, ovsdb_table_get_row(weak->dst_table, &weak->dst)); -@@ -544,7 +544,7 @@ ovsdb_txn_update_weak_refs(struct ovsdb_txn *txn OVS_UNUSED, +@@ -544,7 +545,7 @@ ovsdb_txn_update_weak_refs(struct ovsdb_txn *txn OVS_UNUSED, } static void @@ -67331,7 +111252,7 @@ index db86d847c3..8eafefa4bf 100644 struct ovs_list *ref_list, const union ovsdb_atom *key, const union ovsdb_atom *value, bool by_key, const struct ovsdb_column *column) -@@ -552,13 +552,13 @@ add_weak_ref(struct ovsdb_txn_row *txn_row, const struct ovsdb_row *dst_, +@@ -552,13 +553,13 @@ add_weak_ref(struct ovsdb_txn_row *txn_row, const struct ovsdb_row *dst_, struct ovsdb_row *dst = CONST_CAST(struct ovsdb_row *, dst_); struct ovsdb_weak_ref *weak; @@ -67348,7 +111269,7 @@ index db86d847c3..8eafefa4bf 100644 weak->dst_table = dst->table; weak->dst = *ovsdb_row_get_uuid(dst); ovsdb_type_clone(&weak->type, &column->type); -@@ -573,7 +573,7 @@ add_weak_ref(struct ovsdb_txn_row *txn_row, const struct ovsdb_row *dst_, +@@ -573,7 +574,7 @@ add_weak_ref(struct ovsdb_txn_row *txn_row, const struct ovsdb_row *dst_, } static void @@ -67357,7 +111278,7 @@ index db86d847c3..8eafefa4bf 100644 const union ovsdb_atom *key, const union ovsdb_atom *value, const struct ovsdb_column *column, -@@ -585,7 +585,7 @@ find_and_add_weak_ref(struct ovsdb_txn_row *txn_row, +@@ -585,7 +586,7 @@ find_and_add_weak_ref(struct ovsdb_txn_row *txn_row, : ovsdb_table_get_row(column->type.value.uuid.refTable, &value->uuid); if (row) { @@ -67366,7 +111287,7 @@ index db86d847c3..8eafefa4bf 100644 } else if (not_found) { if (uuid_is_zero(by_key ? &key->uuid : &value->uuid)) { *zero = true; -@@ -594,11 +594,36 @@ find_and_add_weak_ref(struct ovsdb_txn_row *txn_row, +@@ -594,11 +595,36 @@ find_and_add_weak_ref(struct ovsdb_txn_row *txn_row, } } @@ -67405,7 +111326,7 @@ index db86d847c3..8eafefa4bf 100644 struct shash_node *node; if (txn_row->old && !txn_row->new) { -@@ -620,6 +645,15 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) +@@ -620,6 +646,15 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) ovs_assert(ovs_list_is_empty(&weak->src_node)); ovs_list_insert(&src_txn_row->deleted_refs, &weak->src_node); } @@ -67421,7 +111342,7 @@ index db86d847c3..8eafefa4bf 100644 } if (!txn_row->new) { -@@ -630,19 +664,18 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) +@@ -630,19 +665,22 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) return NULL; } @@ -67434,6 +111355,10 @@ index db86d847c3..8eafefa4bf 100644 + unsigned int orig_n; bool zero = false; ++ if (!ovsdb_type_has_weak_refs(&column->type)) { ++ continue; ++ } ++ orig_n = datum->n; /* Collecting all key-value pairs that references deleted rows. */ @@ -67443,7 +111368,29 @@ index db86d847c3..8eafefa4bf 100644 if (column->index == weak->column_idx) { ovsdb_datum_add_unsafe(&deleted_refs, &weak->key, &weak->value, &column->type, NULL); -@@ -670,23 +703,8 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) +@@ -659,34 +697,23 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) + ovsdb_datum_destroy(&deleted_refs, &column->type); + + /* Generating the difference between old and new data. 
*/ +- if (txn_row->old) { +- ovsdb_datum_added_removed(&added, &removed, +- &txn_row->old->fields[column->index], +- datum, &column->type); +- } else { +- ovsdb_datum_init_empty(&removed); +- ovsdb_datum_clone(&added, datum, &column->type); ++ ovsdb_datum_init_empty(&added); ++ ovsdb_datum_init_empty(&removed); ++ if (datum->n != orig_n ++ || bitmap_is_set(txn_row->changed, column->index)) { ++ if (txn_row->old) { ++ ovsdb_datum_added_removed(&added, &removed, ++ &txn_row->old->fields[column->index], ++ datum, &column->type); ++ } else { ++ ovsdb_datum_clone(&added, datum, &column->type); ++ } + } /* Checking added data and creating new references. */ ovsdb_datum_init_empty(&deleted_refs); @@ -67469,7 +111416,7 @@ index db86d847c3..8eafefa4bf 100644 if (deleted_refs.n) { /* Removing all the references that doesn't point to valid rows. */ ovsdb_datum_sort_unique(&deleted_refs, column->type.key.type, -@@ -700,24 +718,8 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) +@@ -700,24 +727,8 @@ assess_weak_refs(struct ovsdb_txn *txn, struct ovsdb_txn_row *txn_row) /* Creating refs that needs to be removed on commit. This includes * both: the references that got directly removed from the datum and * references removed due to deletion of a referenced row. */ @@ -67496,7 +111443,7 @@ index db86d847c3..8eafefa4bf 100644 ovsdb_datum_destroy(&removed, &column->type); if (datum->n != orig_n) { -@@ -1094,10 +1096,10 @@ static void +@@ -1094,10 +1105,10 @@ static void ovsdb_txn_destroy_cloned(struct ovsdb_txn *txn) { ovs_assert(!txn->db); @@ -67511,7 +111458,7 @@ index db86d847c3..8eafefa4bf 100644 if (r->old) { ovsdb_row_destroy(r->old); } -@@ -1189,7 +1191,7 @@ ovsdb_txn_precheck_prereq(const struct ovsdb *db) +@@ -1189,7 +1200,7 @@ ovsdb_txn_precheck_prereq(const struct ovsdb *db) struct ovsdb_txn_progress * ovsdb_txn_propose_schema_change(struct ovsdb *db, @@ -67520,7 +111467,7 @@ index db86d847c3..8eafefa4bf 100644 const struct json *data) { struct ovsdb_txn_progress *progress = xzalloc(sizeof *progress); -@@ -1549,19 +1551,19 @@ for_each_txn_row(struct ovsdb_txn *txn, +@@ -1549,19 +1560,19 @@ for_each_txn_row(struct ovsdb_txn *txn, serial++; do { @@ -67544,7 +111491,7 @@ index db86d847c3..8eafefa4bf 100644 if (r->serial != serial) { struct ovsdb_error *error; -@@ -1629,8 +1631,8 @@ ovsdb_txn_history_destroy(struct ovsdb *db) +@@ -1629,8 +1640,8 @@ ovsdb_txn_history_destroy(struct ovsdb *db) return; } @@ -67577,7 +111524,7 @@ index 6b5bb7f24b..9991f34d24 100644 bool ovsdb_txn_progress_is_complete(const struct ovsdb_txn_progress *); const struct ovsdb_error *ovsdb_txn_progress_get_error( diff --git a/ovsdb/trigger.c b/ovsdb/trigger.c -index 726c138bf0..3a693855b9 100644 +index 726c138bf0..ab78d475c8 100644 --- a/ovsdb/trigger.c +++ b/ovsdb/trigger.c @@ -146,14 +146,14 @@ ovsdb_trigger_prereplace_db(struct ovsdb_trigger *trigger) @@ -67597,7 +111544,22 @@ index 726c138bf0..3a693855b9 100644 if (run_triggers || now - t->created >= t->timeout_msec || t->progress || t->txn_forward) { -@@ -274,8 +274,8 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now) +@@ -252,6 +252,14 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now) + return false; + } + ++ if (t->read_only) { ++ trigger_convert_error( ++ t, ovsdb_error("not allowed", "conversion is not allowed " ++ "for read-only database %s", ++ t->db->schema->name)); ++ return false; ++ } ++ + /* Validate parameters. 
*/ + const struct json *params = t->request->params; + if (params->type != JSON_ARRAY || params->array.n != 2) { +@@ -274,8 +282,8 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now) if (!error) { error = ovsdb_convert(t->db, new_schema, &newdb); } @@ -67607,7 +111569,7 @@ index 726c138bf0..3a693855b9 100644 trigger_convert_error(t, error); return false; } -@@ -286,7 +286,8 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now) +@@ -286,7 +294,8 @@ ovsdb_trigger_try(struct ovsdb_trigger *t, long long int now) /* Propose the change. */ t->progress = ovsdb_txn_propose_schema_change( @@ -67787,7 +111749,7 @@ index ef7bb4b8ee..c36a140a8e 100644 -#endif } diff --git a/python/ovs/db/idl.py b/python/ovs/db/idl.py -index 4ecdcaa197..1aa0c33d13 100644 +index 4ecdcaa197..64bf8afdcc 100644 --- a/python/ovs/db/idl.py +++ b/python/ovs/db/idl.py @@ -85,9 +85,9 @@ class Monitor(enum.IntEnum): @@ -67859,7 +111821,15 @@ index 4ecdcaa197..1aa0c33d13 100644 self.readonly = schema.readonly self._db = schema remotes = self._parse_remotes(remote) -@@ -282,15 +323,6 @@ class Idl(object): +@@ -258,6 +299,7 @@ class Idl(object): + self._server_schema_request_id = None + self._server_monitor_request_id = None + self._db_change_aware_request_id = None ++ self._monitor_cancel_request_id = None + self._server_db_name = '_Server' + self._server_db_table = 'Database' + self.server_tables = None +@@ -282,15 +324,6 @@ class Idl(object): self.cond_changed = False self.cond_seqno = 0 @@ -67875,7 +111845,7 @@ index 4ecdcaa197..1aa0c33d13 100644 def _parse_remotes(self, remote): # If remote is - # "tcp:10.0.0.1:6641,unix:/tmp/db.sock,t,s,tcp:10.0.0.2:6642" -@@ -330,7 +362,7 @@ class Idl(object): +@@ -330,7 +363,7 @@ class Idl(object): def ack_conditions(self): """Mark all requested table conditions as acked""" for table in self.tables.values(): @@ -67884,7 +111854,7 @@ index 4ecdcaa197..1aa0c33d13 100644 def sync_conditions(self): """Synchronize condition state when the FSM is restarted -@@ -356,14 +388,17 @@ class Idl(object): +@@ -356,14 +389,17 @@ class Idl(object): flushing the local cached DB contents. 
""" ack_all = self.last_id == str(uuid.UUID(int=0)) @@ -67906,7 +111876,18 @@ index 4ecdcaa197..1aa0c33d13 100644 def restart_fsm(self): # Resync data DB table conditions to avoid missing updated due to -@@ -459,6 +494,7 @@ class Idl(object): +@@ -446,6 +482,10 @@ class Idl(object): + break + else: + self.__parse_update(msg.params[1], OVSDB_UPDATE) ++ elif self.handle_monitor_canceled(msg): ++ break ++ elif self.handle_monitor_cancel_reply(msg): ++ break + elif (msg.type == ovs.jsonrpc.Message.T_REPLY + and self._monitor_request_id is not None + and self._monitor_request_id == msg.id): +@@ -459,6 +499,7 @@ class Idl(object): if not msg.result[0]: self.__clear() self.__parse_update(msg.result[2], OVSDB_UPDATE3) @@ -67914,7 +111895,7 @@ index 4ecdcaa197..1aa0c33d13 100644 elif self.state == self.IDL_S_DATA_MONITOR_COND_REQUESTED: self.__clear() self.__parse_update(msg.result, OVSDB_UPDATE2) -@@ -482,7 +518,7 @@ class Idl(object): +@@ -482,7 +523,7 @@ class Idl(object): sh.register_table(self._server_db_table) schema = sh.get_idl_schema() self._server_db = schema @@ -67923,7 +111904,41 @@ index 4ecdcaa197..1aa0c33d13 100644 self.__send_server_monitor_request() except error.Error as e: vlog.err("%s: error receiving server schema: %s" -@@ -588,10 +624,10 @@ class Idl(object): +@@ -580,6 +621,33 @@ class Idl(object): + + return initial_change_seqno != self.change_seqno + ++ def handle_monitor_canceled(self, msg): ++ if msg.type != msg.T_NOTIFY: ++ return False ++ if msg.method != "monitor_canceled": ++ return False ++ ++ if msg.params[0] == str(self.uuid): ++ params = [str(self.server_monitor_uuid)] ++ elif msg.params[0] == str(self.server_monitor_uuid): ++ params = [str(self.uuid)] ++ else: ++ return False ++ ++ mc_msg = ovs.jsonrpc.Message.create_request("monitor_cancel", params) ++ self._monitor_cancel_request_id = mc_msg.id ++ self.send_request(mc_msg) ++ self.restart_fsm() ++ return True ++ ++ def handle_monitor_cancel_reply(self, msg): ++ if msg.type != msg.T_REPLY: ++ return False ++ if msg.id != self._monitor_cancel_request_id: ++ return False ++ self._monitor_cancel_request_id = None ++ return True ++ + def compose_cond_change(self): + if not self.cond_changed: + return +@@ -588,10 +656,10 @@ class Idl(object): for table in self.tables.values(): # Always use the most recent conditions set by the IDL client when # requesting monitor_cond_change @@ -67937,7 +111952,7 @@ index 4ecdcaa197..1aa0c33d13 100644 if not change_requests: return -@@ -627,19 +663,20 @@ class Idl(object): +@@ -627,19 +695,20 @@ class Idl(object): cond = [False] # Compare the new condition to the last known condition @@ -67963,7 +111978,7 @@ index 4ecdcaa197..1aa0c33d13 100644 def wait(self, poller): """Arranges for poller.block() to wake up when self.run() has something -@@ -811,8 +848,8 @@ class Idl(object): +@@ -811,8 +880,8 @@ class Idl(object): columns.append(column) monitor_request = {"columns": columns} if method in ("monitor_cond", "monitor_cond_since") and ( @@ -67974,7 +111989,7 @@ index 4ecdcaa197..1aa0c33d13 100644 monitor_requests[table.name] = [monitor_request] args = [self._db.name, str(self.uuid), monitor_requests] -@@ -1148,13 +1185,6 @@ class Idl(object): +@@ -1148,13 +1217,6 @@ class Idl(object): return True @@ -67988,7 +112003,7 @@ index 4ecdcaa197..1aa0c33d13 100644 def _row_to_uuid(value): if isinstance(value, Row): return value.uuid -@@ -1266,7 +1296,19 @@ class Row(object): +@@ -1266,7 +1328,19 @@ class Row(object): return "{table}({data})".format( table=self._table.name, data=", 
".join("{col}={val}".format(col=c, val=getattr(self, c)) @@ -68009,7 +112024,7 @@ index 4ecdcaa197..1aa0c33d13 100644 def __getattr__(self, column_name): assert self._changes is not None -@@ -1309,7 +1351,7 @@ class Row(object): +@@ -1309,7 +1383,7 @@ class Row(object): datum = data.Datum.from_python(column.type, dlist, _row_to_uuid) elif column.type.is_map(): @@ -68018,7 +112033,7 @@ index 4ecdcaa197..1aa0c33d13 100644 if inserts is not None: dmap.update(inserts) if removes is not None: -@@ -1326,7 +1368,7 @@ class Row(object): +@@ -1326,7 +1400,7 @@ class Row(object): else: datum = inserts @@ -68027,7 +112042,7 @@ index 4ecdcaa197..1aa0c33d13 100644 def __setattr__(self, column_name, value): assert self._changes is not None -@@ -1410,7 +1452,7 @@ class Row(object): +@@ -1410,7 +1484,7 @@ class Row(object): if value: try: old_value = data.Datum.to_python(self._data[column_name], @@ -69678,7 +113693,7 @@ index 757cf7186e..77b3acddb8 100644 + +AT_CLEANUP diff --git a/tests/nsh.at b/tests/nsh.at -index 4d49f12017..6b7b6856f2 100644 +index 4d49f12017..107673589d 100644 --- a/tests/nsh.at +++ b/tests/nsh.at @@ -27,7 +27,7 @@ AT_CHECK([ @@ -69771,7 +113786,63 @@ index 4d49f12017..6b7b6856f2 100644 bridge("br0") ------------- -@@ -724,7 +724,7 @@ ovs-appctl time/warp 1000 +@@ -521,51 +521,45 @@ AT_CHECK([ + set interface vxlangpe32 type=vxlan options:exts=gpe options:remote_ip=30.0.0.2 options:packet_type=ptap ofport_request=3020 + + ovs-appctl netdev-dummy/ip4addr br-p1 10.0.0.1/24 +- ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 + ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 + + ovs-appctl netdev-dummy/ip4addr br-p2 20.0.0.2/24 +- ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 + ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 + + ovs-appctl netdev-dummy/ip4addr br-p3 30.0.0.3/24 +- ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 + ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 + ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 + ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 + ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 + ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 + ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3 + ], [0], [stdout]) + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User: ++ ovs-appctl ovs/route/show | grep Cached: | sort + ], [0], [dnl +-User: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 +-User: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 +-User: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 ++Cached: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 local ++Cached: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 local ++Cached: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 local + ]) + + AT_CHECK([ +@@ -724,7 +718,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the 
main thread: @@ -69780,7 +113851,7 @@ index 4d49f12017..6b7b6856f2 100644 tunnel(tun_id=0x0,src=30.0.0.1,dst=30.0.0.3,flags(-df-csum+key)),recirc_id(0),in_port(4789),packet_type(ns=1,id=0x894f),eth_type(0x894f),nsh(np=1,spi=0x3000,si=255), packets:1, bytes:108, used:0.0s, actions:pop_nsh(),recirc(0x1) tunnel(tun_id=0x0,src=30.0.0.1,dst=30.0.0.3,flags(-df-csum+key)),recirc_id(0x1),in_port(4789),packet_type(ns=1,id=0x800),eth_type(0x0800),ipv4(frag=no), packets:1, bytes:84, used:0.0s, actions:push_eth(src=00:00:00:00:00:00,dst=aa:55:aa:55:00:03),6 ]) -@@ -778,8 +778,8 @@ ovs-appctl time/warp 1000 +@@ -778,8 +772,8 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -69909,7 +113980,7 @@ index 2c7e163bd6..7be6628c34 100644 AT_CLEANUP diff --git a/tests/ofproto-dpif.at b/tests/ofproto-dpif.at -index 7c2edeb9d4..16c8af67ed 100644 +index 7c2edeb9d4..23f29ac8c3 100644 --- a/tests/ofproto-dpif.at +++ b/tests/ofproto-dpif.at @@ -29,6 +29,58 @@ AT_CHECK([ovs-appctl revalidator/wait]) @@ -70120,7 +114191,27 @@ index 7c2edeb9d4..16c8af67ed 100644 OVS_VSWITCHD_STOP AT_CLEANUP -@@ -498,6 +550,72 @@ AT_CHECK([sed -n '/member p2/,/^$/p' bond3.txt | grep 'hash'], [0], [ignore]) +@@ -495,9 +547,92 @@ ovs-appctl time/warp 1000 100 + ovs-appctl bond/show > bond3.txt + AT_CHECK([sed -n '/member p2/,/^$/p' bond3.txt | grep 'hash'], [0], [ignore]) + ++# Check that both ports doing down and back up doesn't break statistics. ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p1 down], 0, [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 down], 0, [OK ++]) ++ovs-appctl time/warp 1000 100 ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p1 up], 0, [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/set-admin-state p2 up], 0, [OK ++]) ++ovs-appctl time/warp 1000 100 ++ ++AT_CHECK([SEND_TCP_BOND_PKTS([p5], [5], [65500])]) ++# We sent 49125 KB of data total in 3 batches. No hash should have more ++# than that amount of load. Just checking that it is within 5 digits. ++AT_CHECK([ovs-appctl bond/show | grep -E '[[0-9]]{6}'], [1]) ++ OVS_VSWITCHD_STOP() AT_CLEANUP @@ -70193,7 +114284,7 @@ index 7c2edeb9d4..16c8af67ed 100644 # Makes sure recirculation does not change the way packet is handled. 
AT_SETUP([ofproto-dpif - balance-tcp bonding, different recirc flow ]) -@@ -570,7 +688,7 @@ table=1 in_port=2 priority=1500 icmp actions=output(17),resubmit(,2) +@@ -570,7 +705,7 @@ table=1 in_port=2 priority=1500 icmp actions=output(17),resubmit(,2) table=1 in_port=3 priority=1500 icmp actions=output(14),resubmit(,2) ]) AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) @@ -70202,7 +114293,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: 10,11,12,13,14,15,16,17,18,19,20,21 ]) -@@ -584,7 +702,7 @@ echo "table=0 in_port=1 actions=output(10),goto_table(1)" > flows.txt +@@ -584,7 +719,7 @@ echo "table=0 in_port=1 actions=output(10),goto_table(1)" > flows.txt for i in `seq 1 63`; do echo "table=$i actions=goto_table($(($i+1)))"; done >> flows.txt echo "table=64 actions=output(11)" >> flows.txt AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt]) @@ -70211,7 +114302,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: 10,11 ]) -@@ -600,9 +718,9 @@ table=1 ip actions=write_actions(output(13)),goto_table(2) +@@ -600,9 +735,9 @@ table=1 ip actions=write_actions(output(13)),goto_table(2) table=2 ip actions=set_field:192.168.3.91->ip_src,output(11) ]) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt]) @@ -70223,7 +114314,7 @@ index 7c2edeb9d4..16c8af67ed 100644 Datapath actions: 10,set(ipv4(src=192.168.3.91)),11,set(ipv4(src=192.168.3.90)),13 ]) OVS_VSWITCHD_STOP -@@ -617,7 +735,7 @@ table=1 icmp6 actions=write_actions(output(13)),goto_table(2) +@@ -617,7 +752,7 @@ table=1 icmp6 actions=write_actions(output(13)),goto_table(2) table=2 in_port=1,icmp6,icmpv6_type=135 actions=set_field:fe80::4->nd_target,set_field:cc:cc:cc:cc:cc:cc->nd_sll,output(11) ]) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt]) @@ -70232,7 +114323,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -4 stdout], [0], [Megaflow: recirc_id=0,eth,icmp6,in_port=1,nw_frag=no,icmp_type=0x87/0xff,icmp_code=0x0/0xff,nd_target=fe80::2020,nd_sll=66:55:44:33:22:11 Datapath actions: 10,set(nd(target=fe80::4,sll=cc:cc:cc:cc:cc:cc)),11,set(nd(target=fe80::3,sll=aa:aa:aa:aa:aa:aa)),13 -@@ -635,7 +753,7 @@ table=0 in_port=1,ip actions=output(10),write_actions(set_field:192.168.3.90->ip +@@ -635,7 +770,7 @@ table=0 in_port=1,ip actions=output(10),write_actions(set_field:192.168.3.90->ip table=1 tcp actions=set_field:91->tp_src,output(11),clear_actions ]) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flows br0 flows.txt]) @@ -70241,7 +114332,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -2 stdout], [0], [Megaflow: recirc_id=0,eth,tcp,in_port=1,nw_frag=no,tp_src=8 Datapath actions: 10,set(tcp(src=91)),11 -@@ -649,7 +767,7 @@ add_of_ports br0 1 10 11 +@@ -649,7 +784,7 @@ add_of_ports br0 1 10 11 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=set_field:192.168.3.90->ip_src,group:123,bucket=output:11']) AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=123,type=all,bucket=output:10']) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=group:1234']) @@ -70250,7 +114341,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),11 ]) -@@ -661,11 +779,11 @@ OVS_VSWITCHD_START +@@ -661,11 +796,11 @@ OVS_VSWITCHD_START add_of_ports br0 1 10 11 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=output:10,set_field:192.168.3.90->ip_src,bucket=output:11']) AT_CHECK([ovs-ofctl -O 
OpenFlow12 add-flow br0 'ip actions=group:1234']) @@ -70264,7 +114355,7 @@ index 7c2edeb9d4..16c8af67ed 100644 Datapath actions: set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),11 ]) OVS_VSWITCHD_STOP -@@ -676,7 +794,7 @@ OVS_VSWITCHD_START +@@ -676,7 +811,7 @@ OVS_VSWITCHD_START add_of_ports br0 1 10 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 group_id=1234,type=indirect,bucket=output:10]) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=group:1234']) @@ -70273,7 +114364,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: 10 ]) -@@ -697,7 +815,7 @@ done +@@ -697,7 +832,7 @@ done AT_CHECK([ovs-appctl dpctl/dump-flows | sed 's/dp_hash(.*\/0xf)/dp_hash(0xXXXX\/0xf)/' | sed 's/packets.*actions:/actions:/' | strip_ufid | strip_used | sort], [0], [dnl flow-dump from the main thread: recirc_id(0),in_port(1),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(frag=no), actions:hash(sym_l4(0)),recirc(0x1) @@ -70282,7 +114373,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -708,11 +826,11 @@ OVS_VSWITCHD_START +@@ -708,11 +843,11 @@ OVS_VSWITCHD_START add_of_ports br0 1 10 11 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=output:10,set_field:192.168.3.90->ip_src,bucket=output:11']) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(group:1234)']) @@ -70296,7 +114387,7 @@ index 7c2edeb9d4..16c8af67ed 100644 Datapath actions: set(ipv4(src=192.168.3.90)),10,set(ipv4(src=192.168.0.1)),11 ]) OVS_VSWITCHD_STOP -@@ -723,7 +841,7 @@ OVS_VSWITCHD_START +@@ -723,7 +858,7 @@ OVS_VSWITCHD_START add_of_ports br0 1 10 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 group_id=1234,type=indirect,bucket=output:10]) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(group:1234)']) @@ -70305,7 +114396,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: 10 ]) -@@ -743,11 +861,11 @@ add_of_ports br0 1 +@@ -743,11 +878,11 @@ add_of_ports br0 1 add_of_ports br1 2 AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br1 'ip actions=write_actions(pop_vlan,output:2)']) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=output:10']) @@ -70319,7 +114410,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: pop_vlan,2 ]) -@@ -1011,7 +1129,7 @@ OVS_VSWITCHD_START +@@ -1011,7 +1146,7 @@ OVS_VSWITCHD_START add_of_ports br0 1 10 11 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=ff,bucket=watch_port:10,output:10,bucket=watch_port:11,output:11']) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(group:1234)']) @@ -70328,7 +114419,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -1 stdout], [0], [Datapath actions: 10 ]) -@@ -1142,7 +1260,7 @@ OVS_VSWITCHD_START +@@ -1142,7 +1277,7 @@ OVS_VSWITCHD_START add_of_ports br0 1 10 11 AT_CHECK([ovs-ofctl -O OpenFlow12 add-group br0 'group_id=1234,type=all,bucket=output:10,move:NXM_NX_REG1[[]]->NXM_OF_IP_SRC[[]],bucket=output:11']) AT_CHECK([ovs-ofctl -O OpenFlow12 add-flow br0 'ip actions=write_actions(load:0xffffffff->NXM_NX_REG1[[]],move:NXM_NX_REG1[[]]->NXM_NX_REG2[[]],group:1234)']) @@ -70337,7 +114428,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([tail -2 stdout], [0], [Megaflow: recirc_id=0,eth,ip,in_port=1,nw_src=192.168.0.1,nw_frag=no Datapath actions: set(ipv4(src=255.255.255.255)),10,set(ipv4(src=192.168.0.1)),11 -@@ -1288,18 +1406,18 @@ table=1 in_port=1 action=dec_ttl,output:3 +@@ -1288,18 +1423,18 @@ 
table=1 in_port=1 action=dec_ttl,output:3 AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=111,tos=0,ttl=2,frag=no)' -generate], [0], [stdout]) AT_CHECK([tail -4 stdout], [0], [ @@ -70360,7 +114451,7 @@ index 7c2edeb9d4..16c8af67ed 100644 Datapath actions: set(ipv6(hlimit=127)),2,set(ipv6(hlimit=126)),3,4 ]) -@@ -1311,7 +1429,7 @@ ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:05,dst=50: +@@ -1311,7 +1446,7 @@ ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:05,dst=50: OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=34 in_port=1 (via invalid_ttl) data_len=34 (unbuffered) @@ -70369,7 +114460,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP AT_CLEANUP -@@ -1409,7 +1527,7 @@ AT_CHECK([ovs-vsctl -- \ +@@ -1409,7 +1544,7 @@ AT_CHECK([ovs-vsctl -- \ --id=@q2 create Queue dscp=2], [0], [ignore]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(9),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=1.1.1.1,dst=2.2.2.2,proto=1,tos=0xff,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout]) AT_CHECK([tail -2 stdout], [0], @@ -70378,7 +114469,7 @@ index 7c2edeb9d4..16c8af67ed 100644 Datapath actions: dnl 100,dnl set(ipv4(tos=0x4/0xfc)),set(skb_priority(0x1)),1,dnl -@@ -1497,13 +1615,13 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 6]) +@@ -1497,13 +1632,13 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 6]) OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered) @@ -70395,7 +114486,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -1560,13 +1678,13 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 6]) +@@ -1560,13 +1695,13 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 6]) OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered) @@ -70412,7 +114503,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -1627,13 +1745,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -1627,13 +1762,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered) @@ -70429,7 +114520,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Hit table 0, Miss all other tables, sent to controller -@@ -1647,13 +1765,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -1647,13 +1782,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered) @@ -70446,7 +114537,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -1690,13 +1808,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -1690,13 +1825,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=2 cookie=0x0 total_len=118 in_port=1 (via action) data_len=118 (unbuffered) @@ -70463,7 +114554,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Hit table 1, Miss all 
other tables, sent to controller -@@ -1710,13 +1828,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -1710,13 +1845,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=253 cookie=0x0 total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered) @@ -70480,7 +114571,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -1900,13 +2018,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -1900,13 +2035,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via no_match) data_len=118 (unbuffered) @@ -70497,7 +114588,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Singleton controller action. -@@ -1920,11 +2038,11 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -1920,11 +2055,11 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered) @@ -70512,7 +114603,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Modified controller action. -@@ -1938,13 +2056,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -1938,13 +2073,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70529,7 +114620,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Modified VLAN controller action. -@@ -1958,13 +2076,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -1958,13 +2093,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=38 in_port=1 (via action) data_len=38 (unbuffered) @@ -70546,7 +114637,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Checksum TCP. -@@ -1978,31 +2096,31 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -1978,31 +2113,31 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x1 total_len=54 in_port=1 (via action) data_len=54 (unbuffered) @@ -70587,7 +114678,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Checksum UDP. -@@ -2016,31 +2134,31 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -2016,31 +2151,31 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x1 total_len=60 in_port=1 (via action) data_len=60 (unbuffered) @@ -70628,7 +114719,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Modified ARP controller action. 
-@@ -2087,31 +2205,31 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 18]) +@@ -2087,31 +2222,31 @@ OVS_WAIT_UNTIL([test `wc -l < ofctl_monitor.log` -ge 18]) OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x1 total_len=98 in_port=1 (via action) data_len=98 (unbuffered) @@ -70669,7 +114760,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-ofctl dump-flows br0 | ofctl_strip | sort], [0], [dnl -@@ -2151,13 +2269,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2151,13 +2286,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (xid=0x0): total_len=118 in_port=1 (via action) data_len=118 (unbuffered) @@ -70686,7 +114777,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -2325,13 +2443,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2325,13 +2460,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xa total_len=34 in_port=1 (via action) data_len=34 (unbuffered) @@ -70703,7 +114794,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl Modified MPLS controller action. -@@ -2539,13 +2657,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2539,13 +2674,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70720,7 +114811,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2565,13 +2683,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2565,13 +2700,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70737,7 +114828,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2591,13 +2709,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2591,13 +2726,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70754,7 +114845,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2617,13 +2735,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2617,13 +2752,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70771,7 +114862,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2643,13 +2761,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2643,13 +2778,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70788,7 +114879,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2669,13 +2787,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2669,13 +2804,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70805,7 +114896,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2695,13 +2813,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2695,13 +2830,13 @@ 
OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70822,7 +114913,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2721,13 +2839,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2721,13 +2856,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70839,7 +114930,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2771,13 +2889,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2771,13 +2906,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70856,7 +114947,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2797,13 +2915,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2797,13 +2932,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70873,7 +114964,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2823,13 +2941,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2823,13 +2958,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70890,7 +114981,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2931,13 +3049,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2931,13 +3066,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70907,7 +114998,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2959,13 +3077,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2959,13 +3094,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70924,7 +115015,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -2986,13 +3104,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -2986,13 +3121,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0xe total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70941,7 +115032,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -3314,13 +3432,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -3314,13 +3449,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (OF1.2) (xid=0x0): table_id=1 total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70958,7 +115049,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -3363,13 +3481,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -3363,13 +3498,13 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([strip_metadata < ofctl_monitor.log], 
[0], [dnl OFPT_PACKET_IN (OF1.2) (xid=0x0): total_len=58 in_port=1 (via action) data_len=58 (unbuffered) @@ -70975,7 +115066,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -3402,13 +3520,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -3402,13 +3537,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (xid=0x0): total_len=54 in_port=1 (via action) data_len=54 (unbuffered) @@ -70992,7 +115083,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -3444,13 +3562,13 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) +@@ -3444,13 +3579,13 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered) @@ -71009,7 +115100,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -3516,13 +3634,13 @@ send: OFPT_SET_ASYNC (OF1.3) (xid=0x2): +@@ -3516,13 +3651,13 @@ send: OFPT_SET_ASYNC (OF1.3) (xid=0x2): REQUESTFORWARD: (off) dnl OFPT_PACKET_IN (OF1.3) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered) @@ -71026,7 +115117,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -3558,13 +3676,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -3558,13 +3693,13 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([ovs-appctl revalidator/purge], [0]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (OF1.4) (xid=0x0): cookie=0x0 total_len=54 in_port=1 (via no_match) data_len=54 (unbuffered) @@ -71043,7 +115134,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -3607,34 +3725,34 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) +@@ -3607,34 +3742,34 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered) @@ -71088,7 +115179,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -3679,34 +3797,34 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) +@@ -3679,34 +3814,34 @@ AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (OF1.4) (xid=0x0): table_id=1 cookie=0x0 total_len=54 in_port=1 (via action) data_len=54 (unbuffered) @@ -71133,7 +115224,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore]) -@@ -3751,10 +3869,10 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -3751,10 +3886,10 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl OFPT_PACKET_IN (OF1.3) (xid=0x0): table_id=43 cookie=0x0 total_len=98 metadata=0x67871d4d000000,in_port=1 (via action) data_len=98 (unbuffered) @@ -71146,7 +115237,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -5007,7 +5125,7 @@ ovs-vsctl \ +@@ -5007,7 +5142,7 @@ ovs-vsctl \ AT_CHECK([ovs-ofctl add-flow br0 action=output:1]) # "in_port" defaults to OFPP_NONE if it's not specified. 
@@ -71155,7 +115246,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([ovs-appctl ofproto/trace br0 "$flow"], [0], [stdout]) AT_CHECK_UNQUOTED([tail -1 stdout], [0], [Datapath actions: 1,2 -@@ -5102,7 +5220,7 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) +@@ -5102,7 +5237,7 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) flow="in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)" AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "$flow"], [0], [stdout]) AT_CHECK_UNQUOTED([tail -1 stdout], [0], @@ -71164,7 +115255,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) flow="in_port(2),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)" -@@ -5141,7 +5259,7 @@ flow="in_port(2),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x080 +@@ -5141,7 +5276,7 @@ flow="in_port(2),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x080 AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "$flow"], [0], [stdout]) actual=`tail -1 stdout | sed 's/Datapath actions: //'` @@ -71173,7 +115264,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([ovs-dpctl normalize-actions "$flow" "$expected"], [0], [stdout]) mv stdout expout AT_CHECK([ovs-dpctl normalize-actions "$flow" "$actual"], [0], [expout]) -@@ -5317,7 +5435,7 @@ ovs-vsctl \ +@@ -5317,7 +5452,7 @@ ovs-vsctl \ AT_CHECK([ovs-ofctl add-flow br0 action=output:1]) # "in_port" defaults to OFPP_NONE if it's not specified. @@ -71182,7 +115273,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_CHECK([ovs-appctl ofproto/trace br0 "$flow"], [0], [stdout]) AT_CHECK_UNQUOTED([tail -1 stdout], [0], [Datapath actions: 1,trunc(100),2 -@@ -5409,7 +5527,7 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) +@@ -5409,7 +5544,7 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) flow="in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)" AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "$flow"], [0], [stdout]) AT_CHECK_UNQUOTED([tail -1 stdout], [0], @@ -71191,7 +115282,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) flow="in_port(2),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)" -@@ -5464,7 +5582,7 @@ ovs-vsctl \ +@@ -5464,7 +5599,7 @@ ovs-vsctl \ flow="in_port=1" AT_CHECK([ovs-appctl ofproto/trace br0 "$flow"], [0], [stdout]) @@ -71200,7 +115291,7 @@ index 7c2edeb9d4..16c8af67ed 100644 OVS_VSWITCHD_STOP AT_CLEANUP -@@ -5512,11 +5630,11 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) +@@ -5512,11 +5647,11 @@ AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) flow="in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)" AT_CHECK([ovs-appctl ofproto/trace ovs-dummy "$flow" -generate], [0], [stdout]) @@ -71214,7 +115305,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -5573,7 +5691,70 @@ check_flows () { +@@ -5573,7 +5708,70 @@ check_flows () { echo "n_packets=$n" test "$n" = 1 } @@ -71286,7 +115377,7 @@ index 7c2edeb9d4..16c8af67ed 100644 OVS_VSWITCHD_STOP AT_CLEANUP -@@ -6202,6 +6383,20 @@ AT_CHECK([tail -2 stderr], [0], [dnl +@@ -6202,6 +6400,20 @@ AT_CHECK([tail -2 stderr], [0], [dnl ovs-appctl: ovs-vswitchd: server returned an error ]) @@ -71307,7 
+115398,7 @@ index 7c2edeb9d4..16c8af67ed 100644 OVS_VSWITCHD_STOP AT_CLEANUP -@@ -7004,6 +7199,29 @@ AT_CHECK([ovs-appctl coverage/read-counter mac_learning_static_none_move], [0], +@@ -7004,6 +7216,29 @@ AT_CHECK([ovs-appctl coverage/read-counter mac_learning_static_none_move], [0], OVS_VSWITCHD_STOP AT_CLEANUP @@ -71337,7 +115428,7 @@ index 7c2edeb9d4..16c8af67ed 100644 AT_SETUP([ofproto-dpif - basic truncate action]) OVS_VSWITCHD_START add_of_ports br0 1 2 3 4 5 -@@ -7031,7 +7249,7 @@ dnl An 170 byte packet +@@ -7031,7 +7266,7 @@ dnl An 170 byte packet AT_CHECK([ovs-appctl netdev-dummy/receive p1 '000c29c8a0a4005056c0000808004500009cb4a6000040019003c0a8da01c0a8da640800cb5fa762000556f431ad0009388e08090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f']) AT_CHECK([ovs-ofctl parse-pcap p1.pcap], [0], [dnl @@ -71346,7 +115437,41 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-appctl revalidator/purge], [0]) -@@ -7600,13 +7818,28 @@ dnl configure bridge IPFIX and ensure that sample action generation works at the +@@ -7294,12 +7529,14 @@ dummy@ovs-dummy: hit:0 missed:0 + vm1 5/3: (dummy: ifindex=2011) + ]) + +-dnl set up route to 1.1.2.92 via br0 and action=normal ++dnl Add 1.1.2.92 to br0 and action=normal + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK +-]) + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++]) + + dnl Prime ARP Cache for 1.1.2.92 + AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)']) +@@ -7310,10 +7547,13 @@ ovs-vsctl \ + --id=@sf create sflow targets=\"127.0.0.1:$SFLOW_PORT\" agent=127.0.0.1 \ + header=128 sampling=1 polling=0 + +-dnl set up route to 192.168.1.2 via br0 ++dnl Add 192.168.1.2 to br0, + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 192.168.1.1/16], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 192.168.0.0/16 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 192.168.0.0/16 dev br0 SRC 192.168.1.1 local + ]) + + dnl add rule for int-br to force packet onto tunnel. There is no ifindex +@@ -7600,13 +7840,28 @@ dnl configure bridge IPFIX and ensure that sample action generation works at the dnl datapath level. AT_SETUP([ofproto-dpif - Bridge IPFIX sanity check]) OVS_VSWITCHD_START @@ -71376,7 +115501,7 @@ index 7c2edeb9d4..16c8af67ed 100644 dnl Send some packets that should be sampled. 
for i in `seq 1 3`; do AT_CHECK([ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800)']) -@@ -8666,7 +8899,7 @@ recirc_id(0),in_port(100),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(src=192.1 +@@ -8666,7 +8921,7 @@ recirc_id(0),in_port(100),packet_type(ns=0,id=0),eth_type(0x0800),ipv4(src=192.1 ]) AT_CHECK([grep -e '|ofproto_dpif_xlate|WARN|' ovs-vswitchd.log | sed "s/^.*|WARN|//"], [0], [dnl @@ -71385,7 +115510,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP(["/stack underflow/d"]) -@@ -8717,6 +8950,40 @@ AT_CHECK([tail -1 stdout], [0], +@@ -8717,6 +8972,40 @@ AT_CHECK([tail -1 stdout], [0], OVS_VSWITCHD_STOP AT_CLEANUP @@ -71426,7 +115551,7 @@ index 7c2edeb9d4..16c8af67ed 100644 dnl ---------------------------------------------------------------------- AT_BANNER([ofproto-dpif -- megaflows]) -@@ -9855,7 +10122,7 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -9855,7 +10144,7 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=86 in_port=1 (via no_match) data_len=86 (unbuffered) @@ -71435,7 +115560,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -9906,7 +10173,7 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -9906,7 +10195,7 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=86 in_port=1 (via action) data_len=86 (unbuffered) @@ -71444,7 +115569,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10166,10 +10433,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10166,10 +10455,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. We only see the latter two packets, not the first. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x0 total_len=106 reg0=0x1,reg1=0x4d2,reg2=0x1,reg3=0x1,reg4=0x1,in_port=1 (via action) data_len=106 (unbuffered) @@ -71457,7 +115582,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl -P nxt_packet_in --detach --no-chdir --pidfile 2> ofctl_monitor.log]) -@@ -10187,10 +10454,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10187,10 +10476,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. We should see both packets AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=6 cookie=0x0 total_len=106 reg0=0x1,reg1=0x4d2,reg2=0x1,reg3=0x1,reg4=0x1,in_port=1 (via action) data_len=106 (unbuffered) @@ -71470,7 +115595,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10239,10 +10506,10 @@ dnl Note that the first packet doesn't have the ct_state bits set. This +@@ -10239,10 +10528,10 @@ dnl Note that the first packet doesn't have the ct_state bits set. This dnl happens because the ct_state field is available only after recirc. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered) @@ -71483,7 +115608,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl -P nxt_packet_in --detach --no-chdir --pidfile 2> ofctl_monitor.log]) -@@ -10261,10 +10528,10 @@ dnl Note that the first packet doesn't have the ct_state bits set. This +@@ -10261,10 +10550,10 @@ dnl Note that the first packet doesn't have the ct_state bits set. This dnl happens because the ct_state field is available only after recirc. 
AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered) @@ -71496,7 +115621,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl -@@ -10320,9 +10587,9 @@ dnl Note that the first packet doesn't have the ct_state bits set. This +@@ -10320,9 +10609,9 @@ dnl Note that the first packet doesn't have the ct_state bits set. This dnl happens because the ct_state field is available only after recirc. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=126 in_port=1 (via action) data_len=126 (unbuffered) @@ -71508,7 +115633,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10433,7 +10700,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10433,7 +10722,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. Only one reply must be there AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered) @@ -71517,7 +115642,7 @@ index 7c2edeb9d4..16c8af67ed 100644 dnl OFPT_ECHO_REQUEST (xid=0x0): 0 bytes of payload ]) -@@ -10467,7 +10734,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10467,7 +10756,7 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=86 ct_state=inv|trk,ipv6,in_port=2 (via action) data_len=86 (unbuffered) @@ -71526,7 +115651,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10523,16 +10790,16 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10523,16 +10812,16 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. We only see the latter two packets (for each zone), not the first. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered) @@ -71547,7 +115672,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10579,10 +10846,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10579,10 +10868,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. We only see the latter two packets, not the first. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered) @@ -71560,7 +115685,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10629,10 +10896,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10629,10 +10918,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. We only see the first and the last packet AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=47 ct_state=new|trk,ct_nw_src=172.16.0.1,ct_nw_dst=172.16.0.2,ct_nw_proto=17,ct_tp_src=41614,ct_tp_dst=5555,ip,in_port=1 (via action) data_len=47 (unbuffered) @@ -71573,7 +115698,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10681,19 +10948,19 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10681,19 +10970,19 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. 
AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered) @@ -71598,7 +115723,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -10738,10 +11005,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -10738,10 +11027,10 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_label=0x1,ct_nw_src=10.1.1.1,ct_nw_dst=10.1.1.2,ct_nw_proto=17,ct_tp_src=1,ct_tp_dst=2,ip,in_port=2 (via action) data_len=106 (unbuffered) @@ -71611,7 +115736,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -11152,16 +11419,16 @@ dnl Note that the first packet doesn't have the ct_state bits set. This +@@ -11152,16 +11441,16 @@ dnl Note that the first packet doesn't have the ct_state bits set. This dnl happens because the ct_state field is available only after recirc. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): cookie=0x0 total_len=106 in_port=1 (via action) data_len=106 (unbuffered) @@ -71632,7 +115757,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) dnl The next test verifies that ct_clear at the datapath only gets executed -@@ -11235,13 +11502,13 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) +@@ -11235,13 +11524,13 @@ OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit]) dnl Check this output. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN (xid=0x0): table_id=1 cookie=0x0 total_len=106 ct_state=est|rpl|trk,ct_nw_src=10.1.2.100,ct_nw_dst=10.1.2.200,ct_nw_proto=17,ct_tp_src=6,ct_tp_dst=6,ip,in_port=2 (via action) data_len=106 (unbuffered) @@ -71649,7 +115774,7 @@ index 7c2edeb9d4..16c8af67ed 100644 ]) OVS_VSWITCHD_STOP -@@ -11504,7 +11771,7 @@ ovs-ofctl dump-flows br0 +@@ -11504,7 +11793,7 @@ ovs-ofctl dump-flows br0 AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.10.10.2,dst=10.10.10.1,proto=1,tos=1,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout]) AT_CHECK([tail -3 stdout], [0], [dnl @@ -71659,10 +115784,10 @@ index 7c2edeb9d4..16c8af67ed 100644 Translation failed (Recursion too deep), packet is dropped. ]) diff --git a/tests/ofproto-macros.at b/tests/ofproto-macros.at -index 736d9809cb..b18f0fbc1e 100644 +index 736d9809cb..9e32f53c0c 100644 --- a/tests/ofproto-macros.at +++ b/tests/ofproto-macros.at -@@ -134,6 +134,21 @@ strip_ufid () { +@@ -134,6 +134,36 @@ strip_ufid () { sed 's/mega_ufid:[[-0-9a-f]]* // s/ufid:[[-0-9a-f]]* //' } @@ -71673,6 +115798,21 @@ index 736d9809cb..b18f0fbc1e 100644 + s/bytes:[[0-9]]*/bytes:0/' +} + ++# Strips key32 field from output. ++strip_key32 () { ++ sed 's/key32([[0-9 \/]]*),//' ++} ++ ++# Strips packet-type from output. ++strip_ptype () { ++ sed 's/packet_type(ns=[[0-9]]*,id=[[0-9]]*),//' ++} ++ ++# Strips bare eth from output. ++strip_eth () { ++ sed 's/eth(),//' ++} ++ +# Changes all 'recirc(...)' and 'recirc=...' to say 'recirc(<recirc_id>)' and +# 'recirc=<recirc_id>' respectively. This should make output easier to +# compare. 
@@ -71684,7 +115824,7 @@ index 736d9809cb..b18f0fbc1e 100644 m4_divert_pop([PREPARE_TESTS]) m4_define([TESTABLE_LOG], [-vPATTERN:ANY:'%c|%p|%m']) -@@ -175,6 +190,7 @@ m4_define([_OVS_VSWITCHD_START], +@@ -175,6 +205,7 @@ m4_define([_OVS_VSWITCHD_START], /dpdk|INFO|DPDK Disabled - Use other_config:dpdk-init to enable/d /netlink_socket|INFO|netlink: could not enable listening to all nsid/d /probe tc:/d @@ -71692,7 +115832,7 @@ index 736d9809cb..b18f0fbc1e 100644 /tc: Using policy/d']]) ]) -@@ -239,6 +255,7 @@ check_logs () { +@@ -239,6 +270,7 @@ check_logs () { /timeval.*context switches: [[0-9]]* voluntary, [[0-9]]* involuntary/d /ovs_rcu.*blocked [[0-9]]* ms waiting for .* to quiesce/d /Dropped [[0-9]]* log messages/d @@ -71964,7 +116104,7 @@ index 156d3e058c..2ad5741fa1 100644 +OVS_VSWITCHD_STOP(["/br0<->unix:testcontroller: connection failed/d"]) +AT_CLEANUP diff --git a/tests/ovs-macros.at b/tests/ovs-macros.at -index 66545da572..d09dbb4cd5 100644 +index 66545da572..c03258088a 100644 --- a/tests/ovs-macros.at +++ b/tests/ovs-macros.at @@ -134,7 +134,7 @@ parent_pid () { @@ -72010,6 +116150,22 @@ index 66545da572..d09dbb4cd5 100644 [AT_LINE], [while $1])]) dnl OVS_APP_EXIT_AND_WAIT(DAEMON) +@@ -292,6 +308,15 @@ m4_define([OVS_APP_EXIT_AND_WAIT_BY_TARGET], + AT_CHECK([ovs-appctl --target=$1 exit]) + OVS_WAIT_WHILE([kill -0 $TMPPID 2>/dev/null])]) + ++dnl OVS_DAEMONIZE([command], [pidfile]) ++dnl ++dnl Run 'command' as a background process and record its pid to 'pidfile' to ++dnl allow cleanup on exit. ++m4_define([OVS_DAEMONIZE], ++ [$1 & echo $! > $2 ++ on_exit "kill `cat $2`" ++ ]) ++ + dnl on_exit "COMMAND" + dnl + dnl Add the shell COMMAND to a collection executed when the current test diff --git a/tests/ovs-ofctl.at b/tests/ovs-ofctl.at index 267711bfa4..c9c67f2b1e 100644 --- a/tests/ovs-ofctl.at @@ -72048,7 +116204,7 @@ index bba4fea2bc..977b2eba1f 100644 /|ERR|/p /|EMER|/p" ovs-vswitchd.log diff --git a/tests/ovsdb-client.at b/tests/ovsdb-client.at -index 06b671df8c..2d14f1ac26 100644 +index 06b671df8c..c618cf4839 100644 --- a/tests/ovsdb-client.at +++ b/tests/ovsdb-client.at @@ -3,6 +3,7 @@ AT_BANNER([OVSDB -- ovsdb-client commands]) @@ -72075,6 +116231,25 @@ index 06b671df8c..2d14f1ac26 100644 AT_CHECK([ovsdb-tool create db schema], [0], [], [ignore]) AT_CHECK([ovsdb-server --detach --no-chdir --pidfile --remote=punix:socket db], [0], [ignore], [ignore]) sed 's/5\.1\.3/5.1.4/' < schema > schema2 +@@ -267,8 +270,8 @@ AT_CHECK([ovsdb-client --replay=./replay_dir dnl + dnl Waiting for client to exit the same way as it exited during recording. + OVS_WAIT_WHILE([test -e ovsdb-client.pid]) + +-AT_CHECK([diff monitor.stdout monitor-replay.stdout]) +-AT_CHECK([diff monitor.stderr monitor-replay.stderr]) ++AT_CHECK([diff -u monitor.stdout monitor-replay.stdout]) ++AT_CHECK([diff -u monitor.stderr monitor-replay.stderr]) + + dnl Stripping out timestamps, PIDs and poll_loop warnings from the log. + dnl Also stripping socket_util errors as sockets are not used in replay. 
+@@ -281,6 +284,6 @@ m4_define([CLEAN_LOG_FILE], + CLEAN_LOG_FILE([monitor.log], [monitor.log.clear]) + CLEAN_LOG_FILE([monitor-replay.log], [monitor-replay.log.clear]) + +-AT_CHECK([diff monitor.log.clear monitor-replay.log.clear]) ++AT_CHECK([diff -u monitor.log.clear monitor-replay.log.clear]) + + AT_CLEANUP diff --git a/tests/ovsdb-cluster.at b/tests/ovsdb-cluster.at index fc6253cfe9..9fbf5dc897 100644 --- a/tests/ovsdb-cluster.at @@ -72416,10 +116591,172 @@ index e72bf06069..fd1c7a2395 100644 [{"count":1},{"details":"cannot delete b row <0> because of 2 remaining reference(s)","error":"referential integrity violation"}] [{"count":1}] diff --git a/tests/ovsdb-idl.at b/tests/ovsdb-idl.at -index 62e2b63832..bacb7f161a 100644 +index 62e2b63832..22d4481efb 100644 --- a/tests/ovsdb-idl.at +++ b/tests/ovsdb-idl.at -@@ -561,9 +561,9 @@ OVSDB_CHECK_IDL([simple idl, conditional, false condition], +@@ -1,17 +1,6 @@ + AT_BANNER([OVSDB -- interface description language (IDL)]) + + m4_divert_text([PREPARE_TESTS], [ +-# ovsdb_start_idltest [REMOTE] [SCHEMA] +-# +-# Creates a database using SCHEMA (default: idltest.ovsschema) and +-# starts a database server listening on punix:socket and REMOTE (if +-# specified). +-ovsdb_start_idltest () { +- ovsdb-tool create db ${2:-$abs_srcdir/idltest.ovsschema} || return $? +- ovsdb-server -vconsole:warn --log-file --detach --no-chdir --pidfile --remote=punix:socket ${1:+--remote=$1} db || return $? +- on_exit 'kill `cat ovsdb-server.pid`' +-} +- + # ovsdb_cluster_leader [REMOTES] [DATABASE] + # + # Returns the leader of the DATABASE cluster. +@@ -29,6 +18,24 @@ ovsdb_cluster_leader () { + done + }]) + ++ ++# OVSDB_START_IDLTEST([REMOTE], [SCHEMA]) ++# ++# Creates a database using SCHEMA (default: idltest.ovsschema) and ++# starts a database server listening on punix:socket and REMOTE (if ++# specified). 
++m4_define([OVSDB_START_IDLTEST], ++[ ++ AT_CHECK([ovsdb-tool create db dnl ++ m4_if([$2], [], [$abs_srcdir/idltest.ovsschema], [$2])]) ++ AT_CHECK([ovsdb-server -vconsole:warn --log-file --detach --no-chdir dnl ++ --pidfile --remote=punix:socket dnl ++ m4_if([$1], [], [], [--remote=$1]) db dnl ++ ]) ++ on_exit 'kill `cat ovsdb-server.pid`' ++]) ++ ++ + # OVSDB_CLUSTER_START_IDLTEST([N], [REMOTE]) + # + # Creates a clustered database using idltest.ovsschema and starts a database +@@ -45,9 +52,9 @@ m4_define([OVSDB_CLUSTER_START_IDLTEST], + done + on_exit 'kill $(cat s*.pid)' + for i in $(seq $n); do +- AT_CHECK([ovsdb-server -vraft -vconsole:warn --detach --no-chdir \ +- --log-file=s$i.log --pidfile=s$i.pid --unixctl=s$i \ +- --remote=punix:s$i.ovsdb \ ++ AT_CHECK([ovsdb-server -vraft -vconsole:warn -vfile:dbg --detach \ ++ --no-chdir --log-file=s$i.log --pidfile=s$i.pid \ ++ --unixctl=s$i --remote=punix:s$i.ovsdb \ + m4_if([$2], [], [], [--remote=$2]) s$i.db]) + done + +@@ -77,7 +84,7 @@ m4_define([OVSDB_CLUSTER_START_IDLTEST], + m4_define([OVSDB_CHECK_IDL_C], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 idl unix:socket $3], +@@ -91,7 +98,7 @@ m4_define([OVSDB_CHECK_IDL_C], + m4_define([OVSDB_CHECK_IDL_TCP_C], + [AT_SETUP([$1 - C - tcp]) + AT_KEYWORDS([ovsdb server idl positive tcp socket $5]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:127.0.0.1"]) ++ OVSDB_START_IDLTEST(["ptcp:0:127.0.0.1"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + + m4_if([$2], [], [], +@@ -109,7 +116,7 @@ m4_define([OVSDB_CHECK_IDL_TCP6_C], + AT_SKIP_IF([test "$IS_WIN32" = "yes"]) + AT_SKIP_IF([test $HAVE_IPV6 = no]) + AT_KEYWORDS([ovsdb server idl positive tcp6 socket $5]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:[[::1]]"]) ++ OVSDB_START_IDLTEST(["ptcp:0:[[::1]]"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + + m4_if([$2], [], [], +@@ -125,7 +132,7 @@ m4_define([OVSDB_CHECK_IDL_TCP6_C], + m4_define([OVSDB_CHECK_IDL_PY], + [AT_SETUP([$1 - Python3]) + AT_KEYWORDS([ovsdb server idl positive Python $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl $srcdir/idltest.ovsschema unix:socket $3], +@@ -138,7 +145,7 @@ m4_define([OVSDB_CHECK_IDL_PY], + m4_define([OVSDB_CHECK_IDL_REGISTER_COLUMNS_PY], + [AT_SETUP([$1 - Python3 - register_columns]) + AT_KEYWORDS([ovsdb server idl positive Python register_columns $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl $srcdir/idltest.ovsschema unix:socket ?simple:b,ba,i,ia,r,ra,s,sa,u,ua?simple3:name,uset,uref?simple4:name?simple6:name,weak_ref?link1:i,k,ka,l2?link2:i,l1?singleton:name $3], +@@ -152,7 +159,7 @@ m4_define([OVSDB_CHECK_IDL_REGISTER_COLUMNS_PY], + m4_define([OVSDB_CHECK_IDL_TCP_PY], + [AT_SETUP([$1 - Python3 - tcp]) + AT_KEYWORDS([ovsdb server idl positive Python with tcp socket $5]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:127.0.0.1"]) ++ OVSDB_START_IDLTEST(["ptcp:0:127.0.0.1"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + + m4_if([$2], [], [], +@@ -169,7 
+176,7 @@ m4_define([OVSDB_CHECK_IDL_TCP_PY], + m4_define([OVSDB_CHECK_IDL_TCP_MULTIPLE_REMOTES_PY], + [AT_SETUP([$1 - Python3 (multiple remotes) - tcp]) + AT_KEYWORDS([ovsdb server idl positive Python with tcp socket $5]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:127.0.0.1"]) ++ OVSDB_START_IDLTEST(["ptcp:0:127.0.0.1"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + WRONG_PORT_1=$((TCP_PORT + 101)) + WRONG_PORT_2=$((TCP_PORT + 102)) +@@ -189,7 +196,7 @@ m4_define([OVSDB_CHECK_IDL_TCP6_PY], + AT_SKIP_IF([test "$IS_WIN32" = "yes"]) + AT_SKIP_IF([test $HAVE_IPV6 = no]) + AT_KEYWORDS([ovsdb server idl positive Python with tcp6 socket $5]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:[[::1]]"]) ++ OVSDB_START_IDLTEST(["ptcp:0:[[::1]]"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + echo "TCP_PORT=$TCP_PORT" + +@@ -207,7 +214,7 @@ m4_define([OVSDB_CHECK_IDL_TCP6_MULTIPLE_REMOTES_PY], + AT_SKIP_IF([test "$IS_WIN32" = "yes"]) + AT_SKIP_IF([test $HAVE_IPV6 = no]) + AT_KEYWORDS([ovsdb server idl positive Python with tcp6 socket $5]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:[[::1]]"]) ++ OVSDB_START_IDLTEST(["ptcp:0:[[::1]]"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + WRONG_PORT_1=$((TCP_PORT + 101)) + WRONG_PORT_2=$((TCP_PORT + 102)) +@@ -272,13 +279,13 @@ m4_define([OVSDB_CHECK_IDL_PASSIVE_TCP_PY], + [AT_SETUP([$1 - Python3 - ptcp]) + AT_KEYWORDS([ovsdb server idl positive Python with tcp socket $5]) + # find free TCP port +- AT_CHECK([ovsdb_start_idltest "ptcp:0:127.0.0.1"]) ++ OVSDB_START_IDLTEST(["ptcp:0:127.0.0.1"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + OVSDB_SERVER_SHUTDOWN + rm -f db + + # start OVSDB server in passive mode +- AT_CHECK([ovsdb_start_idltest "tcp:127.0.0.1:$TCP_PORT"]) ++ OVSDB_START_IDLTEST(["tcp:127.0.0.1:$TCP_PORT"]) + AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl_passive $srcdir/idltest.ovsschema ptcp:127.0.0.1:$TCP_PORT $3], + [0], [stdout], [ignore]) + AT_CHECK([sort stdout | uuidfilt]m4_if([$6],,, [[| $6]]), +@@ -458,7 +465,7 @@ OVSDB_CHECK_IDL([simple idl, writing via IDL with unicode], + m4_define([OVSDB_CHECK_IDL_PY_WITH_EXPOUT], + [AT_SETUP([$1 - Python3]) + AT_KEYWORDS([ovsdb server idl positive Python $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl $srcdir/idltest.ovsschema unix:socket $3], +@@ -561,9 +568,9 @@ OVSDB_CHECK_IDL([simple idl, conditional, false condition], "b": true}}]']], [['condition simple []' \ 'condition simple [true]']], @@ -72431,7 +116768,7 @@ index 62e2b63832..bacb7f161a 100644 003: table simple: i=1 r=2 b=true s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> 004: done ]]) -@@ -577,13 +577,40 @@ OVSDB_CHECK_IDL([simple idl, conditional, true condition], +@@ -577,13 +584,40 @@ OVSDB_CHECK_IDL([simple idl, conditional, true condition], "b": true}}]']], [['condition simple []' \ 'condition simple [true]']], @@ -72474,7 +116811,7 @@ index 62e2b63832..bacb7f161a 100644 OVSDB_CHECK_IDL([simple idl, conditional, multiple clauses in condition], [['["idltest", {"op": "insert", -@@ -598,9 +625,9 @@ OVSDB_CHECK_IDL([simple idl, conditional, multiple clauses in condition], +@@ -598,9 +632,9 @@ OVSDB_CHECK_IDL([simple idl, conditional, multiple clauses in condition], "b": true}}]']], [['condition simple []' \ 'condition simple [["i","==",1],["i","==",2]]']], @@ -72486,7 +116823,7 @@ index 62e2b63832..bacb7f161a 100644 003: table simple: 
i=1 r=2 b=true s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> 003: table simple: i=2 r=3 b=true s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> 004: done -@@ -615,9 +642,9 @@ OVSDB_CHECK_IDL([simple idl, conditional, modify as insert due to condition], +@@ -615,9 +649,9 @@ OVSDB_CHECK_IDL([simple idl, conditional, modify as insert due to condition], "b": true}}]']], [['condition simple []' \ 'condition simple [["i","==",1]]']], @@ -72498,7 +116835,7 @@ index 62e2b63832..bacb7f161a 100644 003: table simple: i=1 r=2 b=true s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> 004: done ]]) -@@ -638,11 +665,11 @@ OVSDB_CHECK_IDL([simple idl, conditional, modify as delete due to condition], +@@ -638,11 +672,11 @@ OVSDB_CHECK_IDL([simple idl, conditional, modify as delete due to condition], "row": {"i": 2, "r": 3.0, "b": true}}]']], @@ -72513,7 +116850,7 @@ index 62e2b63832..bacb7f161a 100644 005: empty 006: {"error":null,"result":[{"uuid":["uuid","<2>"]}]} 007: table simple: i=2 r=3 b=true s= u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<2> -@@ -673,14 +700,16 @@ OVSDB_CHECK_IDL([simple idl, conditional, multiple tables], +@@ -673,14 +707,16 @@ OVSDB_CHECK_IDL([simple idl, conditional, multiple tables], "table": "link2", "row": {"i": 3}, "uuid-name": "row0"}]']], @@ -72534,7 +116871,43 @@ index 62e2b63832..bacb7f161a 100644 007: {"error":null,"result":[{"uuid":["uuid","<3>"]}]} 008: table link1: i=0 k=0 ka=[] l2= uuid=<2> 008: table link2: i=3 l1= uuid=<3> -@@ -1237,10 +1266,10 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, orphan weak refer +@@ -946,7 +982,7 @@ AT_KEYWORDS([ovsdb server idl positive]) + # table link2 and column l2 have been deleted. But the IDL still + # expects them to be there, so this test checks that it properly + # tolerates them being missing. 
+-AT_CHECK([ovsdb_start_idltest "" "$abs_srcdir/idltest2.ovsschema"]) ++OVSDB_START_IDLTEST([], ["$abs_srcdir/idltest2.ovsschema"]) + AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 idl unix:socket ['["idltest", + {"op": "insert", + "table": "link1", +@@ -1019,7 +1055,7 @@ AT_CLEANUP + m4_define([OVSDB_CHECK_IDL_FETCH_COLUMNS_PY], + [AT_SETUP([$1 - Python3 - fetch]) + AT_KEYWORDS([ovsdb server idl positive Python increment fetch $6]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl $srcdir/idltest.ovsschema unix:socket [$3] $4], +@@ -1063,7 +1099,7 @@ OVSDB_CHECK_IDL_FETCH_COLUMNS([simple idl, initially populated], + m4_define([OVSDB_CHECK_IDL_WO_MONITOR_COND_PY], + [AT_SETUP([$1 - Python3]) + AT_KEYWORDS([ovsdb server idl Python monitor $4]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + AT_CHECK([ovs-appctl -t ovsdb-server ovsdb-server/disable-monitor-cond]) + AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t10 idl $srcdir/idltest.ovsschema unix:socket $2], + [0], [stdout], [ignore]) +@@ -1156,7 +1192,7 @@ OVSDB_CHECK_IDL_WO_MONITOR_COND([simple idl disable monitor-cond], + m4_define([OVSDB_CHECK_IDL_TRACK_C], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl tracking positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 -c idl unix:socket $3], +@@ -1237,10 +1273,10 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, orphan weak refer {"op": "delete", "table": "simple6", "where": []}]']], @@ -72547,7 +116920,7 @@ index 62e2b63832..bacb7f161a 100644 003: table simple6: name=first_row weak_ref=[<1>] uuid=<0> 003: table simple: inserted row: i=0 r=0 b=false s=row1_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> 003: table simple: updated columns: s -@@ -1279,19 +1308,19 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, orphan rows, cond +@@ -1279,19 +1315,19 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, orphan rows, cond {"op": "delete", "table": "simple6", "where": []}]']], @@ -72571,7 +116944,7 @@ index 62e2b63832..bacb7f161a 100644 007: table simple6: name=first_row weak_ref=[<1>] uuid=<0> 007: table simple: deleted row: i=0 r=0 b=false s=row1_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<3> 007: table simple: inserted row: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> -@@ -1333,14 +1362,14 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, references, condi +@@ -1333,14 +1369,14 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, references, condi {"op": "delete", "table": "simple6", "where": []}]']], @@ -72589,7 +116962,7 @@ index 62e2b63832..bacb7f161a 100644 005: table simple6: name=first_row weak_ref=[<3>] uuid=<0> 005: table simple: deleted row: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> 005: table simple: inserted row: i=1 r=0 b=false s=row1_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<3> -@@ -1376,7 +1405,8 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, references, singl +@@ -1376,7 +1412,8 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, references, singl {"op": "insert", "table": "simple", "row": {"s": "row0_s"}}]']], @@ -72599,7 
+116972,64 @@ index 62e2b63832..bacb7f161a 100644 001: table simple6: inserted row: name=row0_s6 weak_ref=[<0>] uuid=<1> 001: table simple6: updated columns: name weak_ref 001: table simple: inserted row: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<0> -@@ -1418,7 +1448,8 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, weak references, +@@ -1392,6 +1429,56 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, references, singl + 006: done + ]]) + ++OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, weak references, insert+delete batch], ++ [['["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row0_s"}, ++ "uuid-name": "uuid_row0_s"}, ++ {"op": "insert", ++ "table": "simple6", ++ "row": {"name": "row0_s6", ++ "weak_ref": ["set", ++ [["named-uuid", "uuid_row0_s"]] ++ ]}}]']], ++ [['condition simple [true];simple6 [true]' \ ++ '["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row1_s"}, ++ "uuid-name": "uuid_row1_s"}, ++ {"op": "mutate", ++ "table": "simple6", ++ "where": [["name", "==", "row0_s6"]], ++ "mutations": [["weak_ref", "insert", ["set", [["named-uuid", "uuid_row1_s"]]]]]}]' \ ++ '+["idltest", ++ {"op": "delete", ++ "table": "simple", ++ "where": [["s", "==", "row1_s"]]}]' \ ++ '["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"s": "row2_s"}}]']], ++ [[000: simple6: conditions unchanged ++000: simple: conditions unchanged ++001: table simple6: inserted row: name=row0_s6 weak_ref=[<0>] uuid=<1> ++001: table simple6: updated columns: name weak_ref ++001: table simple: inserted row: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<0> ++001: table simple: updated columns: s ++002: {"error":null,"result":[{"uuid":["uuid","<3>"]},{"count":1}]} ++003: {"error":null,"result":[{"count":1}]} ++004: table simple6: name=row0_s6 weak_ref=[<0>] uuid=<1> ++004: table simple6: updated columns: weak_ref ++004: table simple: inserted/deleted row: i=0 r=0 b=false s=row1_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<3> ++004: table simple: updated columns: s ++005: {"error":null,"result":[{"uuid":["uuid","<4>"]}]} ++006: table simple6: name=row0_s6 weak_ref=[<0>] uuid=<1> ++006: table simple: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<0> ++006: table simple: inserted row: i=0 r=0 b=false s=row2_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<4> ++006: table simple: updated columns: s ++007: done ++]]) ++ + dnl This test checks that deleting both the destination and source of the + dnl reference doesn't remove the reference in the source tracked record. 
+ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, weak references, multiple deletes], +@@ -1418,7 +1505,8 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, weak references, {"op": "insert", "table": "simple", "row": {"s": "row0_s"}}]']], @@ -72609,7 +117039,7 @@ index 62e2b63832..bacb7f161a 100644 001: table simple6: inserted row: name=row0_s6 weak_ref=[<0>] uuid=<1> 001: table simple6: updated columns: name weak_ref 001: table simple: inserted row: i=0 r=0 b=false s=row0_s u=<2> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<0> -@@ -1458,7 +1489,9 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, strong references +@@ -1458,7 +1546,9 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, strong references {"op": "insert", "table": "simple", "row": {"s": "row0_s"}}]']], @@ -72620,7 +117050,7 @@ index 62e2b63832..bacb7f161a 100644 001: table simple3: inserted row: name=row0_s3 uset=[] uref=[<0>] uuid=<1> 001: table simple3: updated columns: name uref 001: table simple4: inserted row: name=row0_s4 uuid=<0> -@@ -1493,12 +1526,14 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, strong references +@@ -1493,12 +1583,14 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially populated, strong references {"op": "insert", "table": "simple", "row": {"s": "row0_s"}}]']], @@ -72637,7 +117067,7 @@ index 62e2b63832..bacb7f161a 100644 003: table simple3: name=row0_s3 uset=[] uref=[] uuid=<1> 003: table simple4: deleted row: name=row0_s4 uuid=<0> 004: {"error":null,"result":[{"uuid":["uuid","<2>"]}]} -@@ -1529,10 +1564,12 @@ OVSDB_CHECK_IDL([simple idl, initially populated, strong references, conditional +@@ -1529,10 +1621,12 @@ OVSDB_CHECK_IDL([simple idl, initially populated, strong references, conditional {"op": "insert", "table": "simple", "row": {"s": "row0_s"}}]']], @@ -72652,7 +117082,61 @@ index 62e2b63832..bacb7f161a 100644 003: table simple3: name=row0_s3 uset=[] uref=[] uuid=<1> 004: {"error":null,"result":[{"uuid":["uuid","<2>"]}]} 005: table simple3: name=row0_s3 uset=[] uref=[] uuid=<1> -@@ -2230,6 +2267,23 @@ CHECK_STREAM_OPEN_BLOCK([Python3], [$PYTHON3 $srcdir/test-stream.py], +@@ -1650,7 +1744,7 @@ OVSDB_CHECK_IDL_TRACK([track, simple idl, initially empty, various ops], + m4_define([OVSDB_CHECK_IDL_PARTIAL_UPDATE_MAP_COLUMN], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl partial update map column positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 -c idl-partial-update-map-column unix:socket $3], +@@ -1711,7 +1805,7 @@ OVSDB_CHECK_IDL_PY([partial-map update set refmap idl], + m4_define([OVSDB_CHECK_IDL_PARTIAL_UPDATE_SET_COLUMN], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl partial update set column positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 -c idl-partial-update-set-column unix:socket $3], +@@ -1900,7 +1994,7 @@ OVSDB_CHECK_IDL_NOTIFY([simple idl verify notify], + m4_define([OVSDB_CHECK_IDL_COMPOUND_INDEX_SINGLE_COLUMN_C], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl compound_index_single_column compound_index positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + 
[AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + # Generate the data to be tested. +@@ -2047,7 +2141,7 @@ OVSDB_CHECK_IDL_COMPOUND_INDEX_SINGLE_COLUMN_C([Compound_index, single column te + m4_define([OVSDB_CHECK_IDL_COMPOUND_INDEX_DOUBLE_COLUMN_C], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl compound_index_double_column compound_index positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + # Generate the data to be tested. +@@ -2186,7 +2280,7 @@ OVSDB_CHECK_IDL_COMPOUND_INDEX_DOUBLE_COLUMN_C([Compound_index, double column te + m4_define([OVSDB_CHECK_IDL_COMPOUND_INDEX_WITH_REF], + [AT_SETUP([$1 - C]) + AT_KEYWORDS([ovsdb server idl compound_index compound_index_with_ref positive $5]) +- AT_CHECK([ovsdb_start_idltest]) ++ OVSDB_START_IDLTEST + m4_if([$2], [], [], + [AT_CHECK([ovsdb-client transact unix:socket $2], [0], [ignore], [ignore])]) + AT_CHECK([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t10 -c idl-compound-index-with-ref unix:socket $3], +@@ -2214,7 +2308,7 @@ m4_define([CHECK_STREAM_OPEN_BLOCK], + AT_SKIP_IF([test "$3" = "tcp6" && test "$IS_WIN32" = "yes"]) + AT_SKIP_IF([test "$3" = "tcp6" && test "$HAVE_IPV6" = "no"]) + AT_KEYWORDS([ovsdb server stream open_block $3]) +- AT_CHECK([ovsdb_start_idltest "ptcp:0:$4"]) ++ OVSDB_START_IDLTEST(["ptcp:0:$4"]) + PARSE_LISTENING_PORT([ovsdb-server.log], [TCP_PORT]) + WRONG_PORT=$(($TCP_PORT + 101)) + AT_CHECK([$2 tcp:$4:$TCP_PORT], [0], [ignore]) +@@ -2230,6 +2324,23 @@ CHECK_STREAM_OPEN_BLOCK([Python3], [$PYTHON3 $srcdir/test-stream.py], CHECK_STREAM_OPEN_BLOCK([Python3], [$PYTHON3 $srcdir/test-stream.py], [tcp6], [[[::1]]]) @@ -72676,7 +117160,7 @@ index 62e2b63832..bacb7f161a 100644 # same as OVSDB_CHECK_IDL but uses Python IDL implementation with tcp # with multiple remotes to assert the idl connects to the leader of the Raft cluster m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], -@@ -2245,10 +2299,11 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], +@@ -2245,10 +2356,11 @@ m4_define([OVSDB_CHECK_IDL_LEADER_ONLY_PY], pids=$(cat s2.pid s3.pid s1.pid | tr '\n' ',') echo $pids AT_CHECK([$PYTHON3 $srcdir/test-ovsdb.py -t30 idl-cluster $srcdir/idltest.ovsschema $remotes $pids $3], @@ -72689,7 +117173,7 @@ index 62e2b63832..bacb7f161a 100644 AT_CLEANUP]) OVSDB_CHECK_IDL_LEADER_ONLY_PY([Check Python IDL connects to leader], 3, ['remote']) -@@ -2291,6 +2346,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_C], +@@ -2291,6 +2403,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_C], AT_CHECK([sort stdout | uuidfilt]m4_if([$7],,, [[| $7]]), [0], [$5]) m4_ifval([$8], [AT_CHECK([grep '$8' stderr], [1])], [], []) @@ -72697,7 +117181,7 @@ index 62e2b63832..bacb7f161a 100644 AT_CLEANUP]) # Same as OVSDB_CHECK_CLUSTER_IDL_C but uses the Python IDL implementation. 
-@@ -2311,6 +2367,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_PY], +@@ -2311,6 +2424,7 @@ m4_define([OVSDB_CHECK_CLUSTER_IDL_PY], AT_CHECK([sort stdout | uuidfilt]m4_if([$7],,, [[| $7]]), [0], [$5]) m4_if([$8], [AT_CHECK([grep '$8' stderr], [1])], [], []) @@ -72705,7 +117189,7 @@ index 62e2b63832..bacb7f161a 100644 AT_CLEANUP]) m4_define([OVSDB_CHECK_CLUSTER_IDL], -@@ -2341,11 +2398,11 @@ OVSDB_CHECK_CLUSTER_IDL([simple idl, monitor_cond_since, cluster disconnect], +@@ -2341,11 +2455,11 @@ OVSDB_CHECK_CLUSTER_IDL([simple idl, monitor_cond_since, cluster disconnect], "table": "simple", "where": [["i", "==", 1]], "row": {"r": 2.0 }}]']], @@ -72720,7 +117204,16 @@ index 62e2b63832..bacb7f161a 100644 005: reconnect 006: table simple 007: {"error":null,"result":[{"count":1}]} -@@ -2437,3 +2494,92 @@ unix:socket2 remote has col id in table simple7 +@@ -2402,7 +2516,7 @@ reconnect.*waiting .* seconds before reconnect) + + AT_SETUP([idl table and column presence check]) + AT_KEYWORDS([ovsdb server idl table column check]) +-AT_CHECK([ovsdb_start_idltest "" "$abs_srcdir/idltest2.ovsschema"]) ++OVSDB_START_IDLTEST([], ["$abs_srcdir/idltest2.ovsschema"]) + + AT_CHECK(ovsdb-tool create db2 $abs_srcdir/idltest.ovsschema) + AT_CHECK(ovsdb-server -vconsole:warn --log-file=ovsdb-server2.log --detach dnl +@@ -2437,3 +2551,182 @@ unix:socket2 remote has col id in table simple7 OVSDB_SERVER_SHUTDOWN AT_CLEANUP @@ -72813,8 +117306,98 @@ index 62e2b63832..bacb7f161a 100644 +005: table link2: i=1 l1= uuid=<1> +006: done +]]) ++ ++ ++m4_define([OVSDB_CHECK_IDL_CHANGE_AWARE], ++ [AT_SETUP([simple idl, database change aware, online conversion - $1]) ++ AT_KEYWORDS([ovsdb server idl db_change_aware conversion $1]) ++ ++ m4_if([$1], [clustered], ++ [OVSDB_CLUSTER_START_IDLTEST([1], [punix:socket])], ++ [OVSDB_START_IDLTEST]) ++ ++ dnl Add some data. ++ AT_CHECK([[ovsdb-client transact unix:socket '["idltest", ++ {"op": "insert", ++ "table": "simple", ++ "row": {"i": 1, ++ "r": 2.0, ++ "b": true, ++ "s": "first row", ++ "u": ["uuid", "84f5c8f5-ac76-4dbc-a24f-8860eb407fc1"], ++ "ia": ["set", [1, 2, 3]], ++ "ra": ["set", [-0.5]], ++ "ba": ["set", [true]], ++ "sa": ["set", ["abc", "def"]], ++ "ua": ["set", [["uuid", "69443985-7806-45e2-b35f-574a04e720f9"], ++ ["uuid", "aad11ef0-816a-4b01-93e6-03b8b4256b98"]]]}}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"b": false, "s": "second row"}}, ++ {"op": "insert", ++ "table": "simple", ++ "row": {"b": true, "s": "third row"}} ++ ]']], [0], [stdout]) ++ ++ dnl Create a new schema by adding 'extra_column' to the 'simple' table. ++ AT_CHECK([sed 's/"ua": {/"extra_column":{"type": "string"},"ua": {/ ++ s/1.2.3/1.2.4/' \ ++ $abs_srcdir/idltest.ovsschema > new-idltest.ovsschema]) ++ dnl Try "needs-conversion". ++ AT_CHECK([ovsdb-client needs-conversion unix:socket $abs_srcdir/idltest.ovsschema], [0], [no ++]) ++ AT_CHECK([ovsdb-client needs-conversion unix:socket new-idltest.ovsschema], [0], [yes ++]) ++ ++ dnl Conditionally exclude the second row from monitoring. ++ m4_define([COND], [['condition simple [["b","==",true]]']]) ++ ++ dnl Start monitoring. 
++ OVS_DAEMONIZE([test-ovsdb '-vPATTERN:console:test-ovsdb|%c|%m' -vjsonrpc -t30 \ ++ idl unix:socket COND monitor \ ++ >idl-c.out 2>idl-c.err], [idl-c.pid]) ++ AT_CAPTURE_FILE([idl-c.out]) ++ AT_CAPTURE_FILE([idl-c.err]) ++ ++ OVS_DAEMONIZE([$PYTHON3 $srcdir/test-ovsdb.py -t30 \ ++ idl $srcdir/idltest.ovsschema unix:socket COND monitor \ ++ >idl-python.out 2>idl-python.err], [idl-python.pid]) ++ AT_CAPTURE_FILE([idl-python.out]) ++ AT_CAPTURE_FILE([idl-python.err]) ++ ++ dnl Wait for monitors to receive the data. ++ OVS_WAIT_UNTIL([grep -q 'third row' idl-c.err]) ++ OVS_WAIT_UNTIL([grep -q 'third row' idl-python.err]) ++ ++ dnl Convert the database. ++ AT_CHECK([ovsdb-client convert unix:socket new-idltest.ovsschema]) ++ ++ dnl Check for the monitor cancellation and the data being requested again. ++ m4_foreach([FILE], [[idl-c], [idl-python]], ++ [OVS_WAIT_UNTIL([grep -q 'monitor_canceled' FILE.err]) ++ OVS_WAIT_UNTIL([test 2 -eq $(grep -c 'send request, method="monitor_cond_since", params=."idltest"' FILE.err)]) ++ ++ dnl XXX: Checking for the new schema bits conditionally because standalone ++ dnl databases are not updating the schema in the _Server database properly. ++ m4_if([$1], [clustered], [OVS_WAIT_UNTIL([grep -q 'extra_column' FILE.err])]) ++ ++ dnl Check that there were no unexpected messages. ++ AT_CHECK([! grep 'unexpected' FILE.err]) ++ ++ dnl Check that the data is received twice and the condition is working. ++ AT_CHECK([sort FILE.out | uuidfilt], [0], ++[[000: simple: change conditions ++001: table simple: i=0 r=0 b=true s=third row u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> ++001: table simple: i=1 r=2 b=true s=first row u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<5> ++002: table simple: i=0 r=0 b=true s=third row u=<0> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1> ++002: table simple: i=1 r=2 b=true s=first row u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<5> ++]])]) ++ AT_CLEANUP]) ++ ++OVSDB_CHECK_IDL_CHANGE_AWARE([standalone]) ++OVSDB_CHECK_IDL_CHANGE_AWARE([clustered]) diff --git a/tests/ovsdb-server.at b/tests/ovsdb-server.at -index 876cb836cd..a20d7a926d 100644 +index 876cb836cd..ad9219da3c 100644 --- a/tests/ovsdb-server.at +++ b/tests/ovsdb-server.at @@ -4,7 +4,7 @@ m4_define([OVSDB_SERVER_SHUTDOWN], @@ -73117,7 +117700,7 @@ index 876cb836cd..a20d7a926d 100644 cat stdout > dump1 OVS_WAIT_UNTIL([ ovsdb-client dump unix:db2.sock | grep zero ]) AT_CHECK([ovsdb-client dump unix:db2.sock], [0], [stdout], [ignore]) -@@ -1744,16 +1765,15 @@ AT_CLEANUP +@@ -1744,16 +1765,20 @@ AT_CLEANUP #ovsdb-server/connect-active-ovsdb-server AT_SETUP([ovsdb-server/connect-active-server]) @@ -73127,16 +117710,22 @@ index 876cb836cd..a20d7a926d 100644 AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore]) AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore]) - AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore]) +-AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore]) -on_exit 'test ! -e pid || kill `cat pid`' ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], ++ [0], [ignore], [ignore]) -AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=pid2 --remote=punix:db2.sock --unixctl=unixctl2 db2], [0], [ignore], [ignore]) -on_exit 'test ! 
-e pid2 || kill `cat pid2`' -+AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl=unixctl2 db2], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server2.log --pidfile=2.pid \ ++ --remote=punix:db2.sock --unixctl=unixctl2 db2], ++ [0], [ignore], [ignore]) dnl Try to connect without specifying the active server. AT_CHECK([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/connect-active-ovsdb-server], [0], -@@ -1783,6 +1803,7 @@ AT_CLEANUP +@@ -1783,6 +1808,7 @@ AT_CLEANUP #ovsdb-server/disconnect-active-server command AT_SETUP([ovsdb-server/disconnect-active-server]) @@ -73144,7 +117733,7 @@ index 876cb836cd..a20d7a926d 100644 AT_KEYWORDS([ovsdb server replication disconnect-active-server]) AT_SKIP_IF([test $DIFF_SUPPORTS_NORMAL_FORMAT = no]) -@@ -1791,10 +1812,8 @@ AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore]) +@@ -1791,10 +1817,8 @@ AT_CHECK([ovsdb-tool create db1 schema], [0], [stdout], [ignore]) AT_CHECK([ovsdb-tool create db2 schema], [0], [stdout], [ignore]) AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock db1], [0], [ignore], [ignore]) @@ -73156,7 +117745,7 @@ index 876cb836cd..a20d7a926d 100644 AT_CHECK([ovsdb-client transact unix:db.sock \ '[["mydb", -@@ -1840,7 +1859,7 @@ AT_CHECK([uuidfilt output], [0], [7,9c7,8 +@@ -1840,7 +1864,7 @@ AT_CHECK([uuidfilt output], [0], [7,9c7,8 --- > _uuid name number > ----- ---- ------ @@ -73165,7 +117754,7 @@ index 876cb836cd..a20d7a926d 100644 dnl The backup server now become active, and can accept write transactions. AT_CHECK([ovsdb-client transact unix:db2.sock \ -@@ -1891,13 +1910,12 @@ dnl Start both 'db1' and 'db2' in backup mode. Let them backup from each +@@ -1891,13 +1915,12 @@ dnl Start both 'db1' and 'db2' in backup mode. Let them backup from each dnl other. This is not an supported operation state, but to simulate a start dnl up condition where an HA manger can select which one to be an active dnl server soon after. @@ -73182,7 +117771,7 @@ index 876cb836cd..a20d7a926d 100644 dnl dnl make sure both servers reached the replication state -@@ -1965,8 +1983,8 @@ AT_CHECK([ovsdb-tool transact db \ +@@ -1965,8 +1988,8 @@ AT_CHECK([ovsdb-tool transact db \ "row": {"number": 9, "name": "nine"}}]]'], [0], [ignore], [ignore]) dnl Start 'db', then try to be a back up server of itself. @@ -73193,7 +117782,7 @@ index 876cb836cd..a20d7a926d 100644 dnl Save the current content AT_CHECK([ovsdb-client dump unix:db.sock], [0], [stdout]) -@@ -1984,6 +2002,7 @@ AT_CHECK([diff dump1 dump2]) +@@ -1984,6 +2007,7 @@ AT_CHECK([diff dump1 dump2]) AT_CLEANUP AT_SETUP([ovsdb-server/read-only db:ptcp connection]) @@ -73201,7 +117790,7 @@ index 876cb836cd..a20d7a926d 100644 AT_KEYWORDS([ovsdb server read-only]) AT_DATA([schema], [[{"name": "mydb", -@@ -2072,12 +2091,10 @@ AT_CHECK([ovsdb-tool transact db2 \ +@@ -2072,12 +2096,17 @@ AT_CHECK([ovsdb-tool transact db2 \ "row": {"number": 10, "name": "ten"}}]]'], [0], [ignore], [ignore]) dnl Start both 'db1' and 'db2'. @@ -73209,15 +117798,22 @@ index 876cb836cd..a20d7a926d 100644 -on_exit 'test ! 
-e pid || kill `cat pid`' - +on_exit 'kill `cat *.pid`' -+AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server1.log --pidfile --remote=punix:db.sock --unixctl="`pwd`"/unixctl db1 --active ], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server1.log --pidfile \ ++ --remote=punix:db.sock \ ++ --unixctl="$(pwd)"/unixctl db1 --active ], ++ [0], [ignore], [ignore]) -AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile="`pwd`"/pid2 --remote=punix:db2.sock --unixctl="`pwd`"/unixctl2 db2], [0], [ignore], [ignore]) -on_exit 'test ! -e pid2 || kill `cat pid2`' -+AT_CHECK([ovsdb-server --detach --no-chdir --log-file=ovsdb-server2.log --pidfile=2.pid --remote=punix:db2.sock --unixctl="`pwd`"/unixctl2 db2], [0], [ignore], [ignore]) ++AT_CHECK([ovsdb-server -vfile --detach --no-chdir \ ++ --log-file=ovsdb-server2.log --pidfile=2.pid \ ++ --remote=punix:db2.sock --unixctl="$(pwd)"/unixctl2 db2], ++ [0], [ignore], [ignore]) OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl ovsdb-server/sync-status |grep active]) OVS_WAIT_UNTIL([ovs-appctl -t "`pwd`"/unixctl2 ovsdb-server/sync-status |grep active]) -@@ -2177,7 +2194,7 @@ dnl Starting a dummy server only to reserve some tcp port. +@@ -2177,7 +2206,7 @@ dnl Starting a dummy server only to reserve some tcp port. AT_CHECK([cp db db.tmp]) AT_CHECK([ovsdb-server -vfile -vvlog:off --log-file=listener.log dnl --detach --no-chdir dnl @@ -73226,6 +117822,14 @@ index 876cb836cd..a20d7a926d 100644 --remote=ptcp:0:127.0.0.1 dnl db.tmp], [0], [stdout], [stderr]) PARSE_LISTENING_PORT([listener.log], [BAD_TCP_PORT]) +@@ -2304,6 +2333,6 @@ CLEAN_LOG_FILE([2.log], [2.log.clear]) + + dnl Checking that databases and logs are equal. + AT_CHECK([diff db.clear ./replay_dir/db.copy.clear]) +-AT_CHECK([diff 1.log.clear 2.log.clear]) ++AT_CHECK([diff -u 1.log.clear 2.log.clear]) + + AT_CLEANUP diff --git a/tests/ovsdb-tool.at b/tests/ovsdb-tool.at index 12ad6fb3fc..5496ccda77 100644 --- a/tests/ovsdb-tool.at @@ -73311,10 +117915,45 @@ index 12ad6fb3fc..5496ccda77 100644 +AT_CHECK([diff standalonedump clusterdump]) +AT_CLEANUP diff --git a/tests/packet-type-aware.at b/tests/packet-type-aware.at -index 054dcc9ccf..d63528e69e 100644 +index 054dcc9ccf..63054478e7 100644 --- a/tests/packet-type-aware.at +++ b/tests/packet-type-aware.at -@@ -326,7 +326,7 @@ ovs-appctl time/warp 1000 +@@ -142,30 +142,27 @@ AT_CHECK([ + ### Setup GRE tunnels + AT_CHECK([ + ovs-appctl netdev-dummy/ip4addr br-p1 10.0.0.1/24 && +- ovs-appctl ovs/route/add 10.0.0.0/24 br-p1 && + ovs-appctl tnl/arp/set br-p1 10.0.0.1 $HWADDR_BRP1 && + ovs-appctl tnl/arp/set br-p1 10.0.0.2 $HWADDR_BRP2 && + ovs-appctl tnl/arp/set br-p1 10.0.0.3 $HWADDR_BRP3 && + + ovs-appctl netdev-dummy/ip4addr br-p2 20.0.0.2/24 && +- ovs-appctl ovs/route/add 20.0.0.0/24 br-p2 && + ovs-appctl tnl/arp/set br-p2 20.0.0.1 $HWADDR_BRP1 && + ovs-appctl tnl/arp/set br-p2 20.0.0.2 $HWADDR_BRP2 && + ovs-appctl tnl/arp/set br-p2 20.0.0.3 $HWADDR_BRP3 && + + ovs-appctl netdev-dummy/ip4addr br-p3 30.0.0.3/24 && +- ovs-appctl ovs/route/add 30.0.0.0/24 br-p3 && + ovs-appctl tnl/arp/set br-p3 30.0.0.1 $HWADDR_BRP1 && + ovs-appctl tnl/arp/set br-p3 30.0.0.2 $HWADDR_BRP2 && + ovs-appctl tnl/arp/set br-p3 30.0.0.3 $HWADDR_BRP3 + ], [0], [ignore]) + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User: ++ ovs-appctl ovs/route/show | grep Cached: | sort + ], [0], [dnl +-User: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 +-User: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 +-User: 30.0.0.0/24 
dev br-p3 SRC 30.0.0.3 ++Cached: 10.0.0.0/24 dev br-p1 SRC 10.0.0.1 local ++Cached: 20.0.0.0/24 dev br-p2 SRC 20.0.0.2 local ++Cached: 30.0.0.0/24 dev br-p3 SRC 30.0.0.3 local + ]) + + AT_CHECK([ +@@ -326,7 +323,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73323,7 +117962,7 @@ index 054dcc9ccf..d63528e69e 100644 tunnel(src=30.0.0.1,dst=30.0.0.3,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=0,id=0),eth(dst=1e:2c:e9:2a:66:9e),eth_type(0x0800),ipv4(dst=192.168.10.30,frag=no), packets:1, bytes:98, used:0.0s, actions:set(eth(dst=aa:55:aa:55:00:03)),n3 ]) -@@ -344,7 +344,7 @@ ovs-appctl time/warp 1000 +@@ -344,7 +341,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73332,7 +117971,7 @@ index 054dcc9ccf..d63528e69e 100644 tunnel(src=20.0.0.1,dst=20.0.0.2,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=0,id=0),eth(dst=46:1e:7d:1a:95:a1),eth_type(0x0800),ipv4(dst=192.168.10.20,frag=no), packets:1, bytes:98, used:0.0s, actions:set(eth(dst=aa:55:aa:55:00:02)),n2 ]) -@@ -362,7 +362,7 @@ ovs-appctl time/warp 1000 +@@ -362,7 +359,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73341,7 +117980,7 @@ index 054dcc9ccf..d63528e69e 100644 tunnel(src=10.0.0.2,dst=10.0.0.1,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=0,id=0),eth(dst=3a:6d:d2:09:9c:ab),eth_type(0x0800),ipv4(dst=192.168.10.10,frag=no), packets:1, bytes:98, used:0.0s, actions:set(eth(dst=aa:55:aa:55:00:01)),n1 ]) -@@ -380,8 +380,8 @@ ovs-appctl time/warp 1000 +@@ -380,8 +377,8 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73352,7 +117991,7 @@ index 054dcc9ccf..d63528e69e 100644 tunnel(src=30.0.0.1,dst=30.0.0.3,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=0,id=0),eth(dst=1e:2c:e9:2a:66:9e),eth_type(0x0800),ipv4(dst=192.168.10.30,frag=no), packets:1, bytes:98, used:0.0s, actions:set(eth(dst=aa:55:aa:55:00:03)),n3 ]) -@@ -399,9 +399,9 @@ ovs-appctl time/warp 1000 +@@ -399,9 +396,9 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73364,7 +118003,7 @@ index 054dcc9ccf..d63528e69e 100644 ]) # Clear up megaflow cache -@@ -418,7 +418,7 @@ ovs-appctl time/warp 1000 +@@ -418,7 +415,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73373,7 +118012,7 @@ index 054dcc9ccf..d63528e69e 100644 tunnel(src=20.0.0.3,dst=20.0.0.2,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=0,id=0),eth(dst=46:1e:7d:1a:95:a1),eth_type(0x0800),ipv4(dst=192.168.10.20,frag=no), packets:1, bytes:98, used:0.0s, actions:set(eth(dst=aa:55:aa:55:00:02)),n2 ]) -@@ -504,7 +504,7 @@ ovs-appctl time/warp 1000 +@@ -504,7 +501,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73382,7 +118021,24 @@ index 054dcc9ccf..d63528e69e 100644 
tunnel(src=20.0.0.3,dst=20.0.0.2,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=1,id=0x800),eth_type(0x0800),ipv4(dst=192.168.10.20,frag=no), packets:1, bytes:84, used:0.0s, actions:drop ]) -@@ -726,7 +726,7 @@ ovs-appctl time/warp 1000 +@@ -681,14 +678,13 @@ AT_CHECK([ + + AT_CHECK([ + ovs-appctl netdev-dummy/ip4addr br2 10.0.0.1/24 && +- ovs-appctl ovs/route/add 10.0.0.0/24 br2 && + ovs-appctl tnl/arp/set br2 10.0.0.2 de:af:be:ef:ba:be + ], [0], [ignore]) + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User: ++ ovs-appctl ovs/route/show | grep Cached: + ], [0], [dnl +-User: 10.0.0.0/24 dev br2 SRC 10.0.0.1 ++Cached: 10.0.0.0/24 dev br2 SRC 10.0.0.1 local + ]) + + +@@ -726,7 +722,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73391,7 +118047,7 @@ index 054dcc9ccf..d63528e69e 100644 ]) AT_CHECK([ -@@ -814,7 +814,7 @@ ovs-appctl time/warp 1000 +@@ -814,7 +810,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73400,7 +118056,7 @@ index 054dcc9ccf..d63528e69e 100644 ]) AT_CHECK([ -@@ -892,7 +892,7 @@ ovs-appctl time/warp 1000 +@@ -892,7 +888,7 @@ ovs-appctl time/warp 1000 AT_CHECK([ ovs-appctl dpctl/dump-flows --names dummy@ovs-dummy | strip_used | grep -v ipv6 | sort ], [0], [flow-dump from the main thread: @@ -73409,7 +118065,27 @@ index 054dcc9ccf..d63528e69e 100644 ]) AT_CHECK([ -@@ -1021,7 +1021,7 @@ AT_CHECK([ +@@ -955,7 +951,6 @@ AT_CHECK([ + + AT_CHECK([ + ovs-appctl netdev-dummy/ip4addr br0 20.0.0.1/24 && +- ovs-appctl ovs/route/add 20.0.0.2/24 br0 && + ovs-appctl tnl/neigh/set br0 20.0.0.1 aa:bb:cc:00:00:01 && + ovs-appctl tnl/neigh/set br0 20.0.0.2 aa:bb:cc:00:00:02 + ], [0], [ignore]) +@@ -963,9 +958,9 @@ AT_CHECK([ + ovs-appctl time/warp 1000 + + AT_CHECK([ +- ovs-appctl ovs/route/show | grep User ++ ovs-appctl ovs/route/show | grep Cached: + ],[0], [dnl +-User: 20.0.0.0/24 dev br0 SRC 20.0.0.1 ++Cached: 20.0.0.0/24 dev br0 SRC 20.0.0.1 local + ]) + + AT_CHECK([ +@@ -1021,7 +1016,7 @@ AT_CHECK([ ], [0], [flow-dump from the main thread: recirc_id(0),in_port(p0),packet_type(ns=0,id=0),eth(src=aa:bb:cc:00:00:02,dst=aa:bb:cc:00:00:01),eth_type(0x0800),ipv4(dst=20.0.0.1,proto=47,frag=no), packets:3, bytes:378, used:0.0s, actions:tnl_pop(gre_sys) tunnel(src=20.0.0.2,dst=20.0.0.1,flags(-df-csum)),recirc_id(0),in_port(gre_sys),packet_type(ns=1,id=0x8847),eth_type(0x8847),mpls(label=999/0x0,tc=0/0,ttl=64/0x0,bos=1/1), packets:3, bytes:264, used:0.0s, actions:push_eth(src=00:00:00:00:00:00,dst=00:00:00:00:00:00),pop_mpls(eth_type=0x800),recirc(0x1) @@ -73855,8 +118531,73 @@ index 0f74709f5a..5bca84351c 100644 run should send probe in IDLE for 0 ms (1000 ms backoff) +diff --git a/tests/rstp.at b/tests/rstp.at +index 600e85dabd..e0d4bed4f0 100644 +--- a/tests/rstp.at ++++ b/tests/rstp.at +@@ -253,3 +253,60 @@ AT_CHECK([ovs-vsctl del-port br0 p1]) + + OVS_VSWITCHD_STOP + AT_CLEANUP ++ ++AT_SETUP([RSTP - patch ports]) ++# Create br0 with interfaces p1 and p7 ++# and br1 with interfaces p2 and p8 ++# with p1 and p2 being connected patch ports. 
++OVS_VSWITCHD_START( ++ [set port br0 other_config:rstp-enable=false -- \ ++ set bridge br0 rstp-enable=true ++]) ++ ++AT_CHECK([add_of_br 1 \ ++ set port br1 other_config:rstp-enable=false -- \ ++ set bridge br1 rstp-enable=true]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p1 -- \ ++ set interface p1 type=patch options:peer=p2 ofport_request=1 -- \ ++ set port p1 other_config:rstp-enable=true -- \ ++ add-port br1 p2 -- \ ++ set interface p2 type=patch options:peer=p1 ofport_request=2 -- \ ++ set port p2 other_config:rstp-enable=true -- \ ++]) ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p7 -- \ ++ set interface p7 ofport_request=7 type=dummy -- \ ++ set port p7 other_config:rstp-enable=false -- \ ++ add-port br1 p8 -- \ ++ set interface p8 ofport_request=8 type=dummy -- \ ++ set port p8 other_config:rstp-enable=false -- \ ++]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=7 icmp actions=1"]) ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=1 icmp actions=7"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=8 icmp actions=2"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=2 icmp actions=8"]) ++ ++# Give time for RSTP to synchronize. ++ovs-appctl time/warp 5000 500 ++ ++OVS_WAIT_UNTIL_EQUAL([cat ovs-vswitchd.log | FILTER_STP_TOPOLOGY], [dnl ++port p1: RSTP state changed from Disabled to Discarding ++port p2: RSTP state changed from Disabled to Discarding ++port p2: RSTP state changed from Discarding to Forwarding ++port p1: RSTP state changed from Discarding to Forwarding]) ++ ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(7),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 8 ++]) ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(8),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 7 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP diff --git a/tests/stp.at b/tests/stp.at -index 7ddacfc3a0..69475843e5 100644 +index 7ddacfc3a0..df4b77dd4e 100644 --- a/tests/stp.at +++ b/tests/stp.at @@ -368,7 +368,7 @@ AT_CLEANUP @@ -73868,6 +118609,72 @@ index 7ddacfc3a0..69475843e5 100644 s/duration=[0-9.]*s*/duration=Xs/ s/idle_age=[0-9]*,/idle_age=X,/ ']]) +@@ -464,6 +464,65 @@ Datapath actions: 2 + + AT_CLEANUP + ++AT_SETUP([STP - patch ports]) ++# Create br0 with interfaces p1 and p7 ++# and br1 with interfaces p2 and p8 ++# with p1 and p2 being connected patch ports. 
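++#
++# Same topology as the RSTP patch-ports test above:
++#
++#           br0                          br1
++#   p7 (dummy)   p1 (patch) <-----> p2 (patch)   p8 (dummy)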
++OVS_VSWITCHD_START( ++ [set port br0 other_config:stp-enable=false -- \ ++ set bridge br0 stp-enable=true ++]) ++ ++AT_CHECK([add_of_br 1 \ ++ set port br1 other_config:stp-enable=false -- \ ++ set bridge br1 stp-enable=true]) ++ ++ovs-appctl time/stop ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p1 -- \ ++ set interface p1 type=patch options:peer=p2 ofport_request=1 -- \ ++ set port p1 other_config:stp-enable=true -- \ ++ add-port br1 p2 -- \ ++ set interface p2 type=patch options:peer=p1 ofport_request=2 -- \ ++ set port p2 other_config:stp-enable=true -- \ ++]) ++ ++AT_CHECK([ovs-vsctl \ ++ add-port br0 p7 -- \ ++ set interface p7 ofport_request=7 type=dummy -- \ ++ set port p7 other_config:stp-enable=false -- \ ++ add-port br1 p8 -- \ ++ set interface p8 ofport_request=8 type=dummy -- \ ++ set port p8 other_config:stp-enable=false -- \ ++]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=7 icmp actions=1"]) ++AT_CHECK([ovs-ofctl add-flow br0 "in_port=1 icmp actions=7"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=8 icmp actions=2"]) ++AT_CHECK([ovs-ofctl add-flow br1 "in_port=2 icmp actions=8"]) ++ ++# Give time for STP to synchronize. ++ovs-appctl time/warp 30000 3000 ++ ++OVS_WAIT_UNTIL_EQUAL([cat ovs-vswitchd.log | FILTER_STP_TOPOLOGY], [dnl ++port <>: STP state changed from disabled to listening ++port <>: STP state changed from disabled to listening ++port <>: STP state changed from listening to learning ++port <>: STP state changed from listening to learning ++port <>: STP state changed from learning to forwarding ++port <>: STP state changed from learning to forwarding]) ++ ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(7),eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x0800),ipv4(src=10.0.0.2,dst=10.0.0.1,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 8 ++]) ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(8),eth(src=50:54:00:00:00:0b,dst=50:54:00:00:00:0c),eth_type(0x0800),ipv4(src=10.0.0.3,dst=10.0.0.4,proto=1,tos=0,ttl=64,frag=no),icmp(type=8,code=0)' | grep Datapath], [0], [dnl ++Datapath actions: 7 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_SETUP([STP - flush the fdb and mdb when topology changed]) + OVS_VSWITCHD_START([]) + diff --git a/tests/system-afxdp.at b/tests/system-afxdp.at index 0d09906fb6..88f6605663 100644 --- a/tests/system-afxdp.at @@ -74064,10 +118871,22 @@ index 784bada12c..15e789a245 100644 +"]) +AT_CLEANUP diff --git a/tests/system-ipsec.at b/tests/system-ipsec.at -index f45a153edd..718a5aa4d0 100644 +index f45a153edd..d3d27133b9 100644 --- a/tests/system-ipsec.at +++ b/tests/system-ipsec.at -@@ -143,10 +143,10 @@ m4_define([CHECK_ESP_TRAFFIC], +@@ -133,20 +133,18 @@ m4_define([CHECK_ESP_TRAFFIC], + NS_EXEC([right], [ip link set dev br-ipsec up]) + + dnl Capture any underlay esp packets +- tcpdump -l -nn -i ovs-p0 esp > $ovs_base/left/tcpdump.log & +- on_exit "kill $!" +- tcpdump -l -nn -i ovs-p1 esp > $ovs_base/right/tcpdump.log & +- on_exit "kill $!" 
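++    dnl OVS_DAEMONIZE backgrounds the command, records its pid in the
++    dnl named pidfile, and kills it on test exit, replacing the hand-rolled
++    dnl "&" plus on_exit "kill $!" pattern.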
++ OVS_DAEMONIZE([tcpdump -l -nn -i ovs-p0 esp > $ovs_base/left/tcpdump.log], [tcpdump0.pid]) ++ OVS_DAEMONIZE([tcpdump -l -nn -i ovs-p1 esp > $ovs_base/right/tcpdump.log], [tcpdump1.pid]) + + dnl Wait for all loaded connections to be active + OVS_WAIT_UNTIL([test `IPSEC_STATUS_LOADED(left)` -eq `IPSEC_STATUS_ACTIVE(left)`]) OVS_WAIT_UNTIL([test `IPSEC_STATUS_LOADED(right)` -eq `IPSEC_STATUS_ACTIVE(right)`]) dnl Ping over IPsec tunnel @@ -74113,7 +118932,7 @@ index 86d633ac4f..a9c7398e1c 100644 + AT_CHECK([ip link del dev ovs_bareudp0]) +]) diff --git a/tests/system-layer3-tunnels.at b/tests/system-layer3-tunnels.at -index d21fd777dd..6fbdedb64f 100644 +index d21fd777dd..5dcdd2afae 100644 --- a/tests/system-layer3-tunnels.at +++ b/tests/system-layer3-tunnels.at @@ -34,15 +34,15 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ @@ -74135,7 +118954,7 @@ index d21fd777dd..6fbdedb64f 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -83,15 +83,15 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ +@@ -83,78 +83,23 @@ AT_CHECK([ovs-ofctl add-flow br0 "priority=100 ip,nw_dst=10.1.1.2 action=mod_dl_ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.1.1.2]) dnl First, check the underlay @@ -74154,24 +118973,71 @@ index d21fd777dd..6fbdedb64f 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -147,14 +147,14 @@ AT_CHECK([tail -1 stdout], [0], - dnl Check GRE tunnel push - AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'], [0], [stdout]) - AT_CHECK([tail -1 stdout], [0], -- [Datapath actions: clone(tnl_push(tnl_port(4),header(size=38,type=3,eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),gre((flags=0x0,proto=0x6558))),out_port(2)),1) -+ [Datapath actions: tnl_push(tnl_port(4),header(size=38,type=3,eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),gre((flags=0x0,proto=0x6558))),out_port(2)),1 - ]) - - OVS_VSWITCHD_STOP + OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP +-AT_SETUP([layer3 - use non-local port as tunnel endpoint]) +- +-OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy ofport_request=1]) +-AT_CHECK([ovs-vsctl add-port br0 vtep0 -- set int vtep0 type=dummy], [0]) +-AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy], [0]) +-AT_CHECK([ovs-vsctl add-port int-br t1 -- set Interface t1 type=gre \ +- options:remote_ip=1.1.2.92 ofport_request=3], [0]) +- +-AT_CHECK([ovs-appctl dpif/show], [0], [dnl +-dummy@ovs-dummy: hit:0 missed:0 +- br0: +- br0 65534/100: (dummy-internal) +- p0 1/1: (dummy) +- vtep0 2/2: (dummy) +- int-br: +- int-br 65534/3: (dummy-internal) +- t1 3/4: (gre: remote_ip=1.1.2.92) +-]) +- +-AT_CHECK([ovs-appctl netdev-dummy/ip4addr vtep0 1.1.2.88/24], [0], [OK +-]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 vtep0], [0], [OK +-]) +-AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +-AT_CHECK([ovs-ofctl add-flow int-br action=normal]) +- +-dnl Use arp request and reply to achieve tunnel next hop mac binding +-dnl By default, vtep0's MAC address is aa:55:aa:55:00:03 +-AT_CHECK([ovs-appctl netdev-dummy/receive vtep0 
'recirc_id(0),in_port(2),eth(dst=ff:ff:ff:ff:ff:ff,src=aa:55:aa:55:00:03),eth_type(0x0806),arp(tip=1.1.2.92,sip=1.1.2.88,op=1,sha=aa:55:aa:55:00:03,tha=00:00:00:00:00:00)']) +-AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0806),arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=aa:55:aa:55:00:03)']) +- +-AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl +-1.1.2.92 f8:bc:12:44:34:b6 br0 +-]) +- +-AT_CHECK([ovs-appctl ovs/route/show | tail -n+2 | sort], [0], [dnl +-User: 1.1.2.0/24 dev vtep0 SRC 1.1.2.88 +-]) +- +-dnl Check GRE tunnel pop +-AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.2.92,dst=1.1.2.88,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) +- +-AT_CHECK([tail -1 stdout], [0], +- [Datapath actions: tnl_pop(4) +-]) +- +-dnl Check GRE tunnel push +-AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'], [0], [stdout]) +-AT_CHECK([tail -1 stdout], [0], +- [Datapath actions: clone(tnl_push(tnl_port(4),header(size=38,type=3,eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),gre((flags=0x0,proto=0x6558))),out_port(2)),1) +-]) +- +-OVS_VSWITCHD_STOP +-AT_CLEANUP +- AT_SETUP([layer3 - ping over MPLS Bareudp]) -OVS_CHECK_MIN_KERNEL(5, 7) +OVS_CHECK_BAREUDP() OVS_TRAFFIC_VSWITCHD_START([_ADD_BR([br1])]) ADD_NAMESPACES(at_ns0, at_ns1) -@@ -191,18 +191,18 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) +@@ -191,18 +136,18 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br0 flows0.txt]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br1 flows1.txt]) @@ -74193,7 +119059,7 @@ index d21fd777dd..6fbdedb64f 100644 OVS_TRAFFIC_VSWITCHD_START([_ADD_BR([br1])]) ADD_NAMESPACES(at_ns0, at_ns1) -@@ -239,11 +239,11 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) +@@ -239,11 +184,11 @@ AT_CHECK([ovs-vsctl add-port br1 patch1]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br0 flows0.txt]) AT_CHECK([ovs-ofctl -O OpenFlow13 add-flows br1 flows1.txt]) @@ -74208,7 +119074,7 @@ index d21fd777dd..6fbdedb64f 100644 ]) OVS_TRAFFIC_VSWITCHD_STOP diff --git a/tests/system-offloads-traffic.at b/tests/system-offloads-traffic.at -index 80bc1dd5c3..1a7f1e827a 100644 +index 80bc1dd5c3..9d47461cc2 100644 --- a/tests/system-offloads-traffic.at +++ b/tests/system-offloads-traffic.at @@ -16,7 +16,7 @@ ADD_NAMESPACES(at_ns0, at_ns1) @@ -74247,7 +119113,7 @@ index 80bc1dd5c3..1a7f1e827a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -168,3 +168,184 @@ matchall +@@ -168,3 +168,218 @@ matchall ]) OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP @@ -74432,6 +119298,40 @@ index 80bc1dd5c3..1a7f1e827a 100644 +/failed to offload flow/d +"]) +AT_CLEANUP ++ ++AT_SETUP([offloads - handling of geneve corrupted metadata - offloads enabled]) ++OVS_CHECK_GENEVE() ++ ++OVS_TRAFFIC_VSWITCHD_START( ++ [_ADD_BR([br-underlay]) -- \ ++ set bridge br0 other-config:hwaddr=f2:ff:00:00:00:01 -- \ ++ set bridge br-underlay other-config:hwaddr=f2:ff:00:00:00:02]) ++ ++AT_CHECK([ovs-vsctl set Open_vSwitch . 
other_config:hw-offload=true]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "actions=normal"]) ++AT_CHECK([ovs-ofctl add-flow br-underlay "actions=normal"]) ++ ++ADD_NAMESPACES(at_ns0) ++ ++dnl Set up underlay link from host into the namespace using veth pair. ++ADD_VETH(p0, at_ns0, br-underlay, "172.31.1.1/24", f2:ff:00:00:00:03) ++AT_CHECK([ip addr add dev br-underlay "172.31.1.100/24"]) ++AT_CHECK([ip link set dev br-underlay up]) ++ ++dnl Set up tunnel endpoints on OVS outside the namespace and with a native ++dnl linux device inside the namespace. ++ADD_OVS_TUNNEL([geneve], [br0], [at_gnv0], [172.31.1.1], [10.1.1.100/24]) ++ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], ++ [vni 0], [address f2:ff:00:00:00:04]) ++ ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 03 08 00 45 00 00 52 00 01 00 00 40 11 1f f7 ac 1f 01 01 ac 1f 01 64 de c1 17 c1 00 3e 59 e9 01 00 65 58 00 00 00 00 00 03 00 02 f2 ff 00 00 00 01 f2 ff 00 00 00 04 08 00 45 00 00 1c 00 01 00 00 40 01 64 7a 0a 01 01 01 0a 01 01 64 08 00 f7 ff 00 00 00 00 > /dev/null]) ++ ++OVS_WAIT_UNTIL([grep -q 'Invalid Geneve tunnel metadata' ovs-vswitchd.log]) ++ ++OVS_TRAFFIC_VSWITCHD_STOP(["/Invalid Geneve tunnel metadata on bridge br0 while processing icmp,in_port=1,vlan_tci=0x0000,dl_src=f2:ff:00:00:00:04,dl_dst=f2:ff:00:00:00:01,nw_src=10.1.1.1,nw_dst=10.1.1.100,nw_tos=0,nw_ecn=0,nw_ttl=64,nw_frag=no,icmp_type=8,icmp_code=0/d ++/Unable to parse geneve options/d"]) ++AT_CLEANUP diff --git a/tests/system-route.at b/tests/system-route.at index 1714273e35..270956d13f 100644 --- a/tests/system-route.at @@ -74464,7 +119364,7 @@ index 871a3bda4f..3d84a53182 100644 ]) diff --git a/tests/system-traffic.at b/tests/system-traffic.at -index f22d86e466..dc01ee0d8a 100644 +index f22d86e466..099e972812 100644 --- a/tests/system-traffic.at +++ b/tests/system-traffic.at @@ -10,13 +10,13 @@ ADD_NAMESPACES(at_ns0, at_ns1) @@ -74655,13 +119555,11 @@ index f22d86e466..dc01ee0d8a 100644 dnl Okay, now check the overlay with different packet sizes -NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl +NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl - 3 packets transmitted, 3 received, 0% packet loss, time 0ms - ]) --NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) +NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl - 3 packets transmitted, 3 received, 0% packet loss, time 0ms - ]) --NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) +NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl +3 packets transmitted, 3 received, 0% packet loss, time 0ms +]) @@ -74700,12 +119598,14 @@ index f22d86e466..dc01ee0d8a 100644 + +dnl First, check the underlay +NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 172.31.1.100 | FORMAT_PING], [0], [dnl -+3 packets transmitted, 3 received, 0% packet loss, time 0ms -+]) + 3 packets transmitted, 3 received, 0% packet loss, time 0ms + ]) +-NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl +dnl Okay, now check the overlay with different packet sizes +NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl -+3 packets 
transmitted, 3 received, 0% packet loss, time 0ms -+]) + 3 packets transmitted, 3 received, 0% packet loss, time 0ms + ]) +-NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl +NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl +3 packets transmitted, 3 received, 0% packet loss, time 0ms +]) @@ -74967,7 +119867,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_CHECK_GENEVE_UDP6ZEROCSUM() OVS_TRAFFIC_VSWITCHD_START() -@@ -704,25 +803,74 @@ ADD_NATIVE_TUNNEL6([geneve], [ns_gnv0], [at_ns0], [fc00::100], [10.1.1.1/24], +@@ -704,18 +803,143 @@ ADD_NATIVE_TUNNEL6([geneve], [ns_gnv0], [at_ns0], [fc00::100], [10.1.1.1/24], OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::100]) dnl First, check the underlay @@ -74982,13 +119882,11 @@ index f22d86e466..dc01ee0d8a 100644 +3 packets transmitted, 3 received, 0% packet loss, time 0ms +]) +NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl - 3 packets transmitted, 3 received, 0% packet loss, time 0ms - ]) --NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) +NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -W 2 10.1.1.100 | FORMAT_PING], [0], [dnl - 3 packets transmitted, 3 received, 0% packet loss, time 0ms - ]) --NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) + +OVS_TRAFFIC_VSWITCHD_STOP +AT_CLEANUP @@ -75022,9 +119920,9 @@ index f22d86e466..dc01ee0d8a 100644 + +dnl First, check the underlay. +NS_CHECK_EXEC([at_ns0], [ping6 -q -c 3 -i 0.3 -W 2 fc00::100 | FORMAT_PING], [0], [dnl - 3 packets transmitted, 3 received, 0% packet loss, time 0ms - ]) - ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++ +dnl Start tcpdump to capture the encapsulated packets. +NETNS_DAEMONIZE([at_ns0], [tcpdump -n -U -i p0 -w p0.pcap], [tcpdump.pid]) +sleep 1 @@ -75034,11 +119932,90 @@ index f22d86e466..dc01ee0d8a 100644 +sleep 1 + +dnl Stop OVS and tcpdump and verify the results. - OVS_TRAFFIC_VSWITCHD_STOP ++OVS_TRAFFIC_VSWITCHD_STOP + +ovs-pcap p0.pcap + +AT_CHECK([ovs-pcap p0.pcap | grep -Eq "^[[[:xdigit:]]]{24}86dd60000000003a1140fc000000000000000000000000000100fc000000000000000000000000000001[[[:xdigit:]]]{4}17c1003a[[[:xdigit:]]]{4}0000655800000000fffffffffffffa163e949d8008060001080006040001[[[:xdigit:]]]{12}0a0000f40000000000000a0000fe$"]) ++AT_CLEANUP ++ ++AT_SETUP([datapath - bridging two geneve tunnels]) ++OVS_CHECK_TUNNEL_TSO() ++OVS_CHECK_GENEVE() ++ ++OVS_TRAFFIC_VSWITCHD_START() ++ADD_BR([br-underlay-0]) ++ADD_BR([br-underlay-1]) ++ ++ADD_NAMESPACES(at_ns0) ++ADD_NAMESPACES(at_ns1) ++ ++dnl Set up underlay link from host into the namespaces using veth pairs. ++ADD_VETH(p0, at_ns0, br-underlay-0, "172.31.1.1/24") ++AT_CHECK([ip addr add dev br-underlay-0 "172.31.1.100/24"]) ++AT_CHECK([ip link set dev br-underlay-0 up]) ++ ++ADD_VETH(p1, at_ns1, br-underlay-1, "172.31.2.1/24") ++AT_CHECK([ip addr add dev br-underlay-1 "172.31.2.100/24"]) ++AT_CHECK([ip link set dev br-underlay-1 up]) ++ ++dnl Set up two OVS tunnel endpoints in a root namespace and two native ++dnl linux devices inside the test namespaces. 
++dnl ++dnl ns_gnv0 | ns_gnv1 ++dnl ip: 10.1.1.1/24 | ip: 10.1.1.2/24 ++dnl remote_ip: 172.31.1.100 | remote_ip: 172.31.2.100 ++dnl | | | ++dnl | | | ++dnl p0 | p1 ++dnl ip: 172.31.1.1/24 | ip: 172.31.2.1/24 ++dnl | NS0 | NS1 | ++dnl ---------|------------------------+------------------|-------------------- ++dnl | | ++dnl br-underlay-0: br-underlay-1: ++dnl ip: 172.31.1.100/24 ip: 172.31.2.100/24 ++dnl ovs-p0 ovs-p1 ++dnl | | ++dnl | br0 | ++dnl encap/decap --- ip: 10.1.1.100/24 --------- encap/decap ++dnl at_gnv0 ++dnl remote_ip: 172.31.1.1 ++dnl at_gnv1 ++dnl remote_ip: 172.31.2.1 ++dnl ++ADD_OVS_TUNNEL([geneve], [br0], [at_gnv0], [172.31.1.1], [10.1.1.100/24]) ++ADD_NATIVE_TUNNEL([geneve], [ns_gnv0], [at_ns0], [172.31.1.100], [10.1.1.1/24], ++ [vni 0]) ++ADD_OVS_TUNNEL([geneve], [br0], [at_gnv1], [172.31.2.1], [10.1.1.101/24]) ++ADD_NATIVE_TUNNEL([geneve], [ns_gnv1], [at_ns1], [172.31.2.100], [10.1.1.2/24], ++ [vni 0]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 "actions=normal"]) ++AT_CHECK([ovs-ofctl add-flow br-underlay-0 "actions=normal"]) ++AT_CHECK([ovs-ofctl add-flow br-underlay-1 "actions=normal"]) ++ ++dnl First, check both underlays. ++NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 172.31.1.100 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++NS_CHECK_EXEC([at_ns1], [ping -q -c 3 -i 0.3 -W 2 172.31.2.100 | FORMAT_PING], [0], [dnl + 3 packets transmitted, 3 received, 0% packet loss, time 0ms + ]) +-NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl ++ ++dnl Now, check the overlay with different packet sizes. ++NS_CHECK_EXEC([at_ns0], [ping -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING], [0], [dnl ++3 packets transmitted, 3 received, 0% packet loss, time 0ms ++]) ++NS_CHECK_EXEC([at_ns0], [ping -s 1600 -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING], [0], [dnl + 3 packets transmitted, 3 received, 0% packet loss, time 0ms + ]) +-NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -w 2 10.1.1.100 | FORMAT_PING], [0], [dnl ++NS_CHECK_EXEC([at_ns0], [ping -s 3200 -q -c 3 -i 0.3 -W 2 10.1.1.2 | FORMAT_PING], [0], [dnl + 3 packets transmitted, 3 received, 0% packet loss, time 0ms + ]) + +@@ -723,6 +947,7 @@ OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP AT_SETUP([datapath - ping over gre tunnel by simulated packets]) @@ -75046,7 +120023,12 @@ index f22d86e466..dc01ee0d8a 100644 OVS_CHECK_MIN_KERNEL(3, 10) OVS_TRAFFIC_VSWITCHD_START() -@@ -748,7 +896,7 @@ ip netns exec at_ns0 tcpdump -n -i p0 dst host 172.31.1.1 -l > p0.pcap & +@@ -744,11 +969,11 @@ ADD_OVS_TUNNEL([gre], [br0], [at_gre0], [172.31.1.1], [10.1.1.100/24]) + + IPTABLES_ACCEPT([br-underlay]) + +-ip netns exec at_ns0 tcpdump -n -i p0 dst host 172.31.1.1 -l > p0.pcap & ++NETNS_DAEMONIZE([at_ns0], [tcpdump -n -i p0 dst host 172.31.1.1 -l > p0.pcap 2>/dev/null], [tcpdump.pid]) sleep 1 dnl First, check the underlay. @@ -75055,7 +120037,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -759,16 +907,17 @@ dnl ADD_NATIVE_TUNNEL([gretap], [ns_gre0], [at_ns0], [172.31.1.100], [10.1.1.1/2 +@@ -759,16 +984,17 @@ dnl ADD_NATIVE_TUNNEL([gretap], [ns_gre0], [at_ns0], [172.31.1.100], [10.1.1.1/2 dnl Now, check the overlay by sending out raw arp and icmp packets. 
ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff00000003080045000042ec2c4000402ff3bcac1f0101ac1f016400006558fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=NORMAL" @@ -75075,7 +120057,12 @@ index f22d86e466..dc01ee0d8a 100644 OVS_CHECK_MIN_KERNEL(3, 10) OVS_TRAFFIC_VSWITCHD_START() -@@ -795,7 +944,7 @@ ip netns exec at_ns0 tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap & +@@ -791,11 +1017,11 @@ ADD_OVS_TUNNEL([erspan], [br0], [at_erspan0], [172.31.1.1], [10.1.1.100/24], [op + + IPTABLES_ACCEPT([br-underlay]) + +-ip netns exec at_ns0 tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap & ++NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap 2>/dev/null], [tcpdump.pid]) sleep 1 dnl First, check the underlay @@ -75084,7 +120071,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -803,20 +952,21 @@ dnl Okay, now send out an arp request from 10.1.1.1 for 10.1.1.100 in erspan. +@@ -803,20 +1029,21 @@ dnl Okay, now send out an arp request from 10.1.1.1 for 10.1.1.100 in erspan. ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000308004500004e151d4000402fcac0ac1f0101ac1f0164100088be000000061000000100000007fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=normal" dnl 0002 is arp reply, followed by mac address of 10.1.1.100. @@ -75110,7 +120097,12 @@ index f22d86e466..dc01ee0d8a 100644 OVS_CHECK_MIN_KERNEL(3, 10) OVS_TRAFFIC_VSWITCHD_START() -@@ -847,29 +997,30 @@ ip netns exec at_ns0 tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap & +@@ -843,33 +1070,34 @@ dnl around it. + iptables -I INPUT 1 -i br-underlay -j ACCEPT + on_exit 'iptables -D INPUT 1' + +-ip netns exec at_ns0 tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap & ++NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host 172.31.1.1 -l > p0.pcap 2>/dev/null], [tcpdump.pid]) sleep 1 dnl First, check the underlay. @@ -75147,7 +120139,12 @@ index f22d86e466..dc01ee0d8a 100644 OVS_CHECK_MIN_KERNEL(3, 10) OVS_TRAFFIC_VSWITCHD_START() -@@ -903,7 +1054,7 @@ ip netns exec at_ns0 tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap & +@@ -899,11 +1127,11 @@ dnl around it. + ip6tables -I INPUT 1 -i br-underlay -j ACCEPT + on_exit 'ip6tables -D INPUT 1' + +-ip netns exec at_ns0 tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap & ++NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap 2>/dev/null], [tcpdump.pid]) sleep 1 dnl First, check the underlay. @@ -75156,7 +120153,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -911,20 +1062,21 @@ dnl Okay, now send raw arp request and icmp echo request. +@@ -911,20 +1139,21 @@ dnl Okay, now send raw arp request and icmp echo request. ovs-ofctl -O OpenFlow13 packet-out br-underlay "in_port=1 packet=f2ff00000002f2ff0000000386dd60008531003a2f40fc000100000000000000000000000001fc000100000000000000000000000100100088be000000051000007b00000007fffffffffffff2ff0000000408060001080006040001f2ff000000040a0101010000000000000a010164 actions=normal" dnl Check arp reply. @@ -75183,7 +120180,12 @@ index f22d86e466..dc01ee0d8a 100644 OVS_CHECK_MIN_KERNEL(3, 10) OVS_TRAFFIC_VSWITCHD_START() -@@ -958,22 +1110,22 @@ ip netns exec at_ns0 tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap & +@@ -954,26 +1183,26 @@ dnl around it. 
+ ip6tables -I INPUT 1 -i br-underlay -j ACCEPT + on_exit 'ip6tables -D INPUT 1' + +-ip netns exec at_ns0 tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap & ++NETNS_DAEMONIZE([at_ns0], [tcpdump -n -x -i p0 dst host fc00:100::1 -l > p0.pcap 2>/dev/null], [tcpdump.pid]) sleep 1 dnl First, check the underlay. @@ -75212,7 +120214,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -997,16 +1149,16 @@ priority=10 in_port=2,ip,actions=clone(mod_dl_src(ae:c6:7e:54:8d:4d),mod_dl_dst( +@@ -997,16 +1226,16 @@ priority=10 in_port=2,ip,actions=clone(mod_dl_src(ae:c6:7e:54:8d:4d),mod_dl_dst( AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-ofctl monitor br0 65534 invalid_ttl --detach --no-chdir --pidfile 2> ofctl_monitor.log]) @@ -75233,7 +120235,7 @@ index f22d86e466..dc01ee0d8a 100644 ]) OVS_TRAFFIC_VSWITCHD_STOP -@@ -1038,11 +1190,11 @@ table=1,priority=10 actions=normal +@@ -1038,11 +1267,11 @@ table=1,priority=10 actions=normal AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-ofctl add-flows br1 flows.txt]) @@ -75247,7 +120249,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -1077,11 +1229,11 @@ table=3,priority=10 actions=normal +@@ -1077,11 +1306,11 @@ table=3,priority=10 actions=normal AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) AT_CHECK([ovs-ofctl add-flows br1 flows.txt]) @@ -75261,7 +120263,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) OVS_TRAFFIC_VSWITCHD_STOP -@@ -1738,6 +1890,69 @@ masks-cache:size:256 +@@ -1738,6 +1967,120 @@ masks-cache:size:256 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP @@ -75328,10 +120330,70 @@ index f22d86e466..dc01ee0d8a 100644 +OVS_TRAFFIC_VSWITCHD_STOP +AT_CLEANUP + ++AT_SETUP([datapath - Neighbor Discovery with loose match]) ++OVS_TRAFFIC_VSWITCHD_START() ++ ++ADD_NAMESPACES(at_ns0, at_ns1) ++ ++ADD_VETH(p0, at_ns0, br0, "2001::1:0:392/64", 36:b1:ee:7c:01:03) ++ADD_VETH(p1, at_ns1, br0, "2001::1:0:9/64", 36:b1:ee:7c:01:02) ++ ++dnl Set up flows for moving icmp ND Solicit around. This should be the ++dnl same for the other ND types. ++AT_DATA([flows.txt], [dnl ++table=0 priority=95 icmp6,icmp_type=136,nd_target=2001::1:0:9 actions=resubmit(,10) ++table=0 priority=95 icmp6,icmp_type=136,nd_target=2001::1:0:392 actions=resubmit(,10) ++table=0 priority=65 actions=resubmit(,20) ++table=10 actions=NORMAL ++table=20 actions=drop ++]) ++AT_CHECK([ovs-ofctl del-flows br0]) ++AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) ++ ++dnl Send a mismatching neighbor discovery. ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 f1 f2 20 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++dnl Send a matching neighbor discovery. 
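++dnl Its nd_target, 2001::1:0:392, matches one of the priority-95 flows above.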
++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 fe 5f 20 00 00 00 20 01 00 00 00 00 00 00 00 00 00 01 00 00 03 92 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | strip_stats | strip_used | dnl ++ strip_key32 | strip_ptype | strip_eth | strip_recirc | dnl ++ grep ",nd" | sort], [0], [dnl ++recirc_id(<recirc>),in_port(2),eth(src=36:b1:ee:7c:01:03,dst=36:b1:ee:7c:01:02),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=2001::1:0:392), packets:0, bytes:0, used:never, actions:1,3 ++recirc_id(<recirc>),in_port(2),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=3000::1), packets:0, bytes:0, used:never, actions:drop ++]) ++ ++OVS_WAIT_UNTIL([ovs-appctl dpctl/dump-flows | grep ",nd" | wc -l | grep -E ^0]) ++ ++dnl Send a matching neighbor discovery. ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 fe 5f 20 00 00 00 20 01 00 00 00 00 00 00 00 00 00 01 00 00 03 92 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++dnl Send a mismatching neighbor discovery. ++NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 86 dd 60 00 00 00 00 20 3a ff fe 80 00 00 00 00 00 00 f8 16 3e ff fe 04 66 04 fe 80 00 00 00 00 00 00 f8 16 3e ff fe a7 dd 0e 88 00 f1 f2 20 00 00 00 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 02 01 36 b1 ee 7c 01 03 > /dev/null]) ++ ++AT_CHECK([ovs-appctl dpctl/dump-flows | strip_stats | strip_used | dnl ++ strip_key32 | strip_ptype | strip_eth | strip_recirc | dnl ++ grep ",nd" | sort], [0], [dnl ++recirc_id(<recirc>),in_port(2),eth(src=36:b1:ee:7c:01:03,dst=36:b1:ee:7c:01:02),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=2001::1:0:392), packets:0, bytes:0, used:never, actions:1,3 ++recirc_id(<recirc>),in_port(2),eth_type(0x86dd),ipv6(proto=58,frag=no),icmpv6(type=136),nd(target=3000::1), packets:0, bytes:0, used:never, actions:drop ++]) ++ ++OVS_TRAFFIC_VSWITCHD_STOP ++AT_CLEANUP ++ AT_BANNER([MPLS]) AT_SETUP([mpls - encap header dp-support]) -@@ -1765,10 +1980,10 @@ dnl p1(at_ns1) interface +@@ -1756,7 +2099,7 @@ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"]) + + rm -rf p1.pcap +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is a icmp packet. 
pkt=eth/ip/icmp +@@ -1765,10 +2108,10 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 > /dev/null]) dnl Check the expected mpls encapsulated packet on the egress interface @@ -75346,7 +120408,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -1797,10 +2012,10 @@ dnl p1(at_ns1) interface +@@ -1788,7 +2131,7 @@ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"]) + + rm -rf p1.pcap +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is a icmp packet. pkt=eth/ip/icmp +@@ -1797,10 +2140,10 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 > /dev/null]) dnl Check the expected mpls encapsulated packet on the egress interface @@ -75361,7 +120432,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -1830,10 +2045,10 @@ dnl p1(at_ns1) interface +@@ -1821,7 +2164,7 @@ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls_mc),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"]) + + rm -rf p1.pcap +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is a icmp packet. pkt=eth/ip/icmp +@@ -1830,10 +2173,10 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 > /dev/null]) dnl Check the expected mpls encapsulated packet on the egress interface @@ -75376,7 +120456,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -1862,10 +2077,10 @@ dnl p1(at_ns1) interface +@@ -1853,7 +2196,7 @@ dnl eth/ip/icmp --> OVS --> eth/mpls/eth/ip/icmp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x0800 actions=encap(mpls_mc),set_mpls_label:2,encap(ethernet),set_field:00:00:00:00:00:02->dl_dst,set_field:00:00:00:00:00:01->dl_src,ovs-p1"]) + + rm -rf p1.pcap +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is a icmp packet. 
pkt=eth/ip/icmp +@@ -1862,10 +2205,10 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 > /dev/null]) dnl Check the expected mpls encapsulated packet on the egress interface @@ -75391,7 +120480,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -1896,13 +2111,13 @@ dnl p1(at_ns1) interface +@@ -1887,7 +2230,7 @@ dnl eth/mpls/eth/ip/icmp --> OVS --> eth/ip/icmp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x8847,mpls_label=2 actions=decap(),decap(packet_type(ns=0,type=0)),ovs-p1"]) + + rm -rf p1.pcap +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is an mpls packet encapsulating ethernet packet. pkt=eth/mpls/eth/ip/icmp +@@ -1896,13 +2239,13 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 00 00 00 00 00 02 00 00 00 00 00 01 88 47 00 00 21 40 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 > /dev/null]) dnl Check the expected decapsulated on the egress interface @@ -75412,7 +120510,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP -@@ -1933,14 +2148,50 @@ dnl p1(at_ns1) interface +@@ -1924,7 +2267,7 @@ dnl eth/mpls/eth/ip/icmp --> OVS --> eth/ip/icmp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x8847,mpls_label=2 actions=decap(),decap(packet_type(ns=0,type=0)),ovs-p1"]) + + rm -rf p1.pcap +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is an mpls packet encapsulating ethernet packet. pkt=eth/mpls/eth/ip/icmp +@@ -1933,14 +2276,50 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 00 00 00 00 00 02 00 00 00 00 00 01 88 47 00 00 21 40 36 b1 ee 7c 01 02 36 b1 ee 7c 01 03 08 00 45 00 00 54 03 44 40 00 40 01 21 61 0a 01 01 01 0a 01 01 02 08 00 ef ac 7c e4 00 03 5b 2c 1f 61 00 00 00 00 50 0b 02 00 00 00 00 00 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 > /dev/null]) dnl Check the expected decapsulated on the egress interface @@ -75445,7 +120552,7 @@ index f22d86e466..dc01ee0d8a 100644 + +ADD_VETH(p0, at_ns0, br0, "10.1.1.1/24") +ADD_VETH(p1, at_ns1, br0, "10.1.1.2/24") - ++ +dnl Adding a custom qdisc to ovs-p1, ovs-p0 will have the default qdisc. +AT_CHECK([tc qdisc add dev ovs-p1 root noqueue]) +AT_CHECK([tc qdisc show dev ovs-p1 | grep -q noqueue]) @@ -75458,7 +120565,7 @@ index f22d86e466..dc01ee0d8a 100644 + other_config:min-rate=2000000 other_config:max-rate=3000000 dnl + other_config:burst=3000000], + [ignore], [ignore]) -+ + +dnl Wait for qdiscs to be applied. 
+OVS_WAIT_UNTIL([tc qdisc show dev ovs-p0 | grep -q htb]) +OVS_WAIT_UNTIL([tc qdisc show dev ovs-p1 | grep -q htb]) @@ -75470,7 +120577,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -1985,9 +2236,9 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -1985,9 +2364,9 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) dnl Check this output. We only see the latter two packets, not the first. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN2 (xid=0x0): total_len=42 in_port=1 (via action) data_len=42 (unbuffered) @@ -75482,7 +120589,7 @@ index f22d86e466..dc01ee0d8a 100644 ]) OVS_TRAFFIC_VSWITCHD_STOP -@@ -2033,9 +2284,9 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -2033,9 +2412,9 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) dnl Check this output. We only see the latter two packets, not the first. AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=42 in_port=1 (via action) data_len=42 (unbuffered) @@ -75494,7 +120601,7 @@ index f22d86e466..dc01ee0d8a 100644 ]) dnl -@@ -2101,7 +2352,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl +@@ -2101,7 +2480,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl ]) dnl Test ICMP traffic @@ -75503,7 +120610,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2141,7 +2392,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 +@@ -2141,7 +2520,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) dnl Pings from ns0->ns1 should work fine. @@ -75512,7 +120619,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2182,7 +2433,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 +@@ -2182,7 +2561,7 @@ priority=100,in_port=2,icmp,ct_state=+trk+est,action=1 AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) dnl Pings from ns0->ns1 should work fine. @@ -75521,7 +120628,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2282,7 +2533,7 @@ NS_CHECK_EXEC([at_ns1], [ping6 -q -c 3 -i 0.3 -w 2 fc00::1 | FORMAT_PING], [0], +@@ -2282,7 +2661,7 @@ NS_CHECK_EXEC([at_ns1], [ping6 -q -c 3 -i 0.3 -w 2 fc00::1 | FORMAT_PING], [0], ]) dnl Pings from ns0->ns1 should work fine. @@ -75530,7 +120637,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -2980,6 +3231,15 @@ NXST_FLOW reply: +@@ -2980,6 +3359,15 @@ NXST_FLOW reply: table=1, priority=100,ct_state=+est+trk,in_port=1 actions=output:2 ]) @@ -75546,7 +120653,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -3140,11 +3400,11 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) +@@ -3140,11 +3528,11 @@ OVS_APP_EXIT_AND_WAIT([ovs-ofctl]) dnl Check this output. We only see the latter two packets, not the first. AT_CHECK([cat ofctl_monitor.log | grep -v ff02 | grep -v fe80 | grep -v no_match], [0], [dnl NXT_PACKET_IN2 (xid=0x0): table_id=1 cookie=0x0 total_len=75 ct_state=inv|trk,ip,in_port=2 (via action) data_len=75 (unbuffered) @@ -75561,7 +120668,7 @@ index f22d86e466..dc01ee0d8a 100644 ]) AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.1)], [0], [dnl -@@ -3181,12 +3441,12 @@ dnl Modify userspace conntrack fragmentation handling. +@@ -3181,12 +3569,12 @@ dnl Modify userspace conntrack fragmentation handling. DPCTL_MODIFY_FRAGMENTATION() dnl Ipv4 fragmentation connectivity check. 
@@ -75576,7 +120683,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3258,12 +3518,12 @@ dnl Modify userspace conntrack fragmentation handling. +@@ -3258,12 +3646,12 @@ dnl Modify userspace conntrack fragmentation handling. DPCTL_MODIFY_FRAGMENTATION() dnl Ipv4 fragmentation connectivity check. @@ -75591,7 +120698,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3303,22 +3563,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) +@@ -3303,22 +3691,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) dnl Ipv4 fragmentation connectivity check. @@ -75618,7 +120725,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3345,6 +3605,11 @@ AT_CHECK([ovs-ofctl bundle br0 bundle.txt]) +@@ -3345,6 +3733,11 @@ AT_CHECK([ovs-ofctl bundle br0 bundle.txt]) AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl ]) @@ -75630,7 +120737,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -3472,12 +3737,12 @@ dnl "connect: Cannot assign requested address" +@@ -3472,12 +3865,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl Ipv6 fragmentation connectivity check. @@ -75645,7 +120752,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3554,12 +3819,12 @@ dnl "connect: Cannot assign requested address" +@@ -3554,12 +3947,12 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl Ipv4 fragmentation connectivity check. @@ -75660,7 +120767,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3597,22 +3862,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) +@@ -3597,22 +3990,22 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00:1::4]) dnl Ipv6 fragmentation connectivity check. 
@@ -75687,7 +120794,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3823,18 +4088,18 @@ ADD_NATIVE_TUNNEL([vxlan], [at_vxlan1], [at_ns0], [172.31.1.100], [10.1.1.1/24], +@@ -3823,18 +4216,18 @@ ADD_NATIVE_TUNNEL([vxlan], [at_vxlan1], [at_ns0], [172.31.1.100], [10.1.1.1/24], [id 0 dstport 4789]) dnl First, check the underlay @@ -75710,7 +120817,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -3883,18 +4148,18 @@ dnl "connect: Cannot assign requested address" +@@ -3883,18 +4276,18 @@ dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) dnl First, check the underlay @@ -75733,7 +120840,18 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4007,7 +4272,7 @@ dnl The default udp_single and icmp_first timeouts are 30 seconds in +@@ -3919,8 +4312,8 @@ NS_CHECK_EXEC([at_ns0], [ip route add 10.1.1.0/24 via 10.2.1.2]) + NS_CHECK_EXEC([at_ns1], [ip route add 10.1.1.0/24 via 10.2.1.1]) + + dnl Solely for debugging when things go wrong +-NS_EXEC([at_ns0], [tcpdump -l -n -xx -U -i p0 -w p0.pcap >tcpdump.out 2>/dev/null &]) +-NS_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 -w p1.pcap >tcpdump.out 2>/dev/null &]) ++NETNS_DAEMONIZE([at_ns0], [tcpdump -l -n -xx -U -i p0 -w p0.pcap >tcpdump.out 2>/dev/null], [tcpdump_0.pid]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 -w p1.pcap >tcpdump.out 2>/dev/null], [tcpdump_1.pid]) + + AT_DATA([flows.txt], [dnl + table=0,arp,actions=normal +@@ -4007,7 +4400,7 @@ dnl The default udp_single and icmp_first timeouts are 30 seconds in dnl kernel DP, and 60 seconds in userspace DP. dnl Send ICMP and UDP traffic @@ -75742,7 +120860,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4033,7 +4298,7 @@ done +@@ -4033,7 +4426,7 @@ done AT_CHECK([ovs-vsctl --may-exist add-zone-tp $DP_TYPE zone=5 udp_first=1 udp_single=1 icmp_first=1 icmp_reply=1]) dnl Send ICMP and UDP traffic @@ -75751,7 +120869,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4051,7 +4316,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl +@@ -4051,7 +4444,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2)], [0], [dnl ]) dnl Re-send ICMP and UDP traffic to test conntrack cache @@ -75760,7 +120878,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4072,7 +4337,7 @@ dnl Set the timeout policy to default again. +@@ -4072,7 +4465,7 @@ dnl Set the timeout policy to default again. 
AT_CHECK([ovs-vsctl del-zone-tp $DP_TYPE zone=5]) dnl Send ICMP and UDP traffic @@ -75769,7 +120887,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) AT_CHECK([ovs-ofctl -O OpenFlow13 packet-out br0 "in_port=1 packet=50540000000a50540000000908004500001c000000000011a4cd0a0101010a0101020001000200080000 actions=resubmit(,0)"]) -@@ -4100,15 +4365,15 @@ action=normal +@@ -4100,15 +4493,15 @@ action=normal AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -75788,7 +120906,7 @@ index f22d86e466..dc01ee0d8a 100644 "1616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161610a, actions=ct(table=1)"]) AT_CHECK([ovs-appctl dpctl/dump-flows | head -2 | tail -1 | grep -q -e ["]udp[(]src=5001["]]) -@@ -4265,7 +4530,7 @@ table=2,in_port=1,ip,ct_state=+trk+est,ct_zone=2,action=LOCAL +@@ -4265,7 +4658,7 @@ table=2,in_port=1,ip,ct_state=+trk+est,ct_zone=2,action=LOCAL AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -75797,7 +120915,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -4336,7 +4601,7 @@ table=4,priority=100,ip,action=output:NXM_NX_REG0[[]] +@@ -4336,7 +4729,7 @@ table=4,priority=100,ip,action=output:NXM_NX_REG0[[]] AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) @@ -75806,16 +120924,16 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -5360,7 +5625,7 @@ table=10 priority=0 action=drop +@@ -5360,7 +5753,7 @@ table=10 priority=0 action=drop AT_CHECK([ovs-ofctl --bundle add-flows br0 flows.txt]) rm p0.pcap -tcpdump -U -i ovs-p0 -w p0.pcap & -+tcpdump -n -U -i ovs-p0 -w p0.pcap & ++OVS_DAEMONIZE([tcpdump -n -U -i ovs-p0 -w p0.pcap], [tcpdump.pid]) sleep 1 dnl UDP packets from ns0->ns1 should solicit "destination unreachable" response. 
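Several hunks in this file make the same mechanical change: a tcpdump started with a trailing "&" and cleaned up via on_exit "kill $!" becomes OVS_DAEMONIZE (or NETNS_DAEMONIZE, its network-namespace variant) with an explicit pidfile. A minimal shell sketch of what such a helper has to do, assuming only the pidfile argument and the on_exit cleanup hook visible in these hunks; the real macro text in the test harness may differ:

  # Hypothetical stand-in for OVS_DAEMONIZE([command], [pidfile]).
  daemonize() {
      eval "$1" &               # start the command in the background
      echo $! > "$2"            # record its pid in the given pidfile
      on_exit "kill $(cat $2)"  # kill it automatically when the test exits
  }

  # Usage mirroring the hunk above:
  daemonize 'tcpdump -n -U -i ovs-p0 -w p0.pcap' tcpdump.pid

Tracking the pid through a file instead of "$!" keeps the cleanup working even when several captures run in the same test.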
-@@ -5384,7 +5649,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst= +@@ -5384,7 +5777,7 @@ AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(10.1.1.2) | sed -e 's/dst= udp,orig=(src=10.1.1.1,dst=10.1.1.2,sport=<cleared>,dport=<cleared>),reply=(src=10.1.1.2,dst=10.1.1.2XX,sport=<cleared>,dport=<cleared>),mark=1 ]) @@ -75824,7 +120942,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -6074,7 +6339,7 @@ dnl waiting, we get occasional failures due to the following error: +@@ -6074,7 +6467,7 @@ dnl waiting, we get occasional failures due to the following error: dnl "connect: Cannot assign requested address" OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::240]) @@ -75833,12 +120951,12 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6128,13 +6393,13 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) +@@ -6128,13 +6521,13 @@ OVS_WAIT_UNTIL([ip netns exec at_ns0 ping6 -c 1 fc00::2]) AT_CHECK([ovs-appctl dpctl/flush-conntrack]) rm p0.pcap -tcpdump -U -i ovs-p0 -w p0.pcap & -+tcpdump -n -U -i ovs-p0 -w p0.pcap & ++OVS_DAEMONIZE([tcpdump -n -U -i ovs-p0 -w p0.pcap], [tcpdump.pid]) sleep 1 dnl UDP packets from ns0->ns1 should solicit "destination unreachable" response. @@ -75849,7 +120967,7 @@ index f22d86e466..dc01ee0d8a 100644 AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fc00::2)], [0], [dnl udp,orig=(src=fc00::1,dst=fc00::2,sport=<cleared>,dport=<cleared>),reply=(src=fc00::2,dst=fc00::240,sport=<cleared>,dport=<cleared>) -@@ -6454,7 +6719,7 @@ on_exit 'ovs-appctl revalidator/purge' +@@ -6454,7 +6847,7 @@ on_exit 'ovs-appctl revalidator/purge' on_exit 'ovs-appctl dpif/dump-flows br0' dnl Should work with the virtual IP address through NAT @@ -75858,7 +120976,7 @@ index f22d86e466..dc01ee0d8a 100644 echo Request $i NS_CHECK_EXEC([at_ns1], [wget 10.1.1.64 -t 5 -T 1 --retry-connrefused -v -o wget$i.log]) done -@@ -6743,6 +7008,239 @@ AT_CHECK([ovs-ofctl dump-flows br0 | grep table=2, | OFPROTO_CLEAR_DURATION_IDLE +@@ -6743,6 +7136,239 @@ AT_CHECK([ovs-ofctl dump-flows br0 | grep table=2, | OFPROTO_CLEAR_DURATION_IDLE OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP @@ -76098,7 +121216,7 @@ index f22d86e466..dc01ee0d8a 100644 AT_BANNER([802.1ad]) AT_SETUP([802.1ad - vlan_limit]) -@@ -6768,7 +7266,7 @@ dnl CVLAN traffic should match the flow and drop +@@ -6768,7 +7394,7 @@ dnl CVLAN traffic should match the flow and drop AT_CHECK([ovs-appctl revalidator/purge]) AT_CHECK([ovs-vsctl set Open_vSwitch . 
other_config:vlan-limit=1]) AT_CHECK([ovs-ofctl add-flow br0 "priority=100 dl_type=0x8100 action=drop"]) @@ -76107,7 +121225,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -6818,11 +7316,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) +@@ -6818,11 +7444,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -76121,7 +121239,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6874,11 +7372,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) +@@ -6874,11 +7500,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br2 flows-customer-br.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -76135,7 +121253,7 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -6926,24 +7424,24 @@ AT_CHECK([ovs-vsctl set port ovs-p2 vlan_mode=dot1q-tunnel tag=4094 cvlans=100,2 +@@ -6926,24 +7552,24 @@ AT_CHECK([ovs-vsctl set port ovs-p2 vlan_mode=dot1q-tunnel tag=4094 cvlans=100,2 OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.3.2.2]) @@ -76165,7 +121283,7 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP(["/dropping VLAN \(0\|300\) packet received on dot1q-tunnel port/d"]) AT_CLEANUP -@@ -6972,11 +7470,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows-br0.txt]) +@@ -6972,11 +7598,11 @@ AT_CHECK([ovs-ofctl --bundle add-flows br0 flows-br0.txt]) OVS_WAIT_UNTIL([ip netns exec at_ns0 ping -c 1 10.2.2.2]) @@ -76179,7 +121297,16 @@ index f22d86e466..dc01ee0d8a 100644 3 packets transmitted, 3 received, 0% packet loss, time 0ms ]) -@@ -7007,12 +7505,12 @@ dnl p1(at_ns1) interface +@@ -6998,7 +7624,7 @@ dnl The flow will encap a nsh header to the TCP syn packet + dnl eth/ip/tcp --> OVS --> eth/nsh/eth/ip/tcp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,in_port=ovs-p0,ip,actions=encap(nsh(md_type=1)),set_field:0x1234->nsh_spi,set_field:0x11223344->nsh_c1,encap(ethernet),set_field:f2:ff:00:00:00:02->dl_dst,set_field:f2:ff:00:00:00:01->dl_src,ovs-p1"]) + +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is a TCP syn packet. pkt=eth/ip/tcp +@@ -7007,12 +7633,12 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null]) dnl Check the expected nsh encapsulated packet on the egress interface @@ -76198,7 +121325,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -7039,10 +7537,10 @@ dnl p1(at_ns1) interface +@@ -7030,7 +7656,7 @@ dnl The flow will decap a nsh header which in turn carries a TCP syn packet + dnl eth/nsh/eth/ip/tcp --> OVS --> eth/ip/tcp + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,in_port=ovs-p0,dl_type=0x894f, actions=decap(),decap(), ovs-p1"]) + +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is NSH packet with TCP syn payload. 
pkt=eth/nsh/eth/ip/tcp +@@ -7039,10 +7665,10 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 00 64 03 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null]) dnl Check the expected de-capsulated TCP packet on the egress interface @@ -76213,7 +121349,16 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -7072,12 +7570,12 @@ dnl p1(at_ns1) interface +@@ -7062,7 +7688,7 @@ dnl The flow will add another NSH header with nsh_spi=0x101, nsh_si=4, + dnl nsh_ttl=7 and change the md1 context + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,in_port=ovs-p0,dl_type=0x894f,nsh_spi=0x100,nsh_si=0x03,actions=decap(),decap(),encap(nsh(md_type=1)),set_field:0x07->nsh_ttl,set_field:0x0101->nsh_spi,set_field:0x04->nsh_si,set_field:0x100f0e0d->nsh_c1,set_field:0x0c0b0a09->nsh_c2,set_field:0x08070605->nsh_c3,set_field:0x04030201->nsh_c4,encap(ethernet),set_field:f2:ff:00:00:00:02->dl_dst,set_field:f2:ff:00:00:00:01->dl_src,ovs-p1"]) + +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) + sleep 1 + + dnl The hex dump is NSH packet with TCP syn payload. pkt=eth/nsh/eth/ip/tcp +@@ -7072,12 +7698,12 @@ dnl p1(at_ns1) interface NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 01 00 03 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null]) dnl Check the expected NSH packet with new fields in the header @@ -76232,7 +121377,17 @@ index f22d86e466..dc01ee0d8a 100644 OVS_TRAFFIC_VSWITCHD_STOP AT_CLEANUP -@@ -7106,23 +7604,23 @@ dnl First send packet from at_ns0 --> OVS with SPI=0x100 and SI=2 +@@ -7098,31 +7724,31 @@ dnl packet to to at_ns2. 
+ AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x894f,nsh_spi=0x100,nsh_si=0x02,actions=ovs-p1"]) + AT_CHECK([ovs-ofctl -Oopenflow13 add-flow br0 "table=0,priority=100,dl_type=0x894f,nsh_spi=0x100,nsh_si=0x01,actions=ovs-p2"]) + +-NS_CHECK_EXEC([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap &]) +-NS_CHECK_EXEC([at_ns2], [tcpdump -l -n -xx -U -i p2 > p2.pcap &]) ++NETNS_DAEMONIZE([at_ns1], [tcpdump -l -n -xx -U -i p1 > p1.pcap], [tcpdump.pid]) ++NETNS_DAEMONIZE([at_ns2], [tcpdump -l -n -xx -U -i p2 > p2.pcap], [tcpdump2.pid]) + sleep 1 + + dnl First send packet from at_ns0 --> OVS with SPI=0x100 and SI=2 NS_CHECK_EXEC([at_ns0], [$PYTHON3 $srcdir/sendpkt.py p0 f2 ff 00 00 00 02 f2 ff 00 00 00 01 89 4f 02 06 01 03 00 01 00 02 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 f2 00 00 00 00 02 f2 00 00 00 00 01 08 00 45 00 00 28 00 01 00 00 40 06 b0 13 c0 a8 00 0a 0a 00 00 0a 04 00 08 00 00 00 00 c8 00 00 00 00 50 02 20 00 b8 5e 00 00 > /dev/null]) dnl Check for the above packet on p1 interface @@ -76880,7 +122035,7 @@ index a38bf9e6df..86a223caff 100644 for (i = 0; i < n_threads; i++) { printf(" %6" PRIu64, thread_working_ms[i]); diff --git a/tests/test-ovsdb.c b/tests/test-ovsdb.c -index ca4e87b811..cd1c31a6c2 100644 +index ca4e87b811..461b6bc928 100644 --- a/tests/test-ovsdb.c +++ b/tests/test-ovsdb.c @@ -294,11 +294,24 @@ print_and_free_ovsdb_error(struct ovsdb_error *error) @@ -77278,7 +122433,21 @@ index ca4e87b811..cd1c31a6c2 100644 } else { /* Wait for update. */ for (;;) { -@@ -2701,8 +2788,8 @@ do_idl(struct ovs_cmdl_context *ctx) +@@ -2689,6 +2776,13 @@ do_idl(struct ovs_cmdl_context *ctx) + } else { + print_idl(idl, step++, terse); + } ++ ++ /* Just run IDL forever for a simple monitoring. */ ++ if (!strcmp(arg, "monitor")) { ++ seqno = ovsdb_idl_get_seqno(idl); ++ i--; ++ continue; ++ } + } + seqno = ovsdb_idl_get_seqno(idl); + +@@ -2701,8 +2795,8 @@ do_idl(struct ovs_cmdl_context *ctx) arg + strlen(remote_s), ovsdb_idl_is_connected(idl) ? "true" : "false"); } else if (!strncmp(arg, cond_s, strlen(cond_s))) { @@ -77290,7 +122459,7 @@ index ca4e87b811..cd1c31a6c2 100644 idl_set(idl, arg, step++); } else { diff --git a/tests/test-ovsdb.py b/tests/test-ovsdb.py -index 853264f22b..3c76835d4b 100644 +index 853264f22b..b6144e0d53 100644 --- a/tests/test-ovsdb.py +++ b/tests/test-ovsdb.py @@ -37,7 +37,7 @@ vlog.init(None) @@ -77382,24 +122551,48 @@ index 853264f22b..3c76835d4b 100644 step += 1 for command in commands: -@@ -722,6 +735,16 @@ def do_idl(schema_file, remote, *commands): +@@ -722,18 +735,35 @@ def do_idl(schema_file, remote, *commands): if command.startswith("+"): # The previous transaction didn't change anything. command = command[1:] +- else: +- # Wait for update. +- while idl.change_seqno == seqno and not idl.run(): + elif command.startswith("^"): + # Wait for condition change to be acked by the server. + command = command[1:] + while idl.cond_seqno != next_cond_seqno and not idl.run(): -+ rpc.run() -+ -+ poller = ovs.poller.Poller() -+ idl.wait(poller) -+ rpc.wait(poller) -+ poller.block() - else: - # Wait for update. - while idl.change_seqno == seqno and not idl.run(): -@@ -743,9 +766,7 @@ def do_idl(schema_file, remote, *commands): + rpc.run() + + poller = ovs.poller.Poller() + idl.wait(poller) + rpc.wait(poller) + poller.block() ++ else: ++ # Wait for update. 
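++            # Keep running the IDL until change_seqno advances, blocking
++            # on a poller that watches both the IDL and the test RPC
++            # connection rather than spinning.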
++ while True: ++ while idl.change_seqno == seqno and not idl.run(): ++ rpc.run() + +- print_idl(idl, step, terse) +- step += 1 ++ poller = ovs.poller.Poller() ++ idl.wait(poller) ++ rpc.wait(poller) ++ poller.block() ++ ++ print_idl(idl, step, terse) ++ step += 1 ++ ++ # Run IDL forever in case of a simple monitor, otherwise ++ # break and execute the command. ++ seqno = idl.change_seqno ++ if command != "monitor": ++ break + + seqno = idl.change_seqno + +@@ -743,9 +773,7 @@ def do_idl(schema_file, remote, *commands): step += 1 idl.force_reconnect() elif "condition" in command: @@ -77410,7 +122603,7 @@ index 853264f22b..3c76835d4b 100644 step += 1 elif not command.startswith("["): idl_set(idl, command, step) -@@ -1012,14 +1033,14 @@ def main(argv): +@@ -1012,14 +1040,14 @@ def main(argv): sys.exit(1) func, n_args = commands[command_name] @@ -77471,6 +122664,19 @@ index 965f3c49f3..bb17092bf0 100644 +} + +OVSTEST_REGISTER("test-rcu", test_rcu); +diff --git a/tests/test-rstp.c b/tests/test-rstp.c +index 01aeaf8478..56028b1d97 100644 +--- a/tests/test-rstp.c ++++ b/tests/test-rstp.c +@@ -466,6 +466,8 @@ test_rstp_main(int argc, char *argv[]) + vlog_set_pattern(VLF_CONSOLE, "%c|%p|%m"); + vlog_set_levels(NULL, VLF_SYSLOG, VLL_OFF); + ++ rstp_init(); ++ + if (argc != 2) { + ovs_fatal(0, "usage: test-rstp INPUT.RSTP"); + } diff --git a/tests/test-util.c b/tests/test-util.c index f0fd042108..7d899fbbfd 100644 --- a/tests/test-util.c @@ -77529,10 +122735,29 @@ index 58adfa09cf..57085074f1 100644 m4_include([tests/drop-stats.at]) +m4_include([tests/learning-switch.at]) diff --git a/tests/tunnel-push-pop-ipv6.at b/tests/tunnel-push-pop-ipv6.at -index 3f58e3e8fd..c96b77cd15 100644 +index 3f58e3e8fd..cb1fdf638e 100644 --- a/tests/tunnel-push-pop-ipv6.at +++ b/tests/tunnel-push-pop-ipv6.at -@@ -63,7 +63,7 @@ AT_CHECK([ovs-ofctl add-flow int-br action=2]) +@@ -19,13 +19,15 @@ dummy@ovs-dummy: hit:0 missed:0 + t2 2/6: (ip6gre: remote_ip=2001:cafe::92) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -63,7 +65,7 @@ AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:01),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77541,7 +122766,26 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) OVS_VSWITCHD_STOP -@@ -151,14 +151,14 @@ dnl Check ERSPAN v1 tunnel push +@@ -93,13 +95,15 @@ dummy@ovs-dummy: hit:0 missed:0 + t3 3/6: (ip6erspan: erspan_dir=1, erspan_hwid=0x7, erspan_ver=2, key=567, remote_ip=2001:cafe::93) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. 
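++dnl With the "netdev-dummy: Add local route entries for IP addresses."
++dnl change (listed in the changelog below), netdev-dummy/ip4addr and
++dnl ip6addr are expected to install matching cached local routes on their
++dnl own, which is why the explicit ovs/route/add calls are dropped in
++dnl favor of checking ovs/route/show.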
+ AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -151,14 +155,14 @@ dnl Check ERSPAN v1 tunnel push AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:01),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77558,7 +122802,27 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) ovs-appctl vlog/set dbg -@@ -388,28 +388,28 @@ dnl Check VXLAN tunnel push +@@ -225,14 +229,15 @@ gre_sys (3) ref_cnt=2 + vxlan_sys_4789 (4789) ref_cnt=2 + ]) + +- +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 2001:cafe::92/24 br0], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -388,28 +393,28 @@ dnl Check VXLAN tunnel push AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:01),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77591,7 +122855,7 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) dnl Check Geneve tunnel push with options -@@ -417,7 +417,7 @@ AT_CHECK([ovs-ofctl add-tlv-map int-br "{class=0xffff,type=0x80,len=4}->tun_meta +@@ -417,7 +422,7 @@ AT_CHECK([ovs-ofctl add-tlv-map int-br "{class=0xffff,type=0x80,len=4}->tun_meta AT_CHECK([ovs-ofctl add-flow int-br "actions=set_field:2001:cafe::92->tun_ipv6_dst,set_field:0xa->tun_metadata0,5"]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:01),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77600,7 +122864,7 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) dnl Check decapsulation of GRE packet -@@ -452,7 +452,7 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -452,7 +457,7 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=98 tun_id=0x7b,tun_ipv6_src=2001:cafe::92,tun_ipv6_dst=2001:cafe::88,tun_metadata0=0xa,in_port=5 (via action) data_len=98 (unbuffered) @@ -77609,7 +122873,7 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) AT_CHECK([ovs-ofctl dump-ports int-br | grep 'port 5'], [0], [dnl -@@ -472,7 +472,7 @@ dnl Check VXLAN tunnel push +@@ -472,7 +477,7 @@ dnl Check VXLAN tunnel push AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl 
ofproto/trace ovs-dummy 'in_port(2),eth(src=36:b1:ee:7c:01:01,dst=36:b1:ee:7c:01:02),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77618,7 +122882,7 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) AT_CHECK([ovs-appctl tnl/arp/show | tail -n+3 | sort], [0], [dnl -@@ -490,7 +490,7 @@ dnl Check VXLAN tunnel push +@@ -490,7 +495,7 @@ dnl Check VXLAN tunnel push AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=36:b1:ee:7c:01:01,dst=36:b1:ee:7c:01:02),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77627,11 +122891,122 @@ index 3f58e3e8fd..c96b77cd15 100644 ]) AT_CHECK([ovs-appctl tnl/arp/show | tail -n+3 | sort], [0], [dnl +@@ -527,3 +532,87 @@ Listening ports: + + OVS_VSWITCHD_STOP + AT_CLEANUP ++ ++AT_SETUP([tunnel_push_pop_ipv6 - local_ip configuration]) ++ ++OVS_VSWITCHD_START( ++ [add-port br0 p0 \ ++ -- set Interface p0 type=dummy ofport_request=1 \ ++ other-config:hwaddr=aa:55:aa:55:00:00]) ++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg]) ++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy]) ++AT_CHECK([ovs-vsctl add-port int-br t2 \ ++ -- set Interface t2 type=geneve \ ++ options:local_ip=2001:beef::88 \ ++ options:remote_ip=2001:cafe::92 \ ++ options:key=123 ofport_request=2]) ++ ++dnl Setup multiple IP addresses. ++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/64], [0], [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:beef::88/64], [0], [OK ++]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 2001:beef::/64 dev br0 SRC 2001:beef::88 local ++Cached: 2001:cafe::/64 dev br0 SRC 2001:cafe::88 local ++]) ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++AT_CHECK([ovs-ofctl add-flow int-br action=normal]) ++ ++dnl This Neighbor Advertisement from p0 has two effects: ++dnl 1. The neighbor cache will learn that 2001:cafe::92 is at f8:bc:12:44:34:b6. ++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0. ++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl ++ 'recirc_id(0),in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x86dd),dnl ++ ipv6(src=2001:cafe::92,dst=2001:cafe::88,label=0,proto=58,tclass=0,hlimit=255,frag=no),dnl ++ icmpv6(type=136,code=0),dnl ++ nd(target=2001:cafe::92,sll=00:00:00:00:00:00,tll=f8:bc:12:44:34:b6)' ++]) ++ ++dnl Check that local_ip is used for encapsulation in the trace. ++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \ ++ | grep -E 'tunnel|actions'], [0], [dnl ++ -> output to native tunnel ++ -> tunneling to 2001:cafe::92 via br0 ++ -> tunneling from aa:55:aa:55:00:00 2001:beef::88 to f8:bc:12:44:34:b6 2001:cafe::92 ++Datapath actions: tnl_push(tnl_port(6081),header(size=70,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl ++ipv6(src=2001:beef::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl ++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++dnl Now check that the packet actually has the local_ip in the header. 
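++dnl A sketch of what the capture should contain: the inner frame prefixed
++dnl by the encapsulation built from local_ip, i.e.
++dnl   outer Ethernet | IPv6 (src=2001:beef::88) | UDP (dst=6081) |
++dnl   Geneve (vni=0x7b) | inner packet
++dnl The hex fragments assembled into ${encap} below spell out exactly this
++dnl header stack, with the hash-dependent UDP source port and checksum
++dnl masked out.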
++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap]) ++ ++packet=50540000000a5054000000091234 ++eth=f8bc124434b6aa55aa55000086dd ++ip6=60000000001e11402001beef0000000000000000000000882001cafe000000000000000000000092 ++dnl Source port is based on a packet hash, so it may differ depending on the ++dnl compiler flags and CPU type. Same for UDP checksum. Masked with '....'. ++udp=....17c1001e.... ++geneve=0000655800007b00 ++encap=${eth}${ip6}${udp}${geneve} ++dnl Output to tunnel from a int-br internal port. ++dnl Checking that the packet arrived and it was correctly encapsulated. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1]) ++dnl Sending again to exercise the non-miss upcall path. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2]) ++ ++dnl Finally, checking that the datapath flow also has a local_ip. ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \ ++ | strip_ufid | strip_used], [0], [dnl ++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl ++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl ++packets:1, bytes:14, used:0.0s, dnl ++actions:tnl_push(tnl_port(6081),header(size=70,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x86dd),dnl ++ipv6(src=2001:beef::88,dst=2001:cafe::92,label=0,proto=17,tclass=0x0,hlimit=64),dnl ++udp(src=0,dst=6081,csum=0xffff),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP diff --git a/tests/tunnel-push-pop.at b/tests/tunnel-push-pop.at -index 57589758f4..65b1469e49 100644 +index 57589758f4..0771c4453a 100644 --- a/tests/tunnel-push-pop.at +++ b/tests/tunnel-push-pop.at -@@ -85,7 +85,7 @@ AT_CHECK([ovs-vsctl -- set Interface br0 options:pcap=br0.pcap]) +@@ -30,17 +30,15 @@ dummy@ovs-dummy: hit:0 missed:0 + t4 5/3: (erspan: erspan_dir=flow, erspan_hwid=flow, erspan_idx=flow, erspan_ver=flow, key=56, remote_ip=flow) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK +-]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0 pkt_mark=1234], [0], [OK ++dnl Checking that a local routes for added IPs were successfully installed. 
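++dnl For reference, ovs/route/show prints cached local routes in the form
++dnl   Cached: <prefix> dev <netdev> SRC <source-ip> local
++dnl so grepping for "Cached" and sorting gives a stable view of the
++dnl automatically installed entries.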
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -85,7 +83,7 @@ AT_CHECK([ovs-vsctl -- set Interface br0 options:pcap=br0.pcap]) AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77640,7 +123015,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check ERSPAN v2 tunnel push -@@ -93,7 +93,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br action=3]) +@@ -93,7 +91,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br action=3]) AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77649,7 +123024,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check ERSPAN v2 flow-based tunnel push -@@ -101,7 +101,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie +@@ -101,7 +99,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77658,7 +123033,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check ERSPAN v2 flow-based tunnel push, erspan_ver=flow -@@ -110,7 +110,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie +@@ -110,7 +108,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77667,7 +123042,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Dynamically set erspan v1 -@@ -118,7 +118,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie +@@ -118,7 +116,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77676,7 +123051,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check ERSPAN v2 flow-based tunnel push -@@ -126,7 +126,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie +@@ -126,7 +124,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77685,7 +123060,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check ERSPAN v2 flow-based 
tunnel push, erspan_ver=flow -@@ -135,7 +135,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie +@@ -135,7 +133,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77694,7 +123069,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Dynamically set erspan v1 -@@ -143,7 +143,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie +@@ -143,7 +141,7 @@ AT_CHECK([ovs-ofctl mod-flows int-br "action=set_field:1.1.2.94->tun_dst,set_fie AT_CHECK([ovs-appctl revalidator/wait]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77703,7 +123078,35 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check ERSPAN tunnel pop -@@ -369,6 +369,26 @@ AT_CHECK([ovs-appctl tnl/neigh/show | grep br | sort], [0], [dnl +@@ -237,18 +235,21 @@ dummy@ovs-dummy: hit:0 missed:0 + t8 9/2152: (gtpu: key=123, remote_ip=1.1.2.92) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP addresses. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) + AT_CHECK([ovs-appctl netdev-dummy/ip6addr br0 2001:cafe::88/24], [0], [OK + ]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK +-]) +- ++dnl Add a static route with a mark. + AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0 pkt_mark=1234], [0], [OK + ]) ++dnl Checking that local routes for added IPs and the static route with a mark ++dnl were successfully installed. 
++AT_CHECK([ovs-appctl ovs/route/show | grep br0 | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2001:ca00::/24 dev br0 SRC 2001:cafe::88 local ++User: 1.1.2.0/24 MARK 1234 dev br0 SRC 1.1.2.88 ++]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) + +@@ -369,6 +370,26 @@ AT_CHECK([ovs-appctl tnl/neigh/show | grep br | sort], [0], [dnl 1.1.2.92 f8:bc:12:44:34:b6 br0 ]) @@ -77730,7 +123133,7 @@ index 57589758f4..65b1469e49 100644 dnl Receive ARP reply without VLAN header AT_CHECK([ovs-vsctl set port br0 tag=0]) AT_CHECK([ovs-appctl tnl/neigh/flush], [0], [OK -@@ -431,49 +451,49 @@ dnl Check VXLAN tunnel push +@@ -431,49 +452,49 @@ dnl Check VXLAN tunnel push AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77787,7 +123190,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check Geneve tunnel push with options -@@ -481,7 +501,7 @@ AT_CHECK([ovs-ofctl add-tlv-map int-br "{class=0xffff,type=0x80,len=4}->tun_meta +@@ -481,7 +502,7 @@ AT_CHECK([ovs-ofctl add-tlv-map int-br "{class=0xffff,type=0x80,len=4}->tun_meta AT_CHECK([ovs-ofctl add-flow int-br "actions=set_field:1.1.2.92->tun_dst,set_field:0xa->tun_metadata0,5"]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77796,7 +123199,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Check GTP-U tunnel push -@@ -489,7 +509,7 @@ AT_CHECK([ovs-ofctl add-flow int-br "actions=9"]) +@@ -489,7 +510,7 @@ AT_CHECK([ovs-ofctl add-flow int-br "actions=9"]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77805,7 +123208,7 @@ index 57589758f4..65b1469e49 100644 ]) AT_CHECK([ovs-ofctl del-flows int-br]) -@@ -546,6 +566,28 @@ AT_CHECK([ovs-ofctl dump-ports int-br | grep 'port [[37]]' | sort], [0], [dnl +@@ -546,6 +567,28 @@ AT_CHECK([ovs-ofctl dump-ports int-br | grep 'port [[37]]' | sort], [0], [dnl port 7: rx pkts=5, bytes=434, drop=?, errs=?, frame=?, over=?, crc=? 
]) @@ -77834,7 +123237,7 @@ index 57589758f4..65b1469e49 100644 dnl Check decapsulation of Geneve packet with options AT_CAPTURE_FILE([ofctl_monitor.log]) AT_CHECK([ovs-ofctl monitor int-br 65534 --detach --no-chdir --pidfile 2> ofctl_monitor.log]) -@@ -559,14 +601,14 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) +@@ -559,14 +602,14 @@ OVS_APP_EXIT_AND_WAIT(ovs-ofctl) AT_CHECK([cat ofctl_monitor.log], [0], [dnl NXT_PACKET_IN2 (xid=0x0): cookie=0x0 total_len=98 tun_id=0x7b,tun_src=1.1.2.92,tun_dst=1.1.2.88,tun_metadata0=0xa,in_port=5 (via action) data_len=98 (unbuffered) @@ -77852,7 +123255,7 @@ index 57589758f4..65b1469e49 100644 ]) dnl Receive VXLAN with different MAC and verify that the neigh cache gets updated -@@ -579,7 +621,7 @@ dnl Check VXLAN tunnel push +@@ -579,7 +622,7 @@ dnl Check VXLAN tunnel push AT_CHECK([ovs-ofctl add-flow int-br action=2]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=36:b1:ee:7c:01:01,dst=36:b1:ee:7c:01:02),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77861,7 +123264,7 @@ index 57589758f4..65b1469e49 100644 ]) AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl -@@ -596,7 +638,7 @@ ovs-appctl time/warp 1000 +@@ -596,7 +639,7 @@ ovs-appctl time/warp 1000 dnl Check VXLAN tunnel push AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=36:b1:ee:7c:01:01,dst=36:b1:ee:7c:01:02),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=47,tos=0,ttl=64,frag=no)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77870,7 +123273,40 @@ index 57589758f4..65b1469e49 100644 ]) AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl -@@ -718,14 +760,14 @@ dnl Output to tunnel from a int-br internal port. +@@ -648,12 +691,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 -- set Interface t2 type=geneve \ + options:remote_ip=1.1.2.92 options:key=123 ofport_request=2 \ + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +- +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) + + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) +@@ -689,11 +732,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 dnl + -- set Interface t2 type=geneve options:remote_ip=1.1.2.92 dnl + options:key=123 ofport_request=2]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) + +@@ -718,14 +762,14 @@ dnl Output to tunnel from a int-br internal port. dnl Checking that the packet arrived and it was correctly encapsulated. 
AT_CHECK([ovs-ofctl add-flow int-br "in_port=LOCAL,actions=debug_slow,output:2"]) AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}4"]) @@ -77888,7 +123324,109 @@ index 57589758f4..65b1469e49 100644 dnl Datapath actions should not have tunnel push action. AT_CHECK([ovs-appctl dpctl/dump-flows | grep -q tnl_push], [1]) -@@ -772,7 +814,7 @@ AT_CHECK([ovs-ofctl add-flow int-br action=3]) +@@ -735,6 +779,88 @@ AT_CHECK([ovs-appctl dpctl/dump-flows | grep -q 'slow_path(action)'], [0]) + OVS_VSWITCHD_STOP + AT_CLEANUP + ++AT_SETUP([tunnel_push_pop - local_ip configuration]) ++ ++OVS_VSWITCHD_START( ++ [add-port br0 p0 \ ++ -- set Interface p0 type=dummy ofport_request=1 \ ++ other-config:hwaddr=aa:55:aa:55:00:00]) ++AT_CHECK([ovs-appctl vlog/set dpif_netdev:dbg]) ++AT_CHECK([ovs-vsctl add-br int-br -- set bridge int-br datapath_type=dummy]) ++AT_CHECK([ovs-vsctl add-port int-br t2 \ ++ -- set Interface t2 type=geneve \ ++ options:local_ip=2.2.2.88 \ ++ options:remote_ip=1.1.2.92 \ ++ options:key=123 ofport_request=2]) ++ ++dnl Setup multiple IP addresses. ++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK ++]) ++AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 2.2.2.88/24], [0], [OK ++]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached | sort], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local ++Cached: 2.2.2.0/24 dev br0 SRC 2.2.2.88 local ++]) ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++AT_CHECK([ovs-ofctl add-flow int-br action=normal]) ++ ++dnl This ARP reply from p0 has two effects: ++dnl 1. The ARP cache will learn that 1.1.2.92 is at f8:bc:12:44:34:b6. ++dnl 2. The br0 mac learning will learn that f8:bc:12:44:34:b6 is on p0. ++AT_CHECK([ovs-appctl netdev-dummy/receive p0 dnl ++ 'recirc_id(0),in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),dnl ++ arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=00:00:00:00:00:00)' ++]) ++ ++dnl Check that local_ip is used for encapsulation in the trace. ++AT_CHECK([ovs-appctl ofproto/trace int-br in_port=LOCAL \ ++ | grep -E 'tunnel|actions'], [0], [dnl ++ -> output to native tunnel ++ -> tunneling to 1.1.2.92 via br0 ++ -> tunneling from aa:55:aa:55:00:00 2.2.2.88 to f8:bc:12:44:34:b6 1.1.2.92 ++Datapath actions: tnl_push(tnl_port(6081),header(size=50,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl ++ipv4(src=2.2.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl ++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++dnl Now check that the packet actually has the local_ip in the header. ++AT_CHECK([ovs-vsctl -- set Interface p0 options:tx_pcap=p0.pcap]) ++ ++packet=50540000000a5054000000091234 ++eth=f8bc124434b6aa55aa5500000800 ++ip4=450000320000400040113305020202580101025c ++dnl Source port is based on a packet hash, so it may differ depending on the ++dnl compiler flags and CPU type. Masked with '....'. ++udp=....17c1001e0000 ++geneve=0000655800007b00 ++encap=${eth}${ip4}${udp}${geneve} ++dnl Output to tunnel from a int-br internal port. ++dnl Checking that the packet arrived and it was correctly encapsulated. ++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 1]) ++dnl Sending again to exercise the non-miss upcall path. 
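++dnl The first send above was handled through the miss upcall, which also
++dnl installed a datapath flow; this second send should hit that flow
++dnl directly, which is why the dump-flows check further down expects
++dnl packets:1 on the tnl_push flow.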
++AT_CHECK([ovs-appctl netdev-dummy/receive int-br "${packet}"]) ++OVS_WAIT_UNTIL([test $(ovs-pcap p0.pcap | grep -c "${encap}${packet}") -eq 2]) ++ ++dnl Finally, checking that the datapath flow also has a local_ip. ++AT_CHECK([ovs-appctl dpctl/dump-flows | grep tnl_push \ ++ | strip_ufid | strip_used], [0], [dnl ++recirc_id(0),in_port(2),packet_type(ns=0,id=0),dnl ++eth(src=50:54:00:00:00:09,dst=50:54:00:00:00:0a),eth_type(0x1234), dnl ++packets:1, bytes:14, used:0.0s, dnl ++actions:tnl_push(tnl_port(6081),header(size=50,type=5,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:00,dl_type=0x0800),dnl ++ipv4(src=2.2.2.88,dst=1.1.2.92,proto=17,tos=0,ttl=64,frag=0x4000),dnl ++udp(src=0,dst=6081,csum=0x0),geneve(vni=0x7b)),out_port(100)),1 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_SETUP([tunnel_push_pop - underlay bridge match]) + + OVS_VSWITCHD_START([add-port br0 p0 -- set Interface p0 type=dummy ofport_request=1 other-config:hwaddr=aa:55:aa:55:00:00]) +@@ -754,8 +880,11 @@ dummy@ovs-dummy: hit:0 missed:0 + + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) ++ + AT_CHECK([ovs-ofctl add-flow br0 'arp,priority=1,action=normal']) + + dnl Use arp reply to achieve tunnel next hop mac binding +@@ -772,7 +901,7 @@ AT_CHECK([ovs-ofctl add-flow int-br action=3]) AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(2),eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:00),eth_type(0x0800),ipv4(src=1.1.3.88,dst=1.1.3.112,proto=17,tos=0,ttl=64,frag=no),udp(src=51283,dst=4789)'], [0], [stdout]) AT_CHECK([tail -1 stdout], [0], @@ -77897,7 +123435,23 @@ index 57589758f4..65b1469e49 100644 ]) dnl Verify outer L2 and L3 header flow fields can be matched in the underlay bridge -@@ -842,3 +884,54 @@ Datapath actions: 7 +@@ -798,11 +927,12 @@ AT_CHECK([ovs-vsctl add-port int-br t2 dnl + -- set Interface t2 type=geneve options:remote_ip=1.1.2.92 dnl + options:key=123 ofport_request=2]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 1.1.2.88/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 1.1.2.92/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev br0 SRC 1.1.2.88 local + ]) + AT_CHECK([ovs-ofctl add-flow br0 action=normal]) + +@@ -842,3 +972,134 @@ Datapath actions: 7 OVS_VSWITCHD_STOP AT_CLEANUP @@ -77925,10 +123479,12 @@ index 57589758f4..65b1469e49 100644 + -- set port br0 tag=42 dnl + -- set port p7 tag=200]) + -+dnl Set IP address and route for br0. ++dnl Set an IP address for br0. +AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 10.0.0.2/24], [0], [OK +]) -+AT_CHECK([ovs-appctl ovs/route/add 10.0.0.11/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. 
++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 10.0.0.0/24 dev br0 SRC 10.0.0.2 local +]) + +dnl Send an ARP reply to port b8 on br0, so that packets will be forwarded @@ -77952,8 +123508,86 @@ index 57589758f4..65b1469e49 100644 + +OVS_VSWITCHD_STOP +AT_CLEANUP ++ ++AT_SETUP([tunnel_push_pop - use non-local port as tunnel endpoint]) ++ ++OVS_VSWITCHD_START([add-port br0 p0 \ ++ -- set Interface p0 type=dummy ofport_request=1]) ++ ++dnl Adding another port separately to ensure that it gets an ++dnl aa:55:aa:55:00:03 MAC address (dummy port number 3). ++AT_CHECK([ovs-vsctl add-port br0 vtep0 \ ++ -- set interface vtep0 type=dummy ofport_request=2]) ++AT_CHECK([ovs-vsctl \ ++ -- add-br int-br \ ++ -- set bridge int-br datapath_type=dummy \ ++ -- set Interface int-br ofport_request=3]) ++AT_CHECK([ovs-vsctl \ ++ -- add-port int-br t1 \ ++ -- set Interface t1 type=gre ofport_request=4 \ ++ options:remote_ip=1.1.2.92 ++]) ++ ++AT_CHECK([ovs-appctl dpif/show], [0], [dnl ++dummy@ovs-dummy: hit:0 missed:0 ++ br0: ++ br0 65534/100: (dummy-internal) ++ p0 1/1: (dummy) ++ vtep0 2/2: (dummy) ++ int-br: ++ int-br 65534/3: (dummy-internal) ++ t1 4/4: (gre: remote_ip=1.1.2.92) ++]) ++ ++AT_CHECK([ovs-appctl netdev-dummy/ip4addr vtep0 1.1.2.88/24], [0], [OK ++]) ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 1.1.2.0/24 dev vtep0 SRC 1.1.2.88 local ++]) ++ ++AT_CHECK([ovs-ofctl add-flow br0 action=normal]) ++AT_CHECK([ovs-ofctl add-flow int-br action=normal]) ++ ++dnl Use arp request and reply to achieve tunnel next hop mac binding. ++dnl By default, vtep0's MAC address is aa:55:aa:55:00:03. ++AT_CHECK([ovs-appctl netdev-dummy/receive vtep0 'recirc_id(0),in_port(2),dnl ++ eth(dst=ff:ff:ff:ff:ff:ff,src=aa:55:aa:55:00:03),eth_type(0x0806),dnl ++ arp(tip=1.1.2.92,sip=1.1.2.88,op=1,sha=aa:55:aa:55:00:03,tha=00:00:00:00:00:00)']) ++AT_CHECK([ovs-appctl netdev-dummy/receive p0 'recirc_id(0),in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0806),dnl ++ arp(sip=1.1.2.92,tip=1.1.2.88,op=2,sha=f8:bc:12:44:34:b6,tha=aa:55:aa:55:00:03)']) ++ ++AT_CHECK([ovs-appctl tnl/neigh/show | tail -n+3 | sort], [0], [dnl ++1.1.2.92 f8:bc:12:44:34:b6 br0 ++]) ++ ++dnl Check GRE tunnel pop. ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(1),dnl ++ eth(src=f8:bc:12:44:34:b6,dst=aa:55:aa:55:00:03),eth_type(0x0800),dnl ++ ipv4(src=1.1.2.92,dst=1.1.2.88,proto=47,tos=0,ttl=64,frag=no)'], ++[0], [stdout]) ++ ++AT_CHECK([tail -1 stdout], [0], ++ [Datapath actions: tnl_pop(4) ++]) ++ ++dnl Check GRE tunnel push. 
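++dnl The expected result is a tnl_push action built from vtep0 rather than
++dnl from a bridge-local port:
++dnl   outer Ethernet (src aa:55:aa:55:00:03) | IPv4 (src=1.1.2.88,
++dnl   proto=47) | GRE
++dnl with the encapsulated packet sent out the vtep0 datapath port
++dnl (out_port(2)), demonstrating a non-local port as the tunnel endpoint.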
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(3),dnl ++ eth(dst=f9:bc:12:44:34:b6,src=af:55:aa:55:00:03),eth_type(0x0800),dnl ++ ipv4(src=1.1.3.88,dst=1.1.3.92,proto=1,tos=0,ttl=64,frag=no)'], ++[0], [stdout]) ++AT_CHECK([tail -1 stdout], [0], ++ [Datapath actions: tnl_push(tnl_port(4),header(size=38,type=3,dnl ++eth(dst=f8:bc:12:44:34:b6,src=aa:55:aa:55:00:03,dl_type=0x0800),dnl ++ipv4(src=1.1.2.88,dst=1.1.2.92,proto=47,tos=0,ttl=64,frag=0x4000),dnl ++gre((flags=0x0,proto=0x6558))),out_port(2)),1 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP diff --git a/tests/tunnel.at b/tests/tunnel.at -index b8ae7caa9b..fd482aa872 100644 +index b8ae7caa9b..1be899fff3 100644 --- a/tests/tunnel.at +++ b/tests/tunnel.at @@ -126,7 +126,7 @@ AT_CHECK([ovs-appctl dpif/show | tail -n +3], [0], [dnl @@ -77965,6 +123599,73 @@ index b8ae7caa9b..fd482aa872 100644 ]) OVS_VSWITCHD_STOP +@@ -333,6 +333,50 @@ set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,flags(df|key))),1 + OVS_VSWITCHD_STOP + AT_CLEANUP + ++AT_SETUP([tunnel - set_tunnel VXLAN]) ++OVS_VSWITCHD_START([dnl ++ add-port br0 p1 -- set Interface p1 type=vxlan options:key=flow \ ++ options:remote_ip=1.1.1.1 ofport_request=1 \ ++ -- add-port br0 p2 -- set Interface p2 type=vxlan options:key=flow \ ++ options:remote_ip=2.2.2.2 ofport_request=2 \ ++ -- add-port br0 p3 -- set Interface p3 type=vxlan options:key=flow \ ++ options:remote_ip=3.3.3.3 ofport_request=3 \ ++ -- add-port br0 p4 -- set Interface p4 type=vxlan options:key=flow \ ++ options:remote_ip=4.4.4.4 ofport_request=4]) ++AT_DATA([flows.txt], [dnl ++actions=set_tunnel:1,output:1,set_tunnel:2,output:2,set_tunnel:3,output:3,set_tunnel:5,output:4 ++]) ++ ++OVS_VSWITCHD_DISABLE_TUNNEL_PUSH_POP ++AT_CHECK([ovs-ofctl add-flows br0 flows.txt]) ++ ++AT_CHECK([ovs-appctl dpif/show | tail -n +3], [0], [dnl ++ br0 65534/100: (dummy-internal) ++ p1 1/4789: (vxlan: key=flow, remote_ip=1.1.1.1) ++ p2 2/4789: (vxlan: key=flow, remote_ip=2.2.2.2) ++ p3 3/4789: (vxlan: key=flow, remote_ip=3.3.3.3) ++ p4 4/4789: (vxlan: key=flow, remote_ip=4.4.4.4) ++]) ++ ++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'in_port(100),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout]) ++AT_CHECK([tail -1 stdout], [0], [Datapath actions: dnl ++set(tunnel(tun_id=0x1,dst=1.1.1.1,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x2,dst=2.2.2.2,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x3,dst=3.3.3.3,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,tp_dst=4789,flags(df|key))),4789 ++]) ++ ++dnl With pre-existing tunnel metadata. 
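++dnl Here the traced packet already carries tunnel metadata matching p1
++dnl (tun_id=0x1, src=1.1.1.1), so it is treated as received on p1; output
++dnl back to the ingress port is expected to be suppressed, leaving only
++dnl the p2, p3 and p4 encapsulations in the actions below.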
++AT_CHECK([ovs-appctl ofproto/trace ovs-dummy 'tunnel(tun_id=0x1,src=1.1.1.1,dst=5.5.5.5,tp_src=12345,tp_dst=4789,ttl=64,flags(key)),in_port(4789),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0800),ipv4(src=192.168.0.1,dst=192.168.0.2,proto=1,tos=0,ttl=128,frag=no),icmp(type=8,code=0)'], [0], [stdout]) ++AT_CHECK([tail -1 stdout], [0], [Datapath actions: dnl ++set(tunnel(tun_id=0x2,dst=2.2.2.2,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x3,dst=3.3.3.3,ttl=64,tp_dst=4789,flags(df|key))),4789,dnl ++set(tunnel(tun_id=0x5,dst=4.4.4.4,ttl=64,tp_dst=4789,flags(df|key))),4789 ++]) ++ ++OVS_VSWITCHD_STOP ++AT_CLEANUP ++ + AT_SETUP([tunnel - key]) + OVS_VSWITCHD_START([dnl + add-port br0 p1 -- set Interface p1 type=gre options:key=1 \ +@@ -480,11 +524,12 @@ dummy@ovs-dummy: hit:0 missed:0 + v2 3/3: (dummy-internal) + ]) + +-dnl First setup dummy interface IP address, then add the route +-dnl so that tnl-port table can get valid IP address for the device. ++dnl Setup dummy interface IP address. + AT_CHECK([ovs-appctl netdev-dummy/ip4addr br0 172.31.1.1/24], [0], [OK + ]) +-AT_CHECK([ovs-appctl ovs/route/add 172.31.1.0/24 br0], [0], [OK ++dnl Checking that a local route for added IP was successfully installed. ++AT_CHECK([ovs-appctl ovs/route/show | grep Cached], [0], [dnl ++Cached: 172.31.1.0/24 dev br0 SRC 172.31.1.1 local + ]) + + dnl change the flow table to bump the internal table version diff --git a/utilities/bugtool/ovs-bugtool.in b/utilities/bugtool/ovs-bugtool.in index fa62cbe949..fee0de8532 100755 --- a/utilities/bugtool/ovs-bugtool.in @@ -78030,7 +123731,7 @@ index fbe6e4f560..2c1766eff5 100755 for flowFile in args.flowFiles: logging.info("reading flows from %s", flowFile) diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c -index ede7f1e61a..6771973ae9 100644 +index ede7f1e61a..5c431e9cfc 100644 --- a/utilities/ovs-ofctl.c +++ b/utilities/ovs-ofctl.c @@ -730,12 +730,12 @@ static void @@ -78048,6 +123749,15 @@ index ede7f1e61a..6771973ae9 100644 const struct ofp_header *error_oh = error->data; ovs_be32 error_xid = error_oh->xid; enum ofperr ofperr; +@@ -5041,7 +5041,7 @@ static const struct ovs_cmdl_command all_commands[] = { + 1, 1, ofctl_dump_ipfix_flow, OVS_RO }, + + { "ct-flush-zone", "switch zone", +- 2, 2, ofctl_ct_flush_zone, OVS_RO }, ++ 2, 2, ofctl_ct_flush_zone, OVS_RW }, + + { "ofp-parse", "file", + 1, 1, ofctl_ofp_parse, OVS_RW }, diff --git a/utilities/ovs-save b/utilities/ovs-save index fb2025b765..67092ecf7e 100755 --- a/utilities/ovs-save diff --git a/SPECS/openvswitch2.17.spec b/SPECS/openvswitch2.17.spec index 124d35a..4011168 100644 --- a/SPECS/openvswitch2.17.spec +++ b/SPECS/openvswitch2.17.spec @@ -63,7 +63,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 2.17.0 -Release: 137%{?dist} +Release: 154%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -97,7 +97,7 @@ Source101: https://pypi.io/packages/source/P/Pygments/Pygments-%{pygmentsver}.ta Source102: https://pypi.io/packages/source/S/Sphinx/Sphinx-%{sphinxver}.tar.gz Source103: https://pypi.io/packages/source/p/pyelftools/pyelftools-%{pyelftoolsver}.tar.gz -Patch: openvswitch-%{version}.patch +Patch0: openvswitch-%{version}.patch # The DPDK is designed to optimize througput of network traffic using, among # other techniques, carefully crafted assembly instructions. 
As such it @@ -267,7 +267,7 @@ mv dpdk-*/ %{dpdkdir}/ # FIXME should we propose a way to do that upstream? sed -ri "/^subdir\('(usertools|app)'\)/d" %{dpdkdir}/meson.build -%patch -p1 +%patch0 -p1 %build %if 0%{?rhel} && 0%{?rhel} < 9 @@ -749,6 +749,1130 @@ exit 0 %endif %changelog +* Tue Mar 12 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-154 +- Merging dpdk subtree [RH git: e5c83b7dbf] + Commit list: + 856b38dbd3 Merge tag 'v21.11.6' into 21.11 + 48ac0a964f Reapply "net/iavf: fix abnormal disable HW interrupt" + 181fb849b5 version: 21.11.6 + 1aa6eccdc8 Merging upstream branch-2.17 + ef39402104 Merging upstream branch-2.17 + f082ed71cc Merging upstream branch-2.17 + 598acceb55 Merging upstream branch-2.17 + bbc9320caf version: 21.11.6-rc1 + fc1c14ba87 Revert "net/iavf: fix abnormal disable HW interrupt" + b655b64158 Revert "net/iavf: fix abnormal disable HW interrupt" + 770e7c2724 net/af_xdp: make compatible with libbpf 0.8.0 + 9bb15a3d0f net/hns3: fix VF reset handler interruption + 90236089a2 net/hns3: fix reset event status + 0dece2892b net/hns3: fix ignored reset event + f2151f9aa0 ethdev: fix ESP packet type description + 6faf898f55 mempool: clarify enqueue/dequeue ops documentation + 1699794ad5 mempool: fix get function documentation + 69793a2e87 doc: remove number of commands in vDPA guide + 82a6991ac2 doc: fix some ordered lists + f8a3dcb22d doc: remove restriction on ixgbe vector support + 8633f55ed5 Merging upstream branch-2.17 + 88bee6c641 Merging 4e50ad4469 version: 21.11.5 + c83e88b939 Merging upstream branch-2.17 + 2b8325631a Merging upstream branch-2.17 + 9a6c6fad1b Merging upstream branch-2.17 + fab24e72ab app/pipeline: add sigint handler + 25bb8f090e test/hash: fix creation error log + 83eafb9441 examples/ethtool: fix pause configuration + 952d702879 app/dumpcap: allow multiple invocations + c9a8aa95de pdump: fix error number on IPC response + b2d5c193df eal/windows: fix build with recent MinGW + e39929c868 examples/ipsec-secgw: fix partial overflow + 25bd80cff6 doc: update versions recommendations for i40e and ice + 3840836817 doc: fix RSS flow description in hns3 guide + 326b4f1213 doc: update features in hns3 guide + dded07195d doc: fix hns3 build option about max queue number + d549330dfc net/txgbe: fix out of bound access + 45cef8185a vhost: fix missing lock protection in power monitor API + 9b63dd4599 net/virtio: fix descriptor addresses in 32-bit build + 93f0406998 net/hns3: fix crash for NEON and SVE + 662c60a322 net/hns3: fix LRO offload to report + 7d54c473db net/hns3: fix setting DCB capability + 9a8d7ab07f net/hns3: extract common function to obtain revision ID + 082393fa5d net/hns3: fix IMP or global reset + 5544a65fbb net/hns3: fix traffic management thread safety + 88ddc5f559 net/nfp: fix reconfigure logic of set MAC address + 69933a8ef9 net/nfp: fix reconfigure logic in VF initialization + 66e7d1cdb0 net/nfp: fix reconfigure logic in PF initialization + f3f42b6d5b net/nfp: fix link status interrupt + d6ca7e03fe net/nfp: fix DMA error after abnormal exit + 09ec0e74aa net/mlx5: fix hairpin queue unbind + f19325f6eb crypto/qat: fix raw API null algorithm digest + 501c30a45c crypto/qat: fix NULL algorithm digest placement + 1c5191e68c net/nfp: fix Tx descriptor free logic of NFD3 + 8d9e1872ea event/dlb2: fix disable PASID + 804ef265cf net/mlx5: zero UDP checksum over IPv4 in encapsulation + e867b9ff37 net/mlx5: fix shared Rx queue list management + 48f88518d9 net/mlx5: fix multi-segment Tx inline data length + b4baa4d39e net/mlx5: fix hairpin queue states 
+ 5642cf46de net/mlx5: fix use after free on Rx queue start + 17a2cc95f1 net/mlx5: fix validation of sample encap flow action + 8f6de188ec app/testpmd: fix tunnel TSO configuration + 522d1db23c app/testpmd: add explicit check for tunnel TSO + 7e61e0b122 app/testpmd: fix tunnel TSO capability check + 78727d935b net/hns3: fix mailbox sync + 07e3d1ea4f test/bonding: fix uninitialized RSS configuration + af6b84f09f ethdev: account for smaller MTU when setting default + 4084334e1d app/testpmd: remove useless check in TSO command + c105c5081a net/sfc: remove null dereference in log + 7d50298a26 meter: fix RFC4115 trTCM API Doxygen + e9285c755b event/dlb2: fix missing queue ordering capability flag + 644f1632a6 net/ice: fix crash on closing representor ports + 056e0eac3d test/bbdev: assert failed test for queue configure + 939e119406 test/bbdev: fix Python script subprocess + 38a1842b74 event/dlb2: fix name check in self-test + 68cdf1299e app/dumpcap: fix mbuf pool ring type + c268e6c466 config/arm: fix aarch32 build with GCC 13 + f4ef2095b0 net/ice: fix Tx preparation + 93bbc6d23d net/iavf: fix Tx preparation + cfd4195e5c crypto/nitrox: fix panic with high number of segments + 987c28f022 net/ice: fix DCF port statistics + 7fd13314d3 net/iavf: fix Tx offload flags check + 9e228c5690 net/iavf: fix indent in Tx path + 820e8fc770 net/iavf: fix Tx offload mask + da268d8b4e net/ice: fix L1 check interval + 981460d101 common/mlx5: fix controller index parsing + 986d9d2fbb net/mlx5: fix E-Switch mirror flow rule validation + 9f2c545da6 net/mlx5: fix decap action checking in sample flow + 97cf69ec61 net/tap: fix RSS for fragmented packets + 4c22ff4433 app/procinfo: adjust format of RSS info + ff10f5d05b app/procinfo: fix RSS info + 774c444b1c net/tap: fix IPv4 checksum offloading + 8b094ad673 net/tap: fix L4 checksum offloading + c2404cbdfa net/hns3: fix uninitialized hash algo value + 8ca6753041 net/hns3: keep set/get algo key functions local + 5c52c7e6a4 net/hns3: fix some error logs + 6b71d322ad net/hns3: fix some return values + 48ce6323ad net/enic: avoid extra unlock in MTU set + 6d69e09954 ethdev: fix 32-bit build with GCC 13 + d0f7aa36ad net/bonding: fix possible overrun + fe5d3a4119 test/bonding: add missing check + 1c29c23953 test/bonding: remove unreachable statement + df93e5cc12 net/hns3: refactor interrupt state query + 0bc0e51d06 net/hns3: fix multiple reset detected log + b28a338b6b net/hns3: remove reset log in secondary + efe611f1c8 net/hns3: fix double stats for IMP and global reset + fc1e7c7b6e net/hns3: fix unchecked Rx free threshold + 79ee20d589 net/hns3: fix typo in function name + 0aaa1f6146 ethdev: fix function name in comment + 4adc748e0e common/cnxk: fix pool buffer size in opaque mode + 587aea0f91 vhost: fix check on virtqueue access in in-flight getter + 201e1f617b vhost: fix check on virtqueue access in async registration + 900cc61cbe vhost: fix missing check on virtqueue access + e9b6f56c9c vhost: fix missing vring call check on virtqueue access + 98857c5b76 net/virtio: fix link state interrupt vector setting + 0c91efd331 net/virtio: fix missing next flag in Tx packed ring + c900dccdfe eventdev: fix missing driver names in info struct + 48ecb471d9 eventdev: fix device pointer for vdev-based devices + 6de3135f1e net/mlx5: fix matcher layout size calculation + 104340b4e1 net/mlx5: fix MPRQ stride size to accommodate the headroom + 21554f4ee2 Merging upstream branch-2.17 + 7a268dfe93 malloc: remove return from void functions + 997c669059 app/procinfo: remove unnecessary 
rte_malloc + 6b396dceec net/bonding: fix link status callback stop + 097657f9bc app/testpmd: fix primary process not polling all queues + 91c5c25915 net/vmxnet3: fix Rx and Tx queue state + a508510366 net/virtio: fix Rx and Tx queue state + 1213bcc826 net/vhost: fix Rx and Tx queue state + 358e6d50e7 net/txgbe: fix Rx and Tx queue state + 4e0e44f7ef net/softnic: fix Rx and Tx queue state + 7a88ec18c1 net/sfc: fix Rx and Tx queue state + d849664d39 net/ring: fix Rx and Tx queue state + ec33dc897d net/pfe: fix Rx and Tx queue state + 2e34b54c6e net/octeon_ep: fix Rx and Tx queue state + b86f8689f9 net/null: fix Rx and Tx queue state + 4cdfd0b407 net/ngbe: fix Rx and Tx queue state + eb249c2d5f net/mvpp2: fix Rx and Tx queue state + bd403e9e66 net/mvneta: fix Rx and Tx queue state + 789097d1b9 net/mlx4: fix Rx and Tx queue state + 72786175fb net/memif: fix Rx and Tx queue state + 5c8a283469 net/ipn3ke: fix Rx and Tx queue state + 9323a4e3c7 net/hinic: fix Rx and Tx queue state + 10ccc32cc0 net/enic: fix Rx and Tx queue state + ee101d1045 net/enetc: fix Rx and Tx queue state + b69c78275b net/ena: fix Rx and Tx queue state + 180c47a4a4 net/e1000: fix Rx and Tx queue state + 09058bb2bb net/dpaa2: fix Rx and Tx queue state + 87695a10d6 net/dpaa: fix Rx and Tx queue state + 92c7732d11 net/cxgbe: fix Rx and Tx queue state + b251cfe68f net/bonding: fix Rx and Tx queue state + af16f64988 net/bnxt: fix Rx and Tx queue state + 8b1f7ca738 net/bnx2x: fix Rx and Tx queue state + 0a4aa9fa9e net/avp: fix Rx and Tx queue state + 36cbe7901c net/af_xdp: fix Rx and Tx queue state + d59c166780 net/af_packet: fix Rx and Tx queue state + f3baeaa757 fib6: fix adding default route as first route + e9cd35de3c fib: fix adding default route overwriting entire table + 70b49bf5ef net/mlx5: fix leak in sysfs port name translation + 1b21ab31ce net/ice: fix TSO with big segments + d4041c9e05 net/ice: remove log from Tx prepare function + 49e338a2b2 net/iavf: fix TSO with big segments + 296c529c25 net/iavf: remove log from Tx prepare function + 11e3255ee8 net/iavf: fix Tx debug + 1cc6a40c0d net/ice: fix initial link status + f89cd0418d net/iavf: fix ESN session update + 77aff6a046 net/iavf: unregister interrupt handler before FD close + 1b0d948303 net/iavf: fix port stats clearing + 28ddcaf4a6 net/ice: fix TM configuration clearing + 13662a31d2 net/i40e: fix buffer leak on Rx reconfiguration + bf23d4ca47 net/iavf: fix checksum offloading + 71663e247f net/iavf: fix VLAN offload strip flag + e7bd53b384 net/ice: write timestamp to first segment in scattered Rx + 1d231c0c26 net/i40e: fix FDIR queue receives broadcast packets + cbc624f535 app/bbdev: fix link with NXP LA12XX + e166b7fd15 baseband/acc: fix ACC100 HARQ input alignment + 8816d35798 common/cnxk: remove dead Meson code + e50b6544bb common/cnxk: fix aura disable handling + 5bbab97809 mempool/cnxk: fix free from non-EAL threads + f2a2d57884 dma/cnxk: fix device state + 7c35f7b1c5 common/cnxk: fix DPI memzone name + a2e1ba51cd net/cnxk: fix uninitialized variable + 723ae321d2 net/cnxk: fix uninitialized variable + 3e087130fc common/cnxk: fix different size bit operations + 981e2093cb common/cnxk: fix xstats for different packet sizes + b2da49e418 common/cnxk: fix default flow action setting + 77810d067d event/sw: fix ordering corruption with op release + 9ae9b97617 eventdev/eth_rx: fix timestamp field register in mbuf + 2ab27f391f event/sw: remove obsolete comment + 9ff70c4b78 test/event: fix crypto null device creation + 107d68ad4d event/cnxk: fix return values for 
capability API + d31f9b208d event/cnxk: fix getwork mode devargs parsing + 2df5816de9 Merging upstream branch-2.17 + 13b3789404 bus/pci: fix device ID log + 0882f0ecd2 eventdev: fix symbol export for port maintenance + 27a64dc2dd crypto/ipsec_mb: add dependency check for cross build + 4346110ba4 test/crypto: fix typo in asym tests + 0ad8ae6b18 test/crypto: skip some synchronous tests with CPU crypto + 956aa99fcb test/crypto: fix IV in some vectors + de8bdca2b3 doc: replace code blocks with includes in security guide + 9ee1a46fe7 cryptodev: add missing doc for security context + 64513c3fd4 app/testpmd: fix help string + da9e764370 net/tap: use MAC address parse API instead of local parser + 4cdc254d17 net/ngbe: check process type in close operation + 4a97dac668 net/txgbe: check process type in close operation + 5658f2dd84 net/ngbe: keep link down after device close + a3d1c5eb08 net/txgbe: keep link down after device close + 709992faca net/ngbe: reconfigure MAC Rx when link update + b817c9e250 net/txgbe: reconfigure MAC Rx when link update + 51451e5516 net/ngbe: fix flow control + 289d492046 net/txgbe: fix GRE tunnel packet checksum + 2a455021e4 net/txgbe: add Tx queue maximum limit + eb6a3e4952 net/netvsc: increase VSP response timeout to 60 seconds + 25123c091b hash: align SSE lookup to scalar implementation + 171c6bf70d bus/dpaa: fix build with asserts for GCC 13 + 81b71fcdab random: initialize state for unregistered non-EAL threads + 03a4383ed7 net/hns3: fix order in NEON Rx + 7690a37723 net/hns3: fix flushing multicast MAC address + 21874e8d89 net/hns3: fix error code for multicast resource + e5a349762a net/hns3: fix VF default MAC modified when set failed + 2a40eeaf9a net/sfc: add missing error code indication to MAE init path + 0ea692c019 net/sfc: account for data offset on Tx + c0edca24f2 net/sfc: set max Rx packet length for representors + 41073f2809 net/bonding: fix header for C++ + 2e32b7f958 rawdev: fix device class in log message + c8f024efd8 eal/unix: fix firmware reading with external xz helper + acafc55e26 mempool: fix default ops for an empty mempool + fb25ee65c2 Merging upstream branch-2.17 + f5fec488a1 Merging upstream branch-2.17 + 5d7fc78c46 Merging upstream branch-2.17 + 2d8e91e4cd Merging upstream branch-2.17 + edb655f33c Merging upstream branch-2.17 + fcd29ab280 Merging upstream branch-2.17 + 0ab613e72e Merging upstream branch-2.17 + dc3b12d638 Merging upstream branch-2.17 + e89b367b29 Merging upstream branch-2.17 + b960a1e73b Merging upstream branch-2.17 + 2323252f10 Merging upstream branch-2.17 + ec3ddd7e91 Revert "net/iavf: fix tunnel TSO path selection" + bfa72e847e Merging upstream branch-2.17 + 4dbf535102 Merging upstream branch-2.17 + 33604e6b9e Merging upstream branch-2.17 + 1d093172f3 Merging upstream branch-2.17 + c6fe249b71 Merging upstream branch-2.17 + 75de7e4692 Merging upstream branch-2.17 + 0cabc0aaab Merging upstream branch-2.17 + 3906bbc236 Merging upstream branch-2.17 + c54d4024f1 Merging upstream branch-2.17 + ae0cf9c610 Merging upstream branch-2.17 + 40bc911f1e Merging upstream branch-2.17 + 9f7cd5522e Merging upstream branch-2.17 + 6521f134cf Merging upstream branch-2.17 + 9f8ccda402 Merging upstream branch-2.17 + cbaaedee10 Merging upstream branch-2.17 + 46e64a87e0 Merging upstream branch-2.17 + 475deff39c Merging upstream branch-2.17 + 31a23281e4 Merging upstream branch-2.17 + 5d594abe7a Merging upstream branch-2.17 + 76a5db03fd Merging upstream branch-2.17 + f477adf374 Merging upstream branch-2.17 + 45d844304e Merging upstream branch-2.17 + 
+ 2697579fdc Merging upstream branch-2.17
+ 67f604ebdc Merging upstream branch-2.17
+ 05fa26a139 Merging upstream branch-2.17
+ 37c3af3e12 Merging upstream branch-2.17
+ 0e651f6b54 Merging upstream branch-2.17
+ 17b95f12c3 Merging upstream branch-2.17
+ ae8260a204 Merging upstream branch-2.17
+ 128581791a net/i40e: revert link status check on device start (#2138116)
+ 6f8543f511 Merging upstream branch-2.17
+ f19938d3a1 Merging upstream branch-2.17
+ b525569b0d Merging upstream branch-2.17
+ 5dfd4e6400 Merging upstream branch-2.17
+ 1cfa796567 Merging upstream branch-2.17
+ 71ddd61269 Merging upstream branch-2.17
+ a433643e75 Merging upstream branch-2.17
+ 6169fac929 Merging upstream branch-2.17
+ f23d889fb6 Merging upstream branch-2.17
+ 0ec0df6e20 Merging upstream branch-2.17
+ 4ba6b95b22 Merging upstream branch-2.17
+ 71bc61c2bf Merging upstream branch-2.17
+ 7ff23905b5 Merging upstream branch-2.17
+ 25e49e5fca Merging upstream branch-2.17
+ da8dd9a9b2 net/i40e: fix jumbo frame Rx with X722
+ f1c25f8297 Merging upstream branch-2.17
+ 84413c9788 Merging upstream branch-2.17
+ ab44868171 Merging upstream branch-2.17
+ d0433c40d3 Merging upstream branch-2.17
+ 7c403d4410 Merging upstream branch-2.17
+ 0b5d3b91b4 Merging upstream branch-2.17
+ a533d9e5f2 Merging upstream branch-2.17
+ 3b7766ed9f Merging upstream branch-2.17
+ 516dbfecd4 Merging upstream branch-2.17
+ 8ca47ac0d2 Merging upstream branch-2.17
+ 8fb9f9f63d Merging upstream branch-2.17
+ 312ab65013 Merging upstream branch-2.17
+ 11d3f7c41a vhost: fix virtqueue use after free on NUMA reallocation
+ e516b3bf5f Merging upstream branch-2.17
+ d749cfdaec Merging upstream branch-2.17
+ 1295f7a5e6 Merging 7bcd45ce82 version: 21.11.2
+ 6f69b34570 Merging upstream branch-2.17
+ 89e726bde0 Merging upstream branch-2.17
+ a4ef706d85 Merging upstream branch-2.17
+ 22fee2fe1c Merging upstream branch-2.17
+ d5f05599e2 Merging upstream branch-2.17
+ 9696953ec3 Merging upstream branch-2.17
+ 4e7aefcf9b Merging upstream branch-2.17
+ 35eafbd002 Merging upstream branch-2.17
+ f20d9896dc Merging upstream branch-2.17
+ 33619c7858 Merging upstream branch-2.17
+ 90c3a0cb82 Merging upstream branch-2.17
+ 99635fe2d6 Merging upstream branch-2.17
+ 14f43280c9 Merging upstream branch-2.17
+ 6d503f9671 Merging upstream branch-2.17
+ ef89b30abc Merging upstream branch-2.17
+ fc7fadc01f Merging upstream branch-2.17
+ 3048a9f25e Merging upstream branch-2.17
+ 67ef01bba8 Merging upstream branch-2.17
+ 5ca18f3ca7 Merging upstream branch-2.17
+ 97e4e20baf Merging upstream branch-2.17
+ 05c95aefde Merging upstream branch-2.17
+ d5ccc414d3 Merging upstream branch-2.17
+ ec6e7994a3 Merging upstream branch-2.17
+ 47e297811e vhost: fix queue number check when setting inflight FD
+ eb67d78dbb vhost: fix FD leak with inflight messages
+ 4130c8ead3 Merging upstream branch-2.17
+ 5a4bc1fbfe Merging upstream branch-2.17
+ 3383d19a17 Merging upstream branch-2.17
+ c32cf7621a Merging upstream branch-2.17
+ e20f46658c Merging upstream branch-2.17
+
+
+* Tue Mar 12 2024 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-153
+- redhat: Fix testcase failures due to downstream only backport. [RH git: 69bb84bec0]
+ The following tests were failing due to the downstream-only backport:
+
+ 793: tunnel_push_pop - local_ip configuration
+ 797: tunnel_push_pop - use non-local port as tunnel endpoint
+ 801: tunnel_push_pop_ipv6 - local_ip configuration
+
+ This is the downstream backport causing the problem:
+
+ 684b6e8ad9ff ("ofproto-dpif-xlate: Optimize datapath action set by removing last clone action.")
+
+
+* Tue Mar 12 2024 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-152
+- redhat: Use Patch0 instead of Patch [RH git: 950581580e]
+
+
+* Tue Mar 12 2024 Timothy Redaelli <tredaelli@redhat.com> - 2.17.0-151
+- redhat: Fix SyntaxWarnings with Python 3.12 [RH git: 717ba0dd65]
+
+
+* Tue Mar 05 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-150
+- Merging upstream branch-2.17 [RH git: c780a55623]
+ Fix merge conflict as well; the fix goes fully with the upstream version.
+
+ Commit list:
+ 67f834f2b3 bond: Reset stats when deleting post recirc rule.
+ 567b7f9cf1 ofproto-dpif-trace: Fix infinite recirculation tracing.
+ 108d5e70a0 ofproto-dpif-xlate: Fix ignoring IPv6 local_ip for native tunnels.
+ d4d4e7dec3 netdev-dummy: Add local route entries for IP addresses.
+ 01e400bf5b tests: Move the non-local port as tunnel endpoint test.
+ ff356a2988 netdev-dummy: Support multiple IP addresses.
+ e60c121f60 rstp: Fix deadlock with patch ports.
+
+
+* Wed Feb 14 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-149
+- Merging upstream branch-2.17 [RH git: 95c67eed84]
+ Commit list:
+ 558ec274f6 ofproto-dpif-monitor: Remove unneeded calls to clear packets.
+ 1b9ee817e0 bfd: Set proper offsets and flags in BFD packets.
+
+
+* Fri Feb 09 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-148
+- Merging upstream branch-2.17 [RH git: b295496b75]
+ Commit list:
+ 82ab9d1dcb Prepare for 2.17.10.
+ 0bea06d995 Set release date for 2.17.9.
+ b8657dada9 netdev-offload-tc: Check geneve metadata length.
+ e235a421fb odp: ND: Follow Open Flow spec converting from OF to DP.
+
+
+* Thu Feb 08 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-147
+- Merging upstream branch-2.17 [RH git: 11d47978ac]
+ Commit list:
+ 002cba9f19 dpdk: Use DPDK 21.11.6 release for OVS 2.17.
+ ee889659db github: Update versions of action dependencies (Node.js 20).
+ bf717d0f31 ovs-atomic: Fix inclusion of Clang header by GCC 14.
+
+
+* Thu Jan 11 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-146
+- Merging upstream branch-2.17 [RH git: 2348735fed]
+ Commit list:
+ 9bbc2cf8a8 ovsdb-idl.at: Test IDL behavior during database conversion.
+ 049189584f tests: Use _DAEMONIZE macro's to start tcpdump.
+ 30099c5d9e tests-ovsdb: Switch OVSDB_START_IDLTEST to macro.
+ f4b4d650a1 python: idl: Handle monitor_canceled.
+
+
+* Tue Jan 09 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-145
+- Merging upstream branch-2.17 [RH git: c1d394c18f]
+ Commit list:
+ d6caa6ed03 vconn: Count vconn_sent regardless of log level.
+ b0eb66a69d backtrace: Fix error in log_backtrace() documentation.
+ bb89735b21 ovsdb: trigger: Do not allow conversion in read-only mode.
+ a79ee883a7 ovsdb: jsonrpc-server: Fix the DSCP value in default options.
+ 4f01f2f7de jsonrpc: Sort JSON objects while printing debug messages.
+ 3cae42bc53 tests: ovsdb: Use diff -up format for replay test.
+ a7036f6a1e ovsdb-server.at: Enbale debug logs in active-backup tests.
+ c944a30fee ovsdb: transaction: Don't try to diff unchanged columns.
+ 5c0dc96027 ovsdb: transaction: Avoid diffs for different type references.
+
+
+* Tue Jan 09 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-144
+- Merging upstream branch-2.17 [RH git: 10e593b4ef]
+ Commit list:
+ eabd4cb2f6 ci: Update the GitHub Ubuntu runner image to Ubuntu 22.04.
+ c462aabb36 netdev-afxdp: Disable -Wfree-nonheap-object on receive.
+
+
+* Thu Jan 04 2024 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-143
+- Merging upstream branch-2.17 [RH git: f523f8b516]
+ Commit list:
+ b3f8c32eda ovsdb-idl: Preserve change_seqno when deleting rows.
+
+
+* Tue Dec 19 2023 Kevin Traynor <ktraynor@redhat.com> - 2.17.0-142
+- Revert "net/iavf: fix abnormal disable HW interrupt" [RH git: fc3754f28d]
+ This reverts commit b965aceffdb9dd8d2501314c174716617371d752.
+
+ This was reported by QE to be causing a hang in FD-3249.
+
+ commit b965aceffdb9dd8d2501314c174716617371d752
+ Author: Mingjin Ye <mingjinx.ye@intel.com>
+ Date: Wed Jun 14 09:53:03 2023 +0000
+
+ net/iavf: fix abnormal disable HW interrupt
+
+ [ upstream commit 675a104e2e940ec476e8b469725e8465d01c0098 ]
+
+ Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
+
+
+* Tue Dec 05 2023 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-141
+- Merging upstream branch-2.17 [RH git: 25eec953ca]
+ Commit list:
+ d254aedad7 tunnel: Do not carry source port from a previous tunnel.
+ 1857c569ee netdev-offload-tc: Fix offload of tunnel key tp_src.
+ 0a0c500d7c cirrus: Update from FreeBSD 12 to 14.
+
+
+* Fri Dec 01 2023 Kevin Traynor <ktraynor@redhat.com> - 2.17.0-140
+- Merging 4e50ad4469 version: 21.11.5 [RH git: 9040d7ae2f]
+ Commit list:
+ 4e50ad4469 version: 21.11.5
+ 89133ca5b1 version: 21.11.5-rc1
+ cdb53b339b net/mlx5: fix drop action attribute validation
+ c6c34e7304 net/hns3: get FEC capability from firmware
+ 464ba4ae95 net/hns3: fix missing FEC capability
+ 5a557a0746 test/bonding: fix include of standard header
+ 894cfad370 net/ngbe: fix RSS offload capability
+ 155acf8e9d test/mbuf: fix crash in a forked process
+ ad2b9b7ef9 net/nfp: fix offloading flows
+ 9866fb3dae doc: update BIOS settings and supported HW for NTB
+ c90d34c66f net/mlx5: fix validation for conntrack indirect action
+ 88407eb228 net/mlx5: fix LRO TCP checksum
+ a83cabaa74 net/mlx5: fix drop action memory leak
+ 23103032ab net/e1000: fix Rx and Tx queue status
+ 5df4afe600 net/igc: fix Rx and Tx queue status
+ 9eceb8933e net/ixgbe: fix Rx and Tx queue status
+ 7b9fc06893 common/iavf: fix MAC type for 710 NIC
+ 8ff3aaeba2 net/iavf: fix stop ordering
+ f89976b2af net/i40e: fix comments
+ 775d83159d doc: fix typos and wording in flow API guide
+ 904c11f81a kni: fix build with Linux 6.5
+ 4934f0d915 ipsec: fix NAT-T header length
+ 7d5fa222be examples/ipsec-secgw: fix TAP default MAC address
+ 0fad3187d7 app/crypto-perf: fix socket ID default value
+ 629d2a5ac7 examples/fips_validation: fix digest length in AES-GCM
+ ea37fdf491 test/crypto: fix PDCP-SDAP test vectors
+ 80aeb93e1e common/qat: detach crypto from compress build
+ 2fabcaceff baseband/fpga_5gnr_fec: fix starting unconfigured queue
+ 2074d7e982 baseband/fpga_5gnr_fec: fix possible division by zero
+ 03f55be06e net/ice: fix RSS hash key generation
+ 3eb4ad8ed6 net/iavf: fix tunnel TSO path selection
+ 336ae4ce52 net/ice: fix 32-bit build
+ 87a713bbe9 net/iavf: fix VLAN insertion in vector path
+ c3d6a7d2e4 net/ice: fix VLAN mode parser
+ 3f5a206aab net/mlx5: forbid MPRQ restart
+ c947d1f3bd net/mlx5: fix flow workspace destruction
+ 9a1b7013e1 net/mlx5: fix flow dump for modify field
+ 376ba6a8d7 ethdev: fix potential leak in PCI probing helper
+ 4045558987 net/hns3: fix index to look up table in NEON Rx
+ 3a78edd47d net/hns3: fix non-zero weight for disabled TC
+ 7b603155c1 doc: fix number of leading spaces in hns3 guide
+ a6df8f6163 doc: fix syntax in hns3 guide
+ 8d3993728f doc: fix kernel patch link in hns3 guide
+ 73aaba67bd net/hns3: delete duplicate macro definition
+ 8ed9016759 app/testpmd: fix checksum engine with GTP on 32-bit
+ 3bb1514228 net/netvsc: fix sizeof calculation
+ 4701569166 hash: fix reading unaligned bits in Toeplitz hash
+ 8f2d1e294c mem: fix memsegs exhausted message
+ aa341b7700 fib: fix adding default route
+ 0c24fe6db3 ipc: fix file descriptor leakage with unhandled messages
+ b687d491a2 net/cnxk: fix flow queue index validation
+ fa91d69044 net/cnxk: fix cookies check with security offload
+ 27da89db96 net/cnxk: flush SQ before configuring MTU
+ 09ad6cf34e common/mlx5: adjust fork call with new kernel API
+ 3bb439262b net/mlx5: fix device removal event handling
+ 04972e0400 net/mlx5: fix risk in NEON Rx descriptor read
+ ad874a89de net/ice: fix protocol agnostic offloading with big packets
+ 87ec3aee94 net/e1000: fix queue number initialization
+ 0826a11cd2 net/i40e: fix tunnel packet Tx descriptor
+ b965aceffd net/iavf: fix abnormal disable HW interrupt
+ 9dee3615aa net/ixgbe: add proper memory barriers in Rx
+ 259f2c0780 net/ice: fix tunnel packet Tx descriptor
+ fe2fc3df82 net/iavf: fix VLAN offload with AVX512
+ c4dc88c937 common/sfc_efx/base: fix Rx queue without RSS hash prefix
+ 4963f9aba5 net/nfp: fix address always related with PF ID 0
+ fcea6b6498 net/ngbe: fix extended statistics
+ edf9b54d25 net/txgbe: fix extended statistics
+ 1077232f46 net/txgbe: fix to set autoneg for 1G speed
+ 5b72e18f0a net/txgbe: fix interrupt enable mask
+ 1a7755214b net/txgbe/base: fix Tx with fiber hotplug
+ d98f67c3a3 net/bonding: fix destroy dedicated queues flow
+ b11127de28 net/bonding: fix startup when NUMA is not supported
+ da6cad795b ethdev: update documentation for API to get FEC
+ b6557a758d ethdev: check that at least one FEC mode is specified
+ d53afe6d0e ethdev: update documentation for API to set FEC
+ 1cf0bf459c crypto/openssl: skip workaround at compilation time
+ ad9d44049a ci: fix libabigail cache in GHA
+ 88497910a9 mbuf: fix Doxygen comment of distributor metadata
+ 5d9357c7e2 test: add graph tests
+ 47a25c8459 examples/l2fwd-cat: fix external build
+ 648321ab81 doc: remove warning with Doxygen 1.9.7
+ eac5508ce6 doc: fix typo in graph guide
+ 77145940b5 net/virtio-user: fix leak when initialisation fails
+ 7b8888a456 net/ice: fix outer UDP checksum offload
+ 1c5b0a02f0 net/ice: initialize parser for double VLAN
+ 634cd44ea0 net/ice: fix timestamp enabling
+ 285bc0e2d1 net/ice: adjust timestamp mbuf register
+ f420162557 net/ice/base: remove unreachable code
+ cdd62beeb2 net/ice: fix DCF control thread crash
+ 45daf222f8 net/iavf: release large VF when closing device
+ 9d5f649aa5 net/ice: fix DCF RSS initialization
+ 4408ee1716 net/ice: fix statistics
+ 0a060a3c1e net/iavf: fix Rx data buffer size
+ 54b08280e7 net/ice: fix Rx data buffer size
+ d5755af28b net/i40e: fix Rx data buffer size
+ 1173611e55 doc: fix typo in cnxk platform guide
+ a56289503e net/qede: fix RSS indirection table initialization
+ 498f380f8c common/cnxk: fix inline device VF identification
+ 6f685b45a6 common/cnxk: fix IPsec IPv6 tunnel address byte swap
+ baff3174c6 net/mlx5: fix duplicated tag index matching in SWS
+ 2681bfff6d net/mlx5: enhance error log for tunnel offloading
+ 32d51fbf81 net/virtio: fix initialization to return negative errno
+ 2c2d4e88a8 net/virtio: propagate interrupt configuration error values
+ 11a426ee6b vhost: fix invalid call FD handling
+ f329acb883 crypto/ipsec_mb: optimize allocation in session
+ 2487cb35ca crypto/ipsec_mb: fix enqueue counter for SNOW3G
+ 0428cfa189 test/crypto: fix session creation check
+ c4583cd33a test/crypto: fix return value for SNOW3G
+ 792da5fcf7 crypto/scheduler: fix last element for valid args
+ 54fa9bdcd5 doc: fix auth algos in cryptoperf app
+ f68f4b2b8b net/vmxnet3: fix return code in initializing
+ 1fa656ba3c net/hns3: fix IMP reset trigger
+ b99c14584b net/hns3: fix redundant line break in log
+ 44160c032b net/hns3: fix inaccurate log
+ 1834797be4 net/hns3: fix uninitialized variable
+ f630f709e9 net/hns3: fix device start return value
+ 51a43c9a86 net/hns3: fix mbuf leakage when RxQ started after reset
+ 3d26d3f0c2 net/hns3: fix mbuf leakage when RxQ started during reset
+ 37164c2537 net/hns3: extract PTP to its own header file
+ 80d032eed5 net/hns3: uninitialize PTP
+ d2ec3c302c net/hns3: fix RTC time after reset
+ bf28f06d51 net/hns3: fix RTC time on initialization
+ 57a458ded6 doc: fix format in flow API guide
+ 65193aced0 net/hns3: fix FEC mode check
+ 3e8f218194 net/hns3: fix FEC mode for 200G ports
+ c11ee881c2 ethdev: fix indirect action conversion
+ b374bc7b0e net/hns3: fix Rx multiple firmware reset interrupts
+ 5c105051b9 net/hns3: fix variable type mismatch
+ 4077943285 net/hns3: fix never set MAC flow control
+ fbfa671cbf net/sfc: invalidate dangling MAE flow action FW resource IDs
+ b3741dfb48 ethdev: fix MAC address occupies two entries
+ 80e17cb702 net/txgbe: fix use-after-free on remove
+ 6de921b8d6 net/vmxnet3: fix drop of empty segments in Tx
+ b23b2580e3 app/testpmd: fix GTP L2 length in checksum engine
+ f297d8681d net/dpaa2: fix checksum good flags
+ aae992cbfb net/tap: set locally administered bit for fixed MAC address
+ ed992acac3 net/sfc: stop misuse of Rx ingress m-port metadata on EF100
+ f400487830 net/hns3: fix build warning
+ b75ffdb751 eal/linux: fix legacy mem init with many segments
+ 3ca06320cd eal/linux: fix secondary process crash for mp hotplug requests
+ 15aac36450 event/cnxk: fix nanoseconds to ticks conversion
+ 44a0ae7840 eventdev/timer: fix buffer flush
+ 63daa71622 event/dsw: free rings on close
+ 87c6203836 doc: fix event timer adapter guide
+ 84d280e276 eventdev/timer: fix timeout event wait behavior
+ e248dac54f pci: fix comment referencing renamed function
+ e506470227 eal: avoid calling cleanup twice
+ c2f6df57e2 test/malloc: fix statistics checks
+ 13c7286a69 test/malloc: fix missing free
+ e531825d4e pipeline: fix double free for table stats
+ 4b8a60276b ring: fix dequeue parameter name
+ c95d48b0d4 telemetry: fix autotest on Alpine
+ 16e1d054e4 build: fix case of project language name
+ e8393bfd7b kernel/freebsd: fix function parameter list
+ ae3de8c27c vfio: fix include with musl runtime
+ 096ec31efd ring: fix use after free
+ 27f385ccbd examples/ntb: fix build with GCC 13
+ 43a3d772b3 examples/ip_pipeline: fix build with GCC 13
+ b22ddfe63a kni: fix build with Linux 6.3
+ e185150c2a version: 21.11.4
+ 37b4d55f5e build: detect backtrace availability
+ f7dce59377 version: 21.11.4-rc1
+ dea6bb1c53 vhost: fix possible FD leaks
+ a7843e7a00 net/af_xdp: squash deprecated-declaration warnings
+ 29d8b0a92a ci: switch to Ubuntu 20.04
+ 6da61add54 doc: fix pipeline example path in user guide
+ 03973756cf acl: fix crash on PPC64 with GCC 11
+ 606474e9d0 pdump: fix build with GCC 12
+ 143dc1f75c test/crypto: fix statistics error messages
+ f428da47bf net/mlx5: fix sysfs port name translation
+ 4509d71d9e net/mlx5: fix CQE dump for Tx
+ c3a4fd09f9 net/mlx5: fix build with GCC 12 and ASan
+ b663eda75c examples/qos_sched: fix config entries in wrong sections
+ fd79c3462e net/nfp: fix MTU configuration order
+ 398c55fee9 net/ipn3ke: fix representor name
+ b3fbfee3fb net/ipn3ke: fix thread exit
+ 400ac4f9da bus/ifpga: fix devargs handling
+ 34934b2b19 doc: fix LPM support in l3forward guide
+ 7a7e85c4de net/mlx5: fix hairpin Tx queue reference count
+ 35f911011c net/iavf: fix device stop during reset
+ e71d71bc0c net/i40e: fix MAC loopback on X722
+ acfff39a6d net/e1000: fix saving of stripped VLAN TCI
+ ac111b80fe net/i40e: fix AVX512 fast-free path
+ 998d70414b net/sfc: invalidate switch port entry on representor unplug
+ f5922d3d3d net/virtio: remove address width limit for modern devices
+ 1585556d2c net/vhost: fix Rx interrupt
+ a38b2e5693 net/vhost: fix leak in interrupt handle setup
+ 14297e3110 net/vhost: add missing newline in logs
+ b3225c2215 app/bbdev: check statistics failure
+ 9daf33a237 app/compress-perf: fix remaining data for ops
+ 50f94c98a9 test/crypto: fix capability check for ZUC cipher-auth
+ 0872c1aaad test/crypto: fix ZUC digest length in comparison
+ 655c88665d test/mbuf: fix test with mbuf debug enabled
+ 988dc26cab test: fix segment length in packet generator
+ b71a128da3 reorder: fix sequence number mbuf field register
+ 7dd9b2a9fa raw/skeleton: fix selftest
+ fe4b05502d net/hns3: add verification of RSS types
+ a306316dd2 net/hns3: reimplement hash flow function
+ 31b6a0d06f net/hns3: separate flow RSS config from RSS conf
+ 6e7fad71e4 net/hns3: allow adding queue buffer size hash rule
+ c1c4bb0af2 net/hns3: remove unused structures
+ 2e9d4cae1a net/hns3: save hash algo to RSS filter list node
+ d624edd0a2 net/hns3: use new RSS rule to configure hardware
+ 5b805e1ebc net/hns3: separate setting RSS types
+ 10b331bc5f net/hns3: separate setting hash key
+ 22b71c0ad7 net/hns3: separate setting hash algorithm
+ 1f685aab8a net/hns3: use hardware config to report hash types
+ abe4818f0e net/hns3: use hardware config to report hash key
+ 8dac9d9073 net/hns3: extract common functions to set Rx/Tx
+ b5f5a77a9f net/hns3: fix burst mode query with dummy function
+ 6cce9c61f9 net/hns3: fix RSS key size compatibility
+ 9a20540ca1 net/hns3: make getting Tx function static
+ df7d846ead net/hns3: separate Tx prepare from getting Tx function
+ 8ad4e433e7 common/cnxk: fix auth key length
+ 7faf9ad3c4 eal: cleanup alarm and hotplug before memory detach
+ 64407a1385 common/cnxk: fix dual VLAN parsing
+ bace3fae4e net/sfc: fix resetting mark in tunnel offload switch rules
+ 0a3ba8d8bc mailmap: add list of contributors
+ 27b61d8f7b app/flow-perf: fix division or module by zero
+ a9cddaff93 app/crypto-perf: fix test file memory leak
+ 44bd8ea719 net/mlx5: fix Windows build with MinGW GCC 12
+ f8bbcce58a net/hns3: separate setting and clearing RSS rule
+ 5de7a11e06 net/hns3: separate setting redirection table
+ 9a7ea28161 net/hns3: use hardware config to report redirection table
+ abeba16e38 net/hns3: fix possible truncation of redirection table
+ f9e58fd51a net/hns3: fix possible truncation of hash key when config
+ 43af30f9a5 mem: fix heap ID in telemetry
+ 01a15c3c5c cmdline: handle EOF as quit
+ 42339a554e cmdline: make rdline status not private
+ c98bda4768 kni: fix possible starvation when mbufs are exhausted
+ b7c5365796 common/sfc_efx/base: add MAE mark reset action
+ 5b765d83bb regex/mlx5: fix doorbell record
+ e140c624f6 regex/mlx5: utilize all available queue pairs
+ c07485a1c1 table: fix action selector group size log2 setting
+ d15e8526b0 raw/skeleton: fix empty devargs parsing
+ c6dd06ddb6 dma/skeleton: fix empty devargs parsing
+ cbb0399801 net/virtio: fix empty devargs parsing
+ 1b619068f3 net/hns3: fix empty devargs parsing
+ 52480ab3ae cryptodev: fix empty devargs parsing
+ cb72e89906 compressdev: fix empty devargs parsing
+ 5d95507d27 kvargs: add API documentation for process callback
+ 5eeee34bf7 common/cnxk: add memory clobber to steor and ldeor
+ c0a24be8f2 net/cnxk: fix LBK BPID usage
+ 617735f8fb net/ixgbe: fix IPv6 mask in flow director
+ 66c0e09464 app/testpmd: fix secondary process packet forwarding
+ 096166c31c ethdev: remove telemetry Rx mbuf alloc failed field
+ 2d42946f45 net/nfp: fix getting RSS configuration
+ 36cbdd704b ethdev: fix build with LTO
+ 4b158390a6 app/testpmd: fix packet transmission in noisy VNF engine
+ ce6a8e97ec app/testpmd: fix packet count in IEEE 1588 engine
+ 0a4ce1a382 app/testpmd: fix Tx preparation in checksum engine
+ 90c0aaef25 compress/mlx5: fix queue setup for partial transformations
+ 38c2a798fd compress/mlx5: fix output Adler-32 checksum offset
+ cb240b164c compress/mlx5: fix decompress xform validation
+ f18e07dad7 examples/ipsec-secgw: fix auth IV length
+ c41493361c net/virtio: deduce IP length for TSO checksum
+ 7a4cda6e20 vhost: fix OOB access for invalid vhost ID
+ e29f5be725 test/bbdev: remove check for invalid opaque data
+ 8a36f3995a test/bbdev: extend HARQ tolerance
+ f77e413c91 test/bbdev: fix crash for non supported HARQ length
+ 349431b3fb event/cnxk: fix SSO cleanup
+ 6147a8003d doc: fix reference to event timer header
+ 52cf05533b doc: add gpudev to the Doxygen index
+ 0bc981bfdf eal/windows: fix pedantic build
+ 7b20d58e71 build: fix dependencies lookup
+ 74f84c7558 examples/qos_sched: fix Tx port config when link down
+ 661f5540c3 examples/cmdline: fix build with GCC 12
+ c791a30249 eal: use same atomic intrinsics for GCC and clang
+ aff575df46 build: fix toolchain definition
+ fa627c973b test/reorder: fix double free of drained buffers
+ 68bf4f36e9 reorder: invalidate buffer from ready queue in drain
+ 5f2d6beac4 dma/ioat: fix error reporting on restart
+ 3d440ae7d3 dma/ioat: fix indexes after restart
+ 7e8fc5d992 dma/ioat: fix device stop if no copies done
+ 118edbb634 eal/freebsd: fix lock in alarm callback
+ 818b598bfd sched: fix alignment of structs in subport
+ 058262ec6c app/testpmd: fix crash on cleanup
+ e6152fdf62 net/bnxt: fix link state change interrupt config
+ 8b717146ab app/compress-perf: fix testing single operation
+ 87689a74a4 app/compress-perf: fix some typos
+ cc692ab817 net/iavf: fix VLAN offload with AVX2
+ 312d7c3eb2 net/ixgbe: enable IPv6 mask in flow rules
+ 7cdea129b8 net/iavf: fix building data desc
+ a6aba6b291 net/iavf: protect insertion in flow list
+ 1ce8e1d4a8 net/ice: fix validation of flow transfer attribute
+ cb8f428cfe net/i40e: fix validation of flow transfer attribute
+ 22ffbcffdf net/iavf: add lock for VF commands
+ a13a713ce4 net/ixgbe: fix firmware version consistency
+ c8e4a58ca9 net/i40e: reduce interrupt interval in multi-driver mode
+ 0b4592d347 net/mlx5: check compressed CQE opcode in vectorized Rx
+ 026450013f net/mlx5: ignore non-critical syndromes for Rx queue
+ 0167cd8cf1 net/mlx5: fix error CQE dumping for vectorized Rx
+ e8b6b06faa net/mlx5: fix flow sample with ConnectX-5
+ 190d16fb4c net/txgbe: fix Rx buffer size in config register
+ 08f5c42a98 net/sfc: enforce fate action in transfer flow rules
+ ef7d2f34a7 net/sfc: fix MAC address entry leak in transfer flow parsing
+ 483243b873 net/hns3: remove debug condition for Tx prepare
+ c086288760 net/hns3: add debug info for Rx/Tx dummy function
+ e0f5e9d162 app/testpmd: fix link check condition on port start
+ a3172b1ab9 net/sfc: export pick transfer proxy callback to representors
+ f6c50b6c50 net/hns3: fix duplicate RSS rule check
+ 69d2be50d9 net/hns3: fix config struct used for conversion
+ c7c798a8f6 net/hns3: fix warning on flush or destroy rule
+ 2f00c06957 net/hns3: remove useless code when destroy valid RSS rule
+ 8ba1441f48 net/hns3: use RSS filter list to check duplicated rule
+ eee8b3cd45 net/hns3: fix clearing RSS configuration
+ abe093c214 net/hns3: refactor set RSS hash algorithm and key interface
+ b6328f7feb net/hns3: extract common function to query device
+ 81cb3c29c8 net/hns3: fix log about indirection table size
+ 489a1c2580 net/txgbe: fix interrupt loss
+ f8e27fb215 net/ngbe: fix packet type to parse from offload flags
+ 5d80aae214 net/txgbe: fix packet type to parse from offload flags
+ 551a0e317c net/txgbe: fix default signal quality value for KX/KX4
+ b641ca3033 app/testpmd: fix forwarding stats for Tx dropped
+ 8746dd7d44 doc: fix description of L2TPV2 flow item
+ 9c90d1a945 net/hns3: declare flow rule keeping capability
+ fd950290d1 net/virtio-user: fix device starting failure handling
+ 6e302e5ede vhost: fix possible FD leaks on truncation
+ f05708ea45 vhost: decrease log level for unimplemented requests
+ 02397094e9 eventdev/timer: fix overflow
+ 5f55f9989c test/mbuf: fix mbuf reset test
+ 26b4d5a198 eal/linux: fix hugetlbfs sub-directories discovery
+ bb9295b2f3 telemetry: fix repeat display when callback don't init dict
+ 13bf4f461a raw/ifpga/base: fix init with multi-process
+ 8a411e66c3 mem: fix hugepage info mapping
+ 9b7ac02837 event/cnxk: fix timer operations in secondary process
+ 45fb6a4f8a event/cnxk: fix burst timer arm
+ da4a271c75 event/cnxk: wait for CPT flow control on WQE path
+ 2b15b1cd4a net/bnxt: fix RSS hash in mbuf
+ 20189c4886 net/bnxt: fix Rx queue stats after queue stop and start
+ aa0ecc4f91 net/bnxt: fix Tx queue stats after queue stop and start
+ 1ac2732197 compressdev: fix end of driver list
+ af78ee860e test/crypto: add missing MAC-I to PDCP vectors
+ af7f9481e1 test/crypto: fix typo in AES test
+ b2c5b2d8a8 crypto/ccp: fix IOVA handling
+ bc5f3c068c crypto/ccp: remove some dead code for UIO
+ ba8923c86e crypto/ccp: remove some printf
+ df587bef4a baseband/acc: fix acc100 iteration counter in TB
+ 3664b67985 baseband/acc: fix memory leak on acc100 close
+ dd830098ef vhost: fix net header settings in datapath
+ e928dfe88c vdpa/ifc: fix reconnection in SW-assisted live migration
+ 2d551ec8dc vdpa/ifc: fix argument compatibility check
+ f8d7e778b5 app/dumpcap: fix storing port identifier
+ dce447af75 examples/qos_sched: fix debug mode
+ 5cac713dff mem: fix telemetry data truncation
+ ddb46da599 cryptodev: fix telemetry data truncation
+ 119d73b152 mempool: fix telemetry data truncation
+ 86d2c8b77c ethdev: fix telemetry data truncation
+ 005b23b18f telemetry: move include after guard
+ f6a5384a93 app/testpmd: fix interactive mode with no ports
+ f43191608a net/hns3: fix inaccurate RTC time to read
+ d13acd65cc net/nfp: fix firmware name derived from PCI name
+ e4609ddf51 graph: fix node shrink
+ 868b1b71df gpudev: fix deadlocks when registering callback
+ 9a43ba896c fbarray: fix metadata dump
+ e8f2aab768 crypto/qat: fix stream cipher direction
+ 98f5f64240 eventdev/eth_tx: fix devices loop
+ a668035217 app/crypto-perf: fix IPsec direction
+ b2d5a49ce6 app/crypto-perf: fix SPI zero
+ d734fcf15c app/crypto-perf: fix number of segments
+ 17817f916b devtools: fix escaped space in grep pattern
+ 71d0f78adf doc: fix dependency setup in l2fwd-cat example guide
+ 47951ef1dc hash: fix GFNI implementation build with GCC 12
+ 84d43ac12f kni: fix build on RHEL 9.1
+ 31608e4db5 version: 21.11.3
+ e1d728588d ring: squash gcc 12.2.1 warnings
+ adeaf361f0 vhost: fix doxygen warnings
+ 3b02ad76a3 lib: remove empty return types from doxygen comments
+ 7a21adb82b Revert "net/iavf: add thread for event callbacks"
+ 558ff33739 Revert "net/i40e: enable maximum frame size at port level"
+ 494050958c Revert "net/i40e: fix max frame size config at port level"
+ 528361cfa8 Revert "net/i40e: fix jumbo frame Rx with X722"
+ 0c553ce527 net/bonding: set initial value of descriptor count alignment
+ 9524c48bcd devtools: fix checkpatch header retrieval from stdin
+ d5f1d8130b examples/ipsec-secgw: fix Tx checksum offload flag
+ 0bb6905a88 version: 21.11.3-rc1
+ 46abe3043e net/i40e: fix jumbo frame Rx with X722
+ 655f0ed4a7 net/hns3: fix restore filter function input
+ eee943e2f2 net/hns3: fix lock protection of RSS flow rule
+ 14687654a0 net/hns3: fix RSS rule restore
+ 800390b260 net/hns3: extract functions to create RSS and FDIR flow rule
+ 2d10a3a9ba doc: add Rx buffer split capability for mlx5
+ 0b176394b7 net/mlx5: fix assert when creating meter policy
+ 7cd66fc297 net/mlx5: fix mirror flow validation with ASO action
+ 666d1e75a2 net/mlx5: fix source port checking in sample flow rule
+ bd9e9d22ef trace: fix metadata dump
+ fcf8e69afb net/iavf: add thread for event callbacks
+ 9345c7264d net/mlx5: fix port event cleaning order
+ db856c5fb6 net/mlx5: fix race condition in counter pool resizing
+ 3e852fd9fe ci: enable ABI check in GHA
+ 66bfbc5b37 Revert "cryptodev: fix missing SHA3 algorithm strings"
+ 9a3294eb17 doc: avoid meson deprecation in setup
+ 23f263e7bd doc: fix maximum packet size of virtio driver
+ 776c777a61 drivers: remove unused build variable
+ 631e68c186 net/nfp: fix Rx descriptor DMA address
+ 24d76003e1 doc: fix colons in testpmd aged flow rules
+ 9f54b0a4f9 doc: fix underlines in testpmd guide
+ d42406e77e doc: add LRO size limitation in mlx5 guide
+ 8e04bc3773 net/mlx5: fix maximum LRO message size
+ 4b50df7128 net/ixgbevf: fix promiscuous and allmulti
+ 1961dbbdbd test/event: fix build with clang 15
+ ca0952588a test/member: fix build with clang 15
+ 4cacbe5682 test/efd: fix build with clang 15
+ 1cd61dc7fb app/testpmd: fix build with clang 15 in flow code
+ e5d36910cf app/testpmd: fix build with clang 15
+ c5f336041e net/dpaa2: fix build with clang 15
+ c44d10cae9 net/atlantic: fix build with clang 15
+ c6ece01586 bus/dpaa: fix build with clang 15
+ d116a5e632 vhost: fix build with clang 15
+ 2d03c4e3ef service: fix build with clang 15
+ 3930398baa vdpa/ifc: handle data path update failure
+ 29a2eb7cee ring: remove leftover comment about watermark
+ ecfdc8089d ring: fix description
+ 674f2e762d drivers: fix typos found by Lintian
+ b16910eabc doc: fix typo depreciated instead of deprecated
+ 4726dffdd2 test/crypto: fix bitwise operator in a SNOW3G case
+ 5ffd9e4baf doc: document device dump in procinfo guide
+ 1c94187eec doc: fix application name in procinfo guide
+ 08a6a565e7 mempool/cnxk: fix destroying empty pool
+ aaf49e83ab examples/fips_validation: fix typo in error log
+ c8812bd8ec event/cnxk: fix missing mempool cookie marking
+ 7773bb2b9a app/eventdev: fix limits in error message
+ 80dfbebe4f event/cnxk: fix mbuf offset calculation
+ 40f8ea78a9 doc: fix event timer adapter guide
+ fd7eb800f4 ci: update to new API for step outputs in GHA
+ 852ba6cf84 ci: bump versions of actions in GHA
+ b0df64d8a2 net/ice: fix interrupt handler unregister
+ 1f7763504a net/ice: fix scalar Tx path segment
+ da678c3542 net/ice: fix scalar Rx path segment
+ 43acbdad22 net/i40e: fix pctype configuration for X722
+ 12164b4b18 net/iavf: fix VLAN offload
+ 59f67d1041 net/ice/base: fix duplicate flow rules
+ 3e76df459a net/mlx5: fix drop action validation
+ 4f1a129761 net/mlx5: fix port initialization with small LRO
+ 815d59452a net/mlx5: fix indexed pool local cache crash
+ 4cce54a549 net/mlx5: fix first segment inline length
+ b4524bef61 net/mlx5: fix hairpin split with set VLAN VID action
+ 6f9e7bd908 net/mlx5: fix shared Rx queue config reuse
+ bbddde24b2 common/mlx5: fix shared mempool subscription
+ 97b4706f1e net/mlx5: fix action flag data type
+ 4468cce76b bus/auxiliary: prevent device from being probed again
+ e2413f6427 test/hash: fix bulk lookup check
+ 1e81619d17 test/hash: remove dead code in extendable bucket test
+ 84848bab5c hash: fix RCU configuration memory leak
+ 5a4f6d16e8 power: fix some doxygen comments
+ 3ff124101d eal: fix doxygen comments for UUID
+ 89f4c06b0f net/bonding: fix mbuf fast free handling
+ a0521c13c1 app/testpmd: make quit flag volatile
+ 194dea76b1 net/bonding: fix dropping valid MAC packets
+ 185674ed26 app/testpmd: fix MAC header in checksum forward engine
+ 8bdb61a66f net/bonding: fix slave device Rx/Tx offload configuration
+ ffdfd9c1f2 baseband/acc100: fix double MSI intr in TB mode
+ 43f2133add baseband/acc100: fix ring/queue allocation
+ 61623044a0 baseband/acc100: fix null HARQ input case
+ 1c83304c34 baseband/acc100: enforce additional check on FCW
+ 478d7fa578 baseband/acc100: fix ring availability calculation
+ fd2001af83 baseband/acc100: check AQ availability
+ 9e244f70bc baseband/acc100: add LDPC encoder padding function
+ 49be94c1a2 examples/vhost: fix use after free
+ 4733d1363d net/bonding: fix flow flush order on close
+ dec305584e net/mlx5: fix build with recent compilers
+ 673e72e4dd crypto/qat: fix null hash algorithm digest size
+ 0c19d3c807 examples/ipsec-secgw: fix Tx checksum offload flag
+ 91f397092e test/crypto: fix PDCP vectors
+ 1d95544ee5 baseband/acc100: fix close cleanup
+ 29ee0dd15a baseband/acc100: fix device minimum alignment
+ bc46d5e411 baseband/acc100: fix clearing PF IR outside handler
+ c0e09362ca baseband/acc100: fix input length for CRC24B
+ e20054a0ca baseband/acc100: add null checks
+ f0f952b3a4 baseband/acc100: check turbo dec/enc input
+ a52ef88f24 baseband/acc100: fix memory leak
+ dfe0ec3699 test/crypto: fix wireless auth digest segment
+ 230df8806a examples/l2fwd-crypto: fix typo in error message
+ b4d5a4880f net/mlx5: fix thread termination check on Windows
+ 5f6b7cdb26 common/cnxk: fix schedule weight update
+ adc3d8f0be net/cnxk: fix later skip to include mbuf private data
+ 9e011e6018 net/ice: support VXLAN-GPE tunnel offload
+ 80b45cf627 net/iavf: fix queue stop for large VF
+ 7a87e29707 net/iavf: fix SPI check
+ d8f6f9c217 net/iavf: fix IPsec flow create error check
+ 8d69eff7e5 net/ice: fix null function pointer call
+ a7ae3ec58b net/ixgbe: fix unexpected VLAN Rx in promisc mode on VF
+ 7fc86024c5 net/ixgbe: fix broadcast Rx on VF after promisc removal
+ c422bc8590 examples/qos_sched: fix number of subport profiles
+ 71f3070f3b sched: fix subport profile configuration
+ 8c0059933d net/mlx5: fix null check in devargs parsing
+ 3a37707b3f net/mlx5: fix tunnel header with IPIP offload
+ 5dd7f99419 net/mlx5: fix RSS expansion buffer size
+ 695bdd0252 net/mlx5: fix thread workspace memory leak
+ ce31e6798d vhost: add non-blocking API for posting interrupt
+ 8c4cb627f2 net/nfp: fix internal buffer size and MTU check
+ a2e076069c app/testpmd: remove jumbo offload
+ 1df0906a71 net/ionic: fix reported error stats
+ 1865648551 net/ionic: fix Rx filter save
+ b91f3a9676 net/ionic: fix adapter name for logging
+ 9c5b13e3d6 net/ionic: fix endianness for RSS
+ 2e91cfe2d1 net/ionic: fix endianness for Rx and Tx
+ 69d5f8cbdb net/bonding: fix descriptor limit reporting
+ 765b6db1e1 app/testpmd: skip port reset in secondary process
+ 74a6d9b865 net/iavf: revert VLAN insertion fix
+ 9f735dd4ae doc: fix support table for Ethernet/VLAN flow items
+ 5123b23465 gro: check payload length after trim
+ ff54781530 eventdev/eth_tx: fix queue delete
+ 11b702acfa eventdev/crypto: fix multi-process
+ 050edd6df0 event/sw: fix log in self test
+ 14a1303083 event/sw: fix flow ID init in self test
+ 24687124e2 net/dpaa2: fix buffer freeing on SG Tx
+ e0c63efff0 net/dpaa2: use internal mempool for SG table
+ a71b12bced net/dpaa: fix buffer freeing on SG Tx
+ a7bcea2f9c net/dpaa: use internal mempool for SG table
+ 161eb5bcfc event/dlb2: handle enqueuing more than maximum depth
+ 5da67ffc34 service: fix early move to inactive status
+ 5c3b10f936 doc: fix reference to dma application example
+ f0f63a2a3c app/dumpcap: fix pathname for output file
+ 9fe212c5b1 app/dumpcap: fix crash on cleanup
+ 3c5fb1c3aa pdump: do not allow enable/disable in primary process
+ b7926cae59 trace: fix race in debug dump
+ 8f22713b3f trace: fix dynamically enabling trace points
+ f6c054fa69 trace: fix leak with regexp
+ 6f27e2f1a7 trace: fix mode change
+ f620019ec3 trace: fix mode for new trace point
+ f2a63213a4 buildtools: fix NUMA nodes count
+ 3fedf9fcf4 examples/l3fwd: fix MTU configuration with event mode
+ 0f52767b30 dma/idxd: check DSA device allocation
+ 19ab7dbdeb node: check Rx element allocation
+ dd0ea76557 graph: fix node objects allocation
+ 6976f45b64 common/mlx5: fix multi-process mempool registration
+ fd3f52c7d7 mempool: make event callbacks process-private
+ b252ecfe35 pcapng: fix write more packets than IOV_MAX limit
+ 226d2f8fd8 doc: fix dumpcap interface parameter option
+ 7e0fbf6e4b eal: fix data race in multi-process support
+ f9544dfb1e net/bonding: fix Tx hash for TCP
+ ef528a1c38 gro: trim tail padding bytes
+ f4696c801d net/dpaa: fix buffer freeing in slow path
+ 7ac138d29b net/dpaa: fix jumbo packet Rx in case of VSP
+ 82b3b682b2 net/dpaa2: fix DPDMUX error behaviour
+ c1098f3797 net/enetfec: fix buffer leak
+ 5747e5d217 net/enetfec: fix restart
+ 8582b5b558 net/nfp: fix memory leak in Rx
+ dd0c2c45fe net/hns3: fix minimum Tx frame length
+ d08a9e5bf5 net/hns3: fix VF mailbox message handling
+ dea739ceb8 net/hns3: revert fix mailbox communication with HW
+ cb4be0a694 net/hns3: add L3 and L4 RSS types
+ b3596aa547 net/hns3: fix IPv4 RSS
+ 6b31e0bea1 net/hns3: fix typos in IPv6 SCTP fields
+ 4a628c7185 net/hns3: fix IPv4 and IPv6 RSS
+ 0ee1261c33 net/hns3: fix packet type for GENEVE
+ 61bd1be0ee net/hns3: move flow direction rule recovery
+ d86c6f3dbb net/hns3: fix RSS flow rule restore
+ ceefe8e42c net/hns3: fix RSS filter restore
+ 69155ec3fb net/hns3: revert Tx performance optimization
+ 3990392648 net/hns3: fix clearing hardware MAC statistics
+ d9d6252870 net/hns3: delete unused markup
+ afa520b633 net/hns3: fix crash when secondary process access FW
+ daf6edc317 net/hns3: fix next-to-use overflow in simple Tx
+ 540c8608d6 net/hns3: fix next-to-use overflow in SVE Tx
+ 2197709643 net/hns3: fix crash in SVE Tx
+ 5210f643fe net/hns3: fix Rx with PTP
+ 84cc72849a common/sfc_efx/base: remove VQ index check during VQ start
+ ee7decc2c9 net/memif: fix crash with different number of Rx/Tx queues
+ 37560c8deb net/tap: fix overflow of network interface index
+ 4d4b866a15 net/qede/base: fix 32-bit build with GCC 12
+ 9ba87edbe6 net/i40e: fix build with MinGW GCC 12
+ 54a98eecdb vhost: fix build with GCC 12
+ 84e55d681a timer: fix stopping all timers
+ e7ad87f034 examples/vm_power_manager: use safe list iterator
+ e3b8a0894a mem: fix API doc about allocation on secondary processes
+ 1b93855eb5 net/bnxt: fix build with GCC 13
+ b673b37b9a net/bnxt: fix representor info freeing
+ 50e07634fe net/bnxt: remove unnecessary check
+ a12a1fcc0c net/bnxt: fix error code during MTU change
+ d477471727 net/bnxt: fix null pointer dereference in LED config
+ 6638450e4b cryptodev: fix unduly newlines in logs
+ 86727609c5 event/cnxk: fix missing xstats operations
+ 67f3304980 common/sfc_efx/base: fix maximum Tx data count
+ fb442d0c0f net/axgbe: save segment data in scattered Rx
+ 9683271240 net/axgbe: clear buffer on scattered Rx chaining failure
+ 18895ee575 net/axgbe: reset end of packet in scattered Rx
+ 1747e299c2 net/cnxk: fix DF bit in vector mode
+ d141aba6a0 net/ice: check illegal packet sizes
+ 60f243aee8 net/iavf: check illegal packet sizes
+ 6a68e9ad38 net/mlx5: fix meter profile delete after disable
+ 9412d614c2 net/mlx5: fix modify action with tunnel decapsulation
+ ccdfdbafa4 net/mlx5: fix Tx check for hardware descriptor length
+ 6986b5af9c net/mlx5: fix inline length exceeding descriptor limit
+ 517c25053b net/mlx5: fix single not inline packet storing
+ b5e5d926b2 net/mlx5: fix check for orphan wait descriptor
+ 879ebb7dce net/mlx5: fix Verbs FD leak in secondary process
+ 43ebf47ac3 net/mlx4: fix Verbs FD leak in secondary process
+ 21c7ed77f1 net/virtio: fix crash when configured twice
+ 5150a5b753 net/iavf: fix outer checksum flags
+ 5f0b3890df net/iavf: fix processing VLAN TCI in SSE path
+ bf097fbecf net/ice/base: fix input set of GTPoGRE
+ 65e2ff5b1e net/ice/base: ignore promiscuous already exist
+ da3be8cd7e net/ice/base: fix double VLAN in promiscuous mode
+ b1f7933ce6 net/ice/base: fix add MAC rule
+ f1ac3e7090 net/ice/base: fix bit finding range over ptype bitmap
+ e075b36c5f net/ice/base: fix array overflow in add switch recipe
+ 26b64ce3a2 net/ice/base: fix endian format
+ 13824e6da2 net/ice/base: fix function descriptions for parser
+ d2c68e7c09 net/ice/base: fix media type of PHY 10G SFI C2C
+ 0255526a1b net/ice/base: fix DSCP PFC TLV creation
+ fdbf7b4da3 net/ice/base: fix 100M speed capability
+ 14ed431e08 net/ice/base: fix division during E822 PTP init
+ 85de6911de common/iavf: avoid copy in async mode
+ fc56f980c8 net/iavf: update IPsec ESN values when updating session
+ 30fc18b4dc net/iavf: fix Tx done descriptors cleanup
+ 246026e0b8 net/iavf: fix pattern check for flow director parser
+ fe9ea36901 net/iavf: fix VLAN insertion
+ 301e4cc87b net/iavf: fix L3 checksum Tx offload flag
+ a50e008983 net/ice/base: fix inner symmetric RSS hash in raw flow
+ a991f641f3 net/ice: fix RSS hash update
+ 85821d6d95 net/i40e: fix VF representor release
+ 278054776c eventdev: fix name of Rx conf type in documentation
+ afa77bc4df cryptodev: fix missing SHA3 algorithm strings
+ 29b6bd6014 crypto/qat: fix build with GCC 12
+ db9f8c23a4 ipsec: fix build with GCC 12
+ fdebff6b5a test/ipsec: fix build with GCC 12
+ 656b357968 common/qat: fix VF to PF answer
+ 1af7871cdd test/crypto: fix debug messages
+ 783a1099f2 examples/ipsec-secgw: use Tx checksum offload conditionally
+ 6a5ed8de57 test/ipsec: skip if no compatible device
+ a003adfe32 eventdev/eth_tx: fix adapter stop
+ 24f4f4450f eventdev/eth_tx: add spinlock for adapter start/stop
+ 0fc2e4cd21 event/sw: fix device name in dump
+ 6dc1d114cd event/dsw: fix flow migration
+ 5f22fcf619 malloc: fix storage size for some allocations
+ 8b3ce30416 common/cnxk: fix printing disabled MKEX registers
+ 97083f144c common/cnxk: fix missing flow counter reset
+ 6d030554f1 net/mvneta: fix build with GCC 12
+ a2439f58c0 common/cnxk: fix log level during MCAM allocation
+ 82fa3908c0 net/ngbe: fix maximum frame size
+ 60df2778b9 net/ngbe: remove semaphore between SW/FW
+ 9f6bab86f3 net/ngbe: rename some extended statistics
+ 8bdb992cea net/txgbe: rename some extended statistics
+ 982225e1bb net/txgbe: remove semaphore between SW/FW
+ d47522ba5c net/txgbe: fix IPv6 flow rule
+ f1810ceebb net/nfp: improve HW info header log readability
+ 88eb5f06dd net/axgbe: remove freeing buffer in scattered Rx
+ c57a09f16a net/axgbe: optimise scattered Rx
+ 7c52801e6c net/axgbe: fix checksum and RSS in scattered Rx
+ edf988aa48 net/axgbe: fix length of each segment in scattered Rx
+ fca744c60f net/axgbe: fix mbuf lengths in scattered Rx
+ 77c3e4194b net/axgbe: fix scattered Rx
+ 45ae1b821d net/nfp: compose firmware file name with new hwinfo
+ f88bb5a08a net/failsafe: fix interrupt handle leak
+ e9c74a0c19 net/bonding: fix double slave link status query
+ 371746d80f net/bonding: fix array overflow in Rx burst
+ 40e0b6e827 app/testpmd: restore ixgbe bypass commands
+ 48240d704e eal: fix side effect in some pointer arithmetic macros
+ ac8fea2279 net: accept unaligned data in checksum routines
+ 155a98e864 build: enable developer mode for all working trees
+ 8010a15523 vhost: fix virtqueue use after free on NUMA reallocation
+
+
+* Thu Nov 30 2023 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-139
+- Merging upstream branch-2.17 [RH git: 8eba94245a]
+ Commit list:
+ 74633888d2 dpdk: Use DPDK 21.11.5 release for OVS 2.17.
+
+
+* Wed Nov 29 2023 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-138
+- Merging upstream branch-2.17 [RH git: 87d5d3d5a3]
+ Commit list:
+ b5e54aa161 ovs-ofctl: Correctly mark the CT flush commands.
+
+
 * Fri Nov 24 2023 Open vSwitch CI <ovs-ci@redhat.com> - 2.17.0-137
 - Merging upstream branch-2.17 [RH git: b3a98e2a19]
   Commit list: