diff --git a/SOURCES/openvswitch-3.1.0.patch b/SOURCES/openvswitch-3.1.0.patch index 04effe5..cba09ad 100644 --- a/SOURCES/openvswitch-3.1.0.patch +++ b/SOURCES/openvswitch-3.1.0.patch @@ -18969,7 +18969,7 @@ index e5d99714de..61ec4801ed 100644 struct vteprec_physical_port *port_cfg = ps_cfg->ports[j]; struct vtep_ctl_port *port; diff --git a/dpdk/.github/workflows/build.yml b/dpdk/.github/workflows/build.yml -index 82d83f4030..8bd8f57513 100644 +index 82d83f4030..c08f6ae827 100644 --- a/dpdk/.github/workflows/build.yml +++ b/dpdk/.github/workflows/build.yml @@ -25,7 +25,8 @@ jobs: @@ -18990,7 +18990,13 @@ index 82d83f4030..8bd8f57513 100644 cross: aarch64 - os: ubuntu-20.04 compiler: gcc -@@ -67,7 +69,7 @@ jobs: +@@ -62,15 +64,15 @@ jobs: + + steps: + - name: Checkout sources +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + - name: Generate cache keys id: get_ref_keys run: | echo 'ccache=ccache-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-'$(date -u +%Y-w%W) >> $GITHUB_OUTPUT @@ -18998,7 +19004,36 @@ index 82d83f4030..8bd8f57513 100644 + echo 'libabigail=libabigail-${{ env.LIBABIGAIL_VERSION }}-${{ matrix.config.os }}' >> $GITHUB_OUTPUT echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT - name: Retrieve ccache cache - uses: actions/cache@v3 +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + with: + path: ~/.ccache + key: ${{ steps.get_ref_keys.outputs.ccache }}-${{ github.ref }} +@@ -78,13 +80,13 @@ jobs: + ${{ steps.get_ref_keys.outputs.ccache }}-refs/heads/main + - name: Retrieve libabigail cache + id: libabigail-cache +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + if: env.ABI_CHECKS == 'true' + with: + path: libabigail + key: ${{ steps.get_ref_keys.outputs.libabigail }} + - name: Retrieve ABI reference cache +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + if: env.ABI_CHECKS == 'true' + with: + path: reference +@@ -131,7 +133,7 @@ jobs: + run: .ci/linux-build.sh + - name: Upload logs on failure + if: failure() +- uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 + with: + name: meson-logs-${{ join(matrix.config.*, '-') }} + path: | @@ -143,6 +145,8 @@ jobs: prepare-container-images: name: ${{ join(matrix.config.*, '-') }} @@ -19008,7 +19043,22 @@ index 82d83f4030..8bd8f57513 100644 strategy: fail-fast: false -@@ -208,14 +212,13 @@ jobs: +@@ -157,7 +161,7 @@ jobs: + echo 'image=image-${{ matrix.config.image }}-'$(date -u +%Y-%m-%d) >> $GITHUB_OUTPUT + - name: Retrieve image cache + id: image_cache +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + with: + path: ~/.image + key: ${{ steps.get_keys.outputs.image }} +@@ -203,26 +207,25 @@ jobs: + + steps: + - name: Checkout sources +- uses: actions/checkout@v3 ++ uses: actions/checkout@v4 + - name: Generate various keys id: get_keys run: | echo 'ccache=ccache-${{ matrix.config.image }}-${{ matrix.config.compiler }}-'$(date -u +%Y-w%W) >> $GITHUB_OUTPUT @@ -19016,7 +19066,8 @@ index 82d83f4030..8bd8f57513 100644 echo 'logs=meson-logs-${{ join(matrix.config.*, '-') }}' | tr -d ':' >> $GITHUB_OUTPUT - name: Retrieve image cache id: image_cache - uses: actions/cache@v3 +- uses: actions/cache@v3 ++ uses: actions/cache@v4 with: path: ~/.image - key: ${{ steps.get_keys.outputs.image }} @@ -19024,19 +19075,58 @@ index 82d83f4030..8bd8f57513 100644 - name: Fail if no image (not supposed to happen) if: steps.image_cache.outputs.cache-hit != 'true' run: | 
+ echo 'Image ${{ matrix.config.image }} is not cached.' + false + - name: Retrieve ccache cache +- uses: actions/cache@v3 ++ uses: actions/cache@v4 + with: + path: ~/.ccache + key: ${{ steps.get_keys.outputs.ccache }}-${{ github.ref }} +@@ -259,7 +262,7 @@ jobs: + run: docker kill dpdk + - name: Upload logs on failure + if: failure() +- uses: actions/upload-artifact@v3 ++ uses: actions/upload-artifact@v4 + with: + name: ${{ steps.get_keys.outputs.logs }} + path: | diff --git a/dpdk/.mailmap b/dpdk/.mailmap -index 75884b6fe2..a97dce5fcf 100644 +index 75884b6fe2..b0d505e36d 100644 --- a/dpdk/.mailmap +++ b/dpdk/.mailmap -@@ -38,6 +38,7 @@ Aleksandr Loktionov +@@ -2,7 +2,7 @@ Aakash Sasidharan + Aaro Koskinen + Aaron Campbell + Aaron Conole +-Abdullah Ömer Yamaç ++Abdullah Ömer Yamaç + Abdullah Sevincer + Abed Kamaluddin + Abhijit Sinha +@@ -27,9 +27,11 @@ Ajit Khaparde + Akash Saxena + Akeem G Abodunrin + Akhil Goyal ++Akshay Dorwat + Alain Leon + Alan Carew + Alan Dewar ++Alan Elder + Alan Liu + Alan Winkowski + Alejandro Lucero +@@ -38,6 +40,8 @@ Aleksandr Loktionov Aleksandr Miloshenko Aleksey Baulin Aleksey Katargin +Ales Musil ++Alessio Igor Bogani Alexander Bechikov Alexander Belyakov Alexander Chernavin -@@ -64,6 +65,7 @@ Ali Volkan Atli +@@ -64,6 +68,7 @@ Ali Volkan Atli Allain Legacy Allen Hubbe Alok Makhariya @@ -19044,7 +19134,7 @@ index 75884b6fe2..a97dce5fcf 100644 Alvin Zhang Aman Singh Amaranath Somalapuram -@@ -102,7 +104,7 @@ Andriy Berestovskyy +@@ -102,7 +107,7 @@ Andriy Berestovskyy Andrzej Ostruszka Andy Gospodarek Andy Green @@ -19053,7 +19143,7 @@ index 75884b6fe2..a97dce5fcf 100644 Andy Pei Anirudh Venkataramanan Ankur Dwivedi -@@ -119,6 +121,7 @@ Arkadiusz Kubalewski +@@ -119,6 +124,7 @@ Arkadiusz Kubalewski Arkadiusz Kusztal Arnon Warshavsky Arshdeep Kaur @@ -19061,7 +19151,7 @@ index 75884b6fe2..a97dce5fcf 100644 Artem V. 
Andreev Artur Rojek Artur Trybula -@@ -143,6 +146,7 @@ Balazs Nemeth +@@ -143,6 +149,7 @@ Balazs Nemeth Bao-Long Tran Barak Enat Barry Cao @@ -19069,7 +19159,7 @@ index 75884b6fe2..a97dce5fcf 100644 Baruch Siach Bassam Zaid AlKilani Beilei Xing -@@ -166,7 +170,9 @@ Bin Huang +@@ -166,7 +173,9 @@ Bin Huang Bin Zheng Björn Töpel Bo Chen @@ -19079,7 +19169,7 @@ index 75884b6fe2..a97dce5fcf 100644 Boris Pismenny Brandon Lo Brendan Ryan -@@ -195,6 +201,7 @@ Chaoyong He +@@ -195,6 +204,7 @@ Chaoyong He Chao Zhu Charles Brett Charles Myers @@ -19087,7 +19177,15 @@ index 75884b6fe2..a97dce5fcf 100644 Chas Williams <3chas3@gmail.com> Chenbo Xia Chengchang Tang -@@ -295,6 +302,8 @@ Deepak Khandelwal +@@ -206,6 +216,7 @@ Cheng Liu + Cheng Peng + Chengwen Feng + Chenmin Sun ++Chenming Chang + Chenxu Di + Cheryl Houser + Chinh T Cao +@@ -295,6 +306,8 @@ Deepak Khandelwal Deepak Kumar Jain Deirdre O'Connor Dekel Peled @@ -19096,7 +19194,7 @@ index 75884b6fe2..a97dce5fcf 100644 Dennis Marinus Derek Chickles Des O Dea -@@ -338,6 +347,7 @@ Dzmitry Sautsa +@@ -338,12 +351,14 @@ Dzmitry Sautsa Ed Czeck Eduard Serra Edward Makarov @@ -19104,7 +19202,14 @@ index 75884b6fe2..a97dce5fcf 100644 Eelco Chaudron Elad Nachman Elad Persiko -@@ -371,6 +381,8 @@ Farah Smith + Elena Agostini + Eli Britstein + Elza Mathew ++Emi Aoki + Emma Finn + Emma Kenny + Emmanuel Roullit +@@ -371,13 +386,17 @@ Farah Smith Fei Chen Feifei Wang Fei Qin @@ -19113,7 +19218,24 @@ index 75884b6fe2..a97dce5fcf 100644 Fengtian Guo Ferdinand Thiessen Ferruh Yigit -@@ -474,6 +486,7 @@ Helin Zhang + Fidaullah Noonari ++Fidel Castro + Fiona Trahe + Flavia Musatescu + Flavio Leitner ++Flore Norceide + Forrest Shi + Francesco Santoro + Francis Kelly +@@ -455,6 +474,7 @@ Hanoch Haim + Hanumanth Pothula + Hao Chen + Hao Wu ++Haoqian He + Hari Kumar Vemula + Harini Ramakrishnan + Hariprasad Govindharajan +@@ -474,6 +494,7 @@ Helin Zhang Hemant Agrawal Heng Ding Hengjian Zhang @@ -19121,7 +19243,15 @@ index 75884b6fe2..a97dce5fcf 100644 Heng Wang Henning Schild Henry Cai -@@ -524,6 +537,7 @@ Ilya Maximets +@@ -488,6 +509,7 @@ Hideyuki Yamashita + Hiroki Shirokura + Hiroshi Shimamoto + Hiroyuki Mikita ++Holly Nichols + Hongbo Zheng + Hongjun Ni + Hongzhi Guo +@@ -524,6 +546,7 @@ Ilya Maximets Ilya V. Matveychikov Ilyes Ben Hamouda Intiyaz Basha @@ -19129,7 +19259,7 @@ index 75884b6fe2..a97dce5fcf 100644 Itsuro Oda Ivan Boule Ivan Dyukov -@@ -601,6 +615,7 @@ Jie Liu +@@ -601,6 +624,7 @@ Jie Liu Jie Pan Jie Wang Jie Zhou @@ -19137,7 +19267,17 @@ index 75884b6fe2..a97dce5fcf 100644 Jijiang Liu Jilei Chen Jim Harris -@@ -634,9 +649,11 @@ John McNamara +@@ -615,7 +639,8 @@ Jin Liu + Jin Yu + Jiri Slaby + Job Abraham +-Jochen Behrens ++Jochen Behrens ++Joel Kavanagh + Joey Xing + Johan Faltstrom + Johan Källström +@@ -634,9 +659,11 @@ John McNamara John Miller John OLoughlin John Ousterhout @@ -19145,12 +19285,17 @@ index 75884b6fe2..a97dce5fcf 100644 John W. 
Linville Jonas Pfefferle -Jonathan Erb -+Jonathan Erb ++Jonathan Erb +Jonathan Tsai Jon DeVree Jon Loeliger Joongi Kim -@@ -667,9 +684,12 @@ Jun Yang +@@ -663,13 +690,17 @@ Junjie Wan + Jun Qiu + Jun W Zhou + Junxiao Shi ++Jun Wang + Jun Yang Junyu Jiang Juraj Linkeš Kai Ji @@ -19163,7 +19308,7 @@ index 75884b6fe2..a97dce5fcf 100644 Kamil Bednarczyk Kamil Chalupnik Kamil Rytarowski -@@ -708,7 +728,9 @@ Konstantin Ananyev Krzysztof Galazka Krzysztof Kanas @@ -19173,7 +19318,15 @@ index 75884b6fe2..a97dce5fcf 100644 Kuba Kozak Kumar Amber Kumara Parameshwaran -@@ -747,7 +769,7 @@ Liming Sun +@@ -732,6 +765,7 @@ Leszek Zygo + Levend Sayar + Lev Faerman + Lewei Yang ++Lewis Donzis + Leyi Rong + Liang Ma + Liang-Min Larry Wang +@@ -747,7 +781,7 @@ Liming Sun Linfan Hu Lingli Chen Lingyu Liu @@ -19182,7 +19335,7 @@ index 75884b6fe2..a97dce5fcf 100644 Linsi Yuan Lior Margalit Li Qiang -@@ -784,6 +806,7 @@ Maciej Paczkowski +@@ -784,6 +818,7 @@ Maciej Paczkowski Maciej Rabeda Maciej Szwed Madhuker Mythri @@ -19190,7 +19343,31 @@ index 75884b6fe2..a97dce5fcf 100644 Mahipal Challa Mah Yock Gen Mairtin o Loingsigh -@@ -843,6 +866,7 @@ Mateusz Rusinski +@@ -795,7 +830,6 @@ Mandeep Rohilla + Manish Chopra + Manish Tomar + Mao Jiang +-Mao YingMing + Marcel Apfelbaum + Marcel Cornu + Marcelo Ricardo Leitner +@@ -812,6 +846,7 @@ Marcin Wojtas + Marcin Zapolski + Marco Varlese + Marc Sune ++Marek Mical + Maria Lingemark + Mario Carrillo + Mário Kuka +@@ -835,6 +870,7 @@ Martin Weiser + Martyna Szapar + Maryam Tahhan + Masoud Hasanifard ++Masoumeh Farhadi Nia + Matan Azrad + Matej Vido + Mateusz Kowalski +@@ -843,6 +879,7 @@ Mateusz Rusinski Matias Elo Mats Liljegren Matteo Croce @@ -19198,7 +19375,15 @@ index 75884b6fe2..a97dce5fcf 100644 Matthew Hall Matthew Smith Matthew Vick -@@ -886,6 +910,7 @@ Michal Litwicki +@@ -877,6 +914,7 @@ Michael Santana + Michael Savisko + Michael Shamis + Michael S. 
Tsirkin ++Michael Theodore Stolarchuk + Michael Wildt + Michal Berger + Michal Jastrzebski +@@ -886,6 +924,7 @@ Michal Litwicki Michal Mazurek Michal Michalik Michał Mirosław @@ -19206,7 +19391,7 @@ index 75884b6fe2..a97dce5fcf 100644 Michal Swiatkowski Michal Wilczynski Michel Machado -@@ -911,6 +936,7 @@ Mitch Williams +@@ -911,6 +950,7 @@ Mitch Williams Mit Matelske Mohamad Noor Alim Hussin Mohammad Abdul Awal @@ -19214,7 +19399,16 @@ index 75884b6fe2..a97dce5fcf 100644 Mohammed Gamal Mohsin Kazmi Mohsin Mazhar Shaikh -@@ -1024,6 +1050,7 @@ Pawel Rutkowski +@@ -947,7 +987,7 @@ Nemanja Marjanovic + Netanel Belgazal + Netanel Gonen + Niall Power +-Nick Connolly ++Nick Connolly + Nick Nunley + Niclas Storm + Nicolas Chautru +@@ -1024,6 +1064,7 @@ Pawel Rutkowski Pawel Wodkowski Pei Chao Pei Zhang @@ -19222,7 +19416,7 @@ index 75884b6fe2..a97dce5fcf 100644 Peng He Peng Huang Peng Sun -@@ -1035,6 +1062,7 @@ Peter Spreadborough +@@ -1035,6 +1076,7 @@ Peter Spreadborough Petr Houska Phanendra Vukkisala Phil Yang @@ -19230,7 +19424,7 @@ index 75884b6fe2..a97dce5fcf 100644 Pierre Pfister Piotr Azarewicz Piotr Bartosiewicz -@@ -1050,6 +1078,7 @@ Prashant Upadhyaya +@@ -1050,12 +1092,14 @@ Prashant Upadhyaya Prateek Agarwal Praveen Shetty Pravin Pathak @@ -19238,7 +19432,22 @@ index 75884b6fe2..a97dce5fcf 100644 Priyanka Jain Przemyslaw Ciesielski Przemyslaw Czesnowicz -@@ -1143,6 +1172,7 @@ Roy Franz + Przemyslaw Patynowski + Przemyslaw Zegan + Pu Xu <583493798@qq.com> ++Qian Hao + Qian Xu + Qiao Liu + Qi Fu +@@ -1070,6 +1114,7 @@ Quentin Armitage + Qun Wan + Radha Mohan Chintakuntla + Radoslaw Biernacki ++Radoslaw Tyl + Radu Bulie + Radu Nicolau + Rafael Ávila de Espíndola +@@ -1143,6 +1188,7 @@ Roy Franz Roy Pledge Roy Shterman Ruifeng Wang @@ -19246,7 +19455,7 @@ index 75884b6fe2..a97dce5fcf 100644 Ryan E Hall Sabyasachi Sengupta Sachin Saxena -@@ -1159,6 +1189,7 @@ Sangjin Han +@@ -1159,6 +1205,7 @@ Sangjin Han Sankar Chokkalingam Santoshkumar Karanappa Rastapur Santosh Shukla @@ -19254,7 +19463,7 @@ index 75884b6fe2..a97dce5fcf 100644 Saori Usami Sarath Somasekharan Sarosh Arif -@@ -1167,6 +1198,7 @@ Satananda Burla +@@ -1167,6 +1214,7 @@ Satananda Burla Satha Rao Satheesh Paul Sathesh Edara @@ -19262,7 +19471,15 @@ index 75884b6fe2..a97dce5fcf 100644 Savinay Dharmappa Scott Branden Scott Daniels -@@ -1210,6 +1242,7 @@ Shiqi Liu <835703180@qq.com> +@@ -1196,6 +1244,7 @@ Shally Verma + Shannon Nelson + Shannon Zhao + Shaopeng He ++Shaowei Sun <1819846787@qq.com> + Sharmila Podury + Sharon Haroni + Shay Agroskin +@@ -1210,6 +1259,7 @@ Shiqi Liu <835703180@qq.com> Shiri Kuzin Shivanshu Shukla Shiweixian @@ -19270,7 +19487,15 @@ index 75884b6fe2..a97dce5fcf 100644 Shlomi Gridish Shougang Wang Shraddha Joshi -@@ -1232,6 +1265,7 @@ Simon Kuenzer +@@ -1220,6 +1270,7 @@ Shuanglin Wang + Shuki Katzenelson + Shun Hao + Shu Shen ++Shuo Li + Shweta Choudaha + Shyam Kumar Shrivastav + Shy Shyman +@@ -1232,6 +1283,7 @@ Simon Kuenzer Siobhan Butler Sirshak Das Sivaprasad Tummala @@ -19278,7 +19503,7 @@ index 75884b6fe2..a97dce5fcf 100644 Siwar Zitouni Slawomir Mrozowicz Slawomir Rosek -@@ -1239,6 +1273,7 @@ Smadar Fuks +@@ -1239,6 +1291,7 @@ Smadar Fuks Solal Pirelli Solganik Alexander Somnath Kotur @@ -19286,15 +19511,34 @@ index 75884b6fe2..a97dce5fcf 100644 Song Zhu Sony Chacko Sotiris Salloumis -@@ -1331,6 +1366,7 @@ Tianli Lai +@@ -1331,8 +1384,10 @@ Tianli Lai Tianyu Li Timmons C. 
Player Timothy McDaniel +Timothy Miskell Timothy Redaelli Tim Shearer ++Ting-Kai Ku Ting Xu -@@ -1386,6 +1422,7 @@ Vijay Kumar Srivastava + Tiwei Bie + Todd Fujinaka +@@ -1346,6 +1401,7 @@ Tomasz Kulasek + Tomasz Zawadzki + Tom Barbette + Tom Crugnale ++Tom Jones + Tom Millington + Tom Rix + Tone Zhang +@@ -1364,6 +1420,7 @@ Vadim Suraev + Vakul Garg + Vamsi Attunuru + Vanshika Shukla ++Varun Sethi + Vasily Philipov + Veerasenareddy Burru + Venkata Suresh Kumar P +@@ -1386,13 +1443,17 @@ Vijay Kumar Srivastava Vijay Srivastava Vikas Aggarwal Vikas Gupta @@ -19302,15 +19546,21 @@ index 75884b6fe2..a97dce5fcf 100644 Vimal Chungath Vincent Guo Vincent Jardin -@@ -1393,6 +1430,7 @@ Vincent Li + Vincent Li Vincent S. Cojot ++Vinh Tran ++Vipin Padmam Ramesh Vipin Varghese Vipul Ashri +Visa Hankala Vishal Kulkarni Vishwas Danivas Vitaliy Mysak -@@ -1413,8 +1451,9 @@ Waterman Cao +@@ -1410,11 +1471,13 @@ Walter Heymans + Wang Sheng-Hui + Wangyu (Eric) + Waterman Cao ++Wathsala Vithanage Weichun Chen Wei Dai Weifeng Li @@ -19321,7 +19571,15 @@ index 75884b6fe2..a97dce5fcf 100644 Wei Hu (Xavier) WeiJie Zhuang Weiliang Luo -@@ -1562,6 +1601,7 @@ Zhipeng Lu +@@ -1504,6 +1567,7 @@ Yi Lu + Yilun Xu + Yinan Wang + Ying A Wang ++Yingming Mao + Yingya Han + Yinjun Zhang + Yipeng Wang +@@ -1562,6 +1626,7 @@ Zhipeng Lu Zhirun Yan Zhiwei He Zhiyong Yang @@ -19357,14 +19615,14 @@ index 22ef2ea4b9..1338ca00ba 100644 F: drivers/net/mana/ F: doc/guides/nics/mana.rst diff --git a/dpdk/VERSION b/dpdk/VERSION -index 7378dd9f9e..1ffcbab134 100644 +index 7378dd9f9e..4e2024f345 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -22.11.1 -+22.11.4 ++22.11.6 diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c -index 2eb8414efa..0c8e647598 100644 +index 2eb8414efa..81c9d7d2f1 100644 --- a/dpdk/app/dumpcap/main.c +++ b/dpdk/app/dumpcap/main.c @@ -44,7 +44,6 @@ @@ -19383,7 +19641,19 @@ index 2eb8414efa..0c8e647598 100644 rte_strscpy(intf->name, name, sizeof(intf->name)); printf("Capturing on '%s'\n", name); -@@ -554,6 +554,7 @@ static void dpdk_init(void) +@@ -546,6 +546,11 @@ static void dpdk_init(void) + eal_argv[i++] = strdup(file_prefix); + } + ++ for (i = 0; i < (unsigned int)eal_argc; i++) { ++ if (eal_argv[i] == NULL) ++ rte_panic("No memory\n"); ++ } ++ + if (rte_eal_init(eal_argc, eal_argv) < 0) + rte_exit(EXIT_FAILURE, "EAL init failed: is primary process running?\n"); + } +@@ -554,6 +559,7 @@ static void dpdk_init(void) static struct rte_ring *create_ring(void) { struct rte_ring *ring; @@ -19391,7 +19661,7 @@ index 2eb8414efa..0c8e647598 100644 size_t size, log2; /* Find next power of 2 >= size. 
*/ -@@ -567,31 +568,31 @@ static struct rte_ring *create_ring(void) +@@ -567,31 +573,31 @@ static struct rte_ring *create_ring(void) ring_size = size; } @@ -19436,6 +19706,105 @@ index 2eb8414efa..0c8e647598 100644 if (mp == NULL) rte_exit(EXIT_FAILURE, "Mempool (%s) creation failed: %s\n", pool_name, +@@ -799,6 +805,11 @@ int main(int argc, char **argv) + { + struct rte_ring *r; + struct rte_mempool *mp; ++ struct sigaction action = { ++ .sa_flags = SA_RESTART, ++ .sa_handler = signal_handler, ++ }; ++ struct sigaction origaction; + dumpcap_out_t out; + char *p; + +@@ -826,6 +837,14 @@ int main(int argc, char **argv) + if (TAILQ_EMPTY(&interfaces)) + set_default_interface(); + ++ sigemptyset(&action.sa_mask); ++ sigaction(SIGTERM, &action, NULL); ++ sigaction(SIGINT, &action, NULL); ++ sigaction(SIGPIPE, &action, NULL); ++ sigaction(SIGHUP, NULL, &origaction); ++ if (origaction.sa_handler == SIG_DFL) ++ sigaction(SIGHUP, &action, NULL); ++ + r = create_ring(); + mp = create_mempool(); + out = create_output(); +diff --git a/dpdk/app/meson.build b/dpdk/app/meson.build +index e32ea4bd5c..81c94e2b79 100644 +--- a/dpdk/app/meson.build ++++ b/dpdk/app/meson.build +@@ -83,7 +83,7 @@ foreach app:apps + if not build + if reason != '' + dpdk_apps_disabled += app +- set_variable(app.underscorify() + '_disable_reason', reason) ++ set_variable('app_' + app.underscorify() + '_disable_reason', reason) + endif + continue + endif +diff --git a/dpdk/app/pdump/main.c b/dpdk/app/pdump/main.c +index c6cf9d9c87..6216d5454c 100644 +--- a/dpdk/app/pdump/main.c ++++ b/dpdk/app/pdump/main.c +@@ -171,6 +171,9 @@ parse_device_id(const char *key __rte_unused, const char *value, + struct pdump_tuples *pt = extra_args; + + pt->device_id = strdup(value); ++ if (pt->device_id == NULL) ++ return -1; ++ + pt->dump_by_type = DEVICE_ID; + + return 0; +@@ -568,13 +571,9 @@ disable_primary_monitor(void) + } + + static void +-signal_handler(int sig_num) ++signal_handler(int sig_num __rte_unused) + { +- if (sig_num == SIGINT) { +- printf("\n\nSignal %d received, preparing to exit...\n", +- sig_num); +- quit_signal = 1; +- } ++ quit_signal = 1; + } + + static inline int +@@ -971,6 +970,11 @@ enable_primary_monitor(void) + int + main(int argc, char **argv) + { ++ struct sigaction action = { ++ .sa_flags = SA_RESTART, ++ .sa_handler = signal_handler, ++ }; ++ struct sigaction origaction; + int diag; + int ret; + int i; +@@ -979,8 +983,14 @@ main(int argc, char **argv) + char mp_flag[] = "--proc-type=secondary"; + char *argp[argc + 2]; + +- /* catch ctrl-c so we can print on exit */ +- signal(SIGINT, signal_handler); ++ /* catch ctrl-c so we can cleanup on exit */ ++ sigemptyset(&action.sa_mask); ++ sigaction(SIGTERM, &action, NULL); ++ sigaction(SIGINT, &action, NULL); ++ sigaction(SIGPIPE, &action, NULL); ++ sigaction(SIGHUP, NULL, &origaction); ++ if (origaction.sa_handler == SIG_DFL) ++ sigaction(SIGHUP, &action, NULL); + + argp[0] = argv[0]; + argp[1] = n_flag; diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c index 53e852a07c..9104f9e6b9 100644 --- a/dpdk/app/proc-info/main.c @@ -19611,7 +19980,7 @@ index 65805977ae..cf224dca5d 100644 TEST_ASSERT_SUCCESS(return_value = rte_bbdev_queue_info_get(dev_id, diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c -index b285d3f3a7..f77ebc4b47 100644 +index b285d3f3a7..66d14ff502 100644 --- a/dpdk/app/test-bbdev/test_bbdev_perf.c +++ b/dpdk/app/test-bbdev/test_bbdev_perf.c @@ -78,13 +78,12 @@ @@ -19692,6 +20061,160 @@ index 
b285d3f3a7..f77ebc4b47 100644 "Length of data differ in original (%u) and filled (%u) op", total_data_size, pkt_len); +@@ -3122,15 +3125,6 @@ throughput_intr_lcore_ldpc_dec(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_ldpc_dec_ops( +- tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(num_to_enq != enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3140,6 +3134,15 @@ throughput_intr_lcore_ldpc_dec(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_ldpc_dec_ops( ++ tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(num_to_enq != enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -3215,14 +3218,6 @@ throughput_intr_lcore_dec(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_dec_ops(tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(num_to_enq != enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3232,6 +3227,14 @@ throughput_intr_lcore_dec(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_dec_ops(tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(num_to_enq != enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -3301,14 +3304,6 @@ throughput_intr_lcore_enc(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_enc_ops(tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(enq != num_to_enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3318,6 +3313,14 @@ throughput_intr_lcore_enc(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_enc_ops(tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(enq != num_to_enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -3389,15 +3392,6 @@ throughput_intr_lcore_ldpc_enc(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_ldpc_enc_ops( +- tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(enq != num_to_enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. 
It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3407,6 +3401,15 @@ throughput_intr_lcore_ldpc_enc(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_ldpc_enc_ops( ++ tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(enq != num_to_enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ +@@ -3477,14 +3480,6 @@ throughput_intr_lcore_fft(void *arg) + if (unlikely(num_to_process - enqueued < num_to_enq)) + num_to_enq = num_to_process - enqueued; + +- enq = 0; +- do { +- enq += rte_bbdev_enqueue_fft_ops(tp->dev_id, +- queue_id, &ops[enqueued], +- num_to_enq); +- } while (unlikely(enq != num_to_enq)); +- enqueued += enq; +- + /* Write to thread burst_sz current number of enqueued + * descriptors. It ensures that proper number of + * descriptors will be dequeued in callback +@@ -3494,6 +3489,14 @@ throughput_intr_lcore_fft(void *arg) + */ + __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED); + ++ enq = 0; ++ do { ++ enq += rte_bbdev_enqueue_fft_ops(tp->dev_id, ++ queue_id, &ops[enqueued], ++ num_to_enq); ++ } while (unlikely(enq != num_to_enq)); ++ enqueued += enq; ++ + /* Wait until processing of previous batch is + * completed + */ @@ -4933,13 +4936,95 @@ get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id, stats->dequeued_count = q_stats->dequeued_count; stats->enqueue_err_count = q_stats->enqueue_err_count; @@ -20440,10 +20963,27 @@ index 41b8edc2bd..bbb4c7917b 100644 test_data->input_data_sz, 0, rte_socket_id()); diff --git a/dpdk/app/test-crypto-perf/cperf_ops.c b/dpdk/app/test-crypto-perf/cperf_ops.c -index 61a3967697..93b9bfb240 100644 +index 61a3967697..6bbab3289e 100644 --- a/dpdk/app/test-crypto-perf/cperf_ops.c +++ b/dpdk/app/test-crypto-perf/cperf_ops.c -@@ -42,8 +42,7 @@ test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options, +@@ -21,7 +21,6 @@ cperf_set_ops_asym(struct rte_crypto_op **ops, + uint64_t *tsc_start __rte_unused) + { + uint16_t i; +- void *asym_sess = (void *)sess; + + for (i = 0; i < nb_ops; i++) { + struct rte_crypto_asym_op *asym_op = ops[i]->asym; +@@ -31,7 +30,7 @@ cperf_set_ops_asym(struct rte_crypto_op **ops, + asym_op->modex.base.length = options->modex_data->base.len; + asym_op->modex.result.data = options->modex_data->result.data; + asym_op->modex.result.length = options->modex_data->result.len; +- rte_crypto_op_attach_asym_session(ops[i], asym_sess); ++ rte_crypto_op_attach_asym_session(ops[i], sess); + } + } + +@@ -42,8 +41,7 @@ test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options, { struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *); @@ -20453,7 +20993,41 @@ index 61a3967697..93b9bfb240 100644 memcpy(ip, test_vector->plaintext.data, sizeof(struct rte_ipv4_hdr)); -@@ -645,8 +644,9 @@ create_ipsec_session(struct rte_mempool *sess_mp, +@@ -65,7 +63,6 @@ cperf_set_ops_security(struct rte_crypto_op **ops, + + for (i = 0; i < nb_ops; i++) { + struct rte_crypto_sym_op *sym_op = ops[i]->sym; +- void *sec_sess = (void *)sess; + uint32_t buf_sz; + + uint32_t *per_pkt_hfn = rte_crypto_op_ctod_offset(ops[i], +@@ -73,7 +70,7 @@ cperf_set_ops_security(struct rte_crypto_op **ops, + *per_pkt_hfn = options->pdcp_ses_hfn_en ? 
0 : PDCP_DEFAULT_HFN; + + ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; +- rte_security_attach_session(ops[i], sec_sess); ++ rte_security_attach_session(ops[i], sess); + sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + + src_buf_offset); + +@@ -130,7 +127,6 @@ cperf_set_ops_security_ipsec(struct rte_crypto_op **ops, + uint16_t iv_offset __rte_unused, uint32_t *imix_idx, + uint64_t *tsc_start) + { +- void *sec_sess = sess; + const uint32_t test_buffer_size = options->test_buffer_size; + const uint32_t headroom_sz = options->headroom_sz; + const uint32_t segment_sz = options->segment_sz; +@@ -144,7 +140,7 @@ cperf_set_ops_security_ipsec(struct rte_crypto_op **ops, + struct rte_mbuf *m = sym_op->m_src; + + ops[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; +- rte_security_attach_session(ops[i], sec_sess); ++ rte_security_attach_session(ops[i], sess); + sym_op->m_src = (struct rte_mbuf *)((uint8_t *)ops[i] + + src_buf_offset); + +@@ -645,8 +641,9 @@ create_ipsec_session(struct rte_mempool *sess_mp, const struct cperf_test_vector *test_vector, uint16_t iv_offset) { @@ -20464,7 +21038,7 @@ index 61a3967697..93b9bfb240 100644 if (options->aead_algo != 0) { /* Setup AEAD Parameters */ -@@ -660,10 +660,10 @@ create_ipsec_session(struct rte_mempool *sess_mp, +@@ -660,10 +657,10 @@ create_ipsec_session(struct rte_mempool *sess_mp, xform.aead.iv.length = test_vector->aead_iv.length; xform.aead.digest_length = options->digest_sz; xform.aead.aad_length = options->aead_aad_sz; @@ -20476,7 +21050,7 @@ index 61a3967697..93b9bfb240 100644 xform.cipher.algo = options->cipher_algo; xform.cipher.op = options->cipher_op; xform.cipher.iv.offset = iv_offset; -@@ -680,7 +680,6 @@ create_ipsec_session(struct rte_mempool *sess_mp, +@@ -680,7 +677,6 @@ create_ipsec_session(struct rte_mempool *sess_mp, /* Setup Auth Parameters */ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; @@ -20484,7 +21058,7 @@ index 61a3967697..93b9bfb240 100644 auth_xform.auth.algo = options->auth_algo; auth_xform.auth.op = options->auth_op; auth_xform.auth.iv.offset = iv_offset + -@@ -699,7 +698,15 @@ create_ipsec_session(struct rte_mempool *sess_mp, +@@ -699,7 +695,15 @@ create_ipsec_session(struct rte_mempool *sess_mp, auth_xform.auth.iv.length = 0; } @@ -20501,7 +21075,7 @@ index 61a3967697..93b9bfb240 100644 } else { return NULL; } -@@ -722,30 +729,26 @@ create_ipsec_session(struct rte_mempool *sess_mp, +@@ -722,30 +726,26 @@ create_ipsec_session(struct rte_mempool *sess_mp, .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL, .protocol = RTE_SECURITY_PROTOCOL_IPSEC, {.ipsec = { @@ -20552,10 +21126,17 @@ index 613d6d31e2..6966e0b286 100644 enum rte_crypto_cipher_algorithm cipher_algo; enum rte_crypto_cipher_operation cipher_op; diff --git a/dpdk/app/test-crypto-perf/cperf_options_parsing.c b/dpdk/app/test-crypto-perf/cperf_options_parsing.c -index bc5e312c81..1f06e15d10 100644 +index bc5e312c81..5c3acdabc7 100644 --- a/dpdk/app/test-crypto-perf/cperf_options_parsing.c +++ b/dpdk/app/test-crypto-perf/cperf_options_parsing.c -@@ -519,6 +519,7 @@ parse_test_file(struct cperf_options *opts, +@@ -516,9 +516,14 @@ parse_test_file(struct cperf_options *opts, + const char *arg) + { + opts->test_file = strdup(arg); ++ if (opts->test_file == NULL) { ++ RTE_LOG(ERR, USER1, "Dup vector file failed!\n"); ++ return -1; ++ } if (access(opts->test_file, F_OK) != -1) return 0; RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n"); @@ -20563,7 +21144,7 @@ index bc5e312c81..1f06e15d10 100644 return -1; } -@@ -1318,6 +1319,21 @@ 
cperf_options_check(struct cperf_options *options) +@@ -1318,6 +1323,21 @@ cperf_options_check(struct cperf_options *options) if (check_docsis_buffer_length(options) < 0) return -EINVAL; } @@ -20586,10 +21167,64 @@ index bc5e312c81..1f06e15d10 100644 return 0; diff --git a/dpdk/app/test-crypto-perf/cperf_test_common.c b/dpdk/app/test-crypto-perf/cperf_test_common.c -index 27646cd619..932aab16df 100644 +index 27646cd619..6b8ab65731 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_common.c +++ b/dpdk/app/test-crypto-perf/cperf_test_common.c -@@ -197,9 +197,11 @@ cperf_alloc_common_memory(const struct cperf_options *options, +@@ -49,7 +49,6 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, + { + uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf); + uint16_t remaining_segments = segments_nb; +- struct rte_mbuf *next_mbuf; + rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) + + mbuf_offset + mbuf_hdr_size; + +@@ -70,15 +69,15 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp, + m->nb_segs = segments_nb; + m->port = 0xff; + rte_mbuf_refcnt_set(m, 1); +- next_mbuf = (struct rte_mbuf *) ((uint8_t *) m + +- mbuf_hdr_size + segment_sz); +- m->next = next_mbuf; +- m = next_mbuf; +- remaining_segments--; + ++ remaining_segments--; ++ if (remaining_segments > 0) { ++ m->next = (struct rte_mbuf *)((uint8_t *) m + mbuf_hdr_size + segment_sz); ++ m = m->next; ++ } else { ++ m->next = NULL; ++ } + } while (remaining_segments > 0); +- +- m->next = NULL; + } + + static void +@@ -150,11 +149,11 @@ cperf_alloc_common_memory(const struct cperf_options *options, + int ret; + + /* Calculate the object size */ +- uint16_t crypto_op_size = sizeof(struct rte_crypto_op) + +- sizeof(struct rte_crypto_sym_op); ++ uint16_t crypto_op_size = sizeof(struct rte_crypto_op); + uint16_t crypto_op_private_size; + + if (options->op_type == CPERF_ASYM_MODEX) { ++ crypto_op_size += sizeof(struct rte_crypto_asym_op); + snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u", + rte_socket_id()); + *pool = rte_crypto_op_pool_create( +@@ -171,6 +170,8 @@ cperf_alloc_common_memory(const struct cperf_options *options, + return 0; + } + ++ crypto_op_size += sizeof(struct rte_crypto_sym_op); ++ + /* + * If doing AES-CCM, IV field needs to be 16 bytes long, + * and AAD field needs to be long enough to have 18 bytes, +@@ -197,9 +198,11 @@ cperf_alloc_common_memory(const struct cperf_options *options, RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size); uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz; uint32_t max_size = options->max_buffer_size + options->digest_sz; @@ -20604,8 +21239,27 @@ index 27646cd619..932aab16df 100644 uint32_t obj_size = crypto_op_total_size_padded + (mbuf_size * segments_nb); +@@ -225,7 +228,8 @@ cperf_alloc_common_memory(const struct cperf_options *options, + (mbuf_size * segments_nb); + params.dst_buf_offset = *dst_buf_offset; + /* Destination buffer will be one segment only */ +- obj_size += max_size + sizeof(struct rte_mbuf); ++ obj_size += max_size + sizeof(struct rte_mbuf) + ++ options->headroom_sz + options->tailroom_sz; + } + + *pool = rte_mempool_create_empty(pool_name, +@@ -267,7 +271,7 @@ cperf_mbuf_set(struct rte_mbuf *mbuf, + const struct cperf_options *options, + const struct cperf_test_vector *test_vector) + { +- uint32_t segment_sz = options->segment_sz; ++ uint32_t segment_sz = options->segment_sz - options->headroom_sz - options->tailroom_sz; + uint8_t *mbuf_data; + uint8_t *test_data; + uint32_t remaining_bytes = 
options->max_buffer_size; diff --git a/dpdk/app/test-crypto-perf/cperf_test_latency.c b/dpdk/app/test-crypto-perf/cperf_test_latency.c -index 49bf421c01..406e082e4e 100644 +index 49bf421c01..1c5fca0a55 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_latency.c +++ b/dpdk/app/test-crypto-perf/cperf_test_latency.c @@ -43,15 +43,28 @@ struct priv_op_data { @@ -20644,6 +21298,35 @@ index 49bf421c01..406e082e4e 100644 } void * +@@ -109,7 +122,11 @@ store_timestamp(struct rte_crypto_op *op, uint64_t timestamp) + { + struct priv_op_data *priv_data; + +- priv_data = (struct priv_op_data *) (op->sym + 1); ++ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) ++ priv_data = (struct priv_op_data *) (op->sym + 1); ++ else ++ priv_data = (struct priv_op_data *) (op->asym + 1); ++ + priv_data->result->status = op->status; + priv_data->result->tsc_end = timestamp; + } +@@ -237,9 +254,13 @@ cperf_latency_test_runner(void *arg) + ctx->res[tsc_idx].tsc_start = tsc_start; + /* + * Private data structure starts after the end of the +- * rte_crypto_sym_op structure. ++ * rte_crypto_sym_op (or rte_crypto_asym_op) structure. + */ +- priv_data = (struct priv_op_data *) (ops[i]->sym + 1); ++ if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) ++ priv_data = (struct priv_op_data *) (ops[i]->sym + 1); ++ else ++ priv_data = (struct priv_op_data *) (ops[i]->asym + 1); ++ + priv_data->result = (void *)&ctx->res[tsc_idx]; + tsc_idx++; + } diff --git a/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c b/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c index 98e46c3381..737d61d4af 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_vector_parsing.c @@ -20657,7 +21340,7 @@ index 98e46c3381..737d61d4af 100644 rte_free(vector); diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c -index c03e1d5ba5..8042c94e04 100644 +index c03e1d5ba5..2249c46694 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_verify.c +++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c @@ -38,14 +38,27 @@ struct cperf_op_result { @@ -20694,6 +21377,128 @@ index c03e1d5ba5..8042c94e04 100644 } void * +@@ -98,8 +111,10 @@ cperf_verify_op(struct rte_crypto_op *op, + uint32_t len; + uint16_t nb_segs; + uint8_t *data; +- uint32_t cipher_offset, auth_offset; +- uint8_t cipher, auth; ++ uint32_t cipher_offset, auth_offset = 0; ++ bool cipher = false; ++ bool digest_verify = false; ++ bool is_encrypt = false; + int res = 0; + + if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) +@@ -137,57 +152,54 @@ cperf_verify_op(struct rte_crypto_op *op, + + switch (options->op_type) { + case CPERF_CIPHER_ONLY: +- cipher = 1; +- cipher_offset = 0; +- auth = 0; +- auth_offset = 0; +- break; +- case CPERF_CIPHER_THEN_AUTH: +- cipher = 1; ++ cipher = true; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ is_encrypt = options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT; + break; + case CPERF_AUTH_ONLY: +- cipher = 0; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) { ++ auth_offset = options->test_buffer_size; ++ digest_verify = true; ++ } + break; ++ case CPERF_CIPHER_THEN_AUTH: + case CPERF_AUTH_THEN_CIPHER: +- cipher = 1; ++ cipher = true; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) { ++ auth_offset = options->test_buffer_size; ++ digest_verify = true; ++ is_encrypt = true; ++ } + break; + case CPERF_AEAD: +- cipher = 1; 
++ cipher = true; + cipher_offset = 0; +- auth = 1; +- auth_offset = options->test_buffer_size; ++ if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) { ++ auth_offset = options->test_buffer_size; ++ digest_verify = true; ++ is_encrypt = true; ++ } + break; + default: + res = 1; + goto out; + } + +- if (cipher == 1) { +- if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) +- res += memcmp(data + cipher_offset, ++ if (cipher) { ++ if (is_encrypt) ++ res += !!memcmp(data + cipher_offset, + vector->ciphertext.data, + options->test_buffer_size); + else +- res += memcmp(data + cipher_offset, ++ res += !!memcmp(data + cipher_offset, + vector->plaintext.data, + options->test_buffer_size); + } + +- if (auth == 1) { +- if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) +- res += memcmp(data + auth_offset, +- vector->digest.data, +- options->digest_sz); +- } ++ if (digest_verify) ++ res += !!memcmp(data + auth_offset, vector->digest.data, options->digest_sz); + + out: + rte_free(data); +@@ -263,7 +275,6 @@ cperf_verify_test_runner(void *test_ctx) + ops_needed, ctx->sess, ctx->options, + ctx->test_vector, iv_offset, &imix_idx, NULL); + +- + /* Populate the mbuf with the test vector, for verification */ + for (i = 0; i < ops_needed; i++) + cperf_mbuf_set(ops[i]->sym->m_src, +@@ -281,6 +292,17 @@ cperf_verify_test_runner(void *test_ctx) + } + #endif /* CPERF_LINEARIZATION_ENABLE */ + ++ /** ++ * When ops_needed is smaller than ops_enqd, the ++ * unused ops need to be moved to the front for ++ * next round use. ++ */ ++ if (unlikely(ops_enqd > ops_needed)) { ++ size_t nb_b_to_mov = ops_unused * sizeof(struct rte_crypto_op *); ++ ++ memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov); ++ } ++ + /* Enqueue burst of ops on crypto device */ + ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, + ops, burst_size); diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c index af5bd0d23b..bc1f0f9659 100644 --- a/dpdk/app/test-crypto-perf/main.c @@ -21195,8 +22000,21 @@ index d939a85d7e..752f783370 100644 + } } } +diff --git a/dpdk/app/test-pmd/bpf_cmd.c b/dpdk/app/test-pmd/bpf_cmd.c +index 46f6b7d6d2..24d34f983e 100644 +--- a/dpdk/app/test-pmd/bpf_cmd.c ++++ b/dpdk/app/test-pmd/bpf_cmd.c +@@ -139,7 +139,7 @@ static cmdline_parse_token_string_t cmd_load_bpf_prm = + cmdline_parse_inst_t cmd_operate_bpf_ld_parse = { + .f = cmd_operate_bpf_ld_parsed, + .data = NULL, +- .help_str = "bpf-load rx|tx ", ++ .help_str = "bpf-load rx|tx ", + .tokens = { + (void *)&cmd_load_bpf_start, + (void *)&cmd_load_bpf_dir, diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index b32dc8bfd4..3a451b9fa0 100644 +index b32dc8bfd4..820332df50 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c @@ -468,6 +468,12 @@ static void cmd_help_long_parsed(void *parsed_result, @@ -21212,7 +22030,16 @@ index b32dc8bfd4..3a451b9fa0 100644 "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n" " Set the MAC address for a VF from the PF.\n\n" -@@ -4887,19 +4893,6 @@ cmd_tso_set_parsed(void *parsed_result, +@@ -3448,6 +3454,8 @@ parse_hdrs_list(const char *str, const char *item_name, unsigned int max_items, + + nb_item = 0; + char *str2 = strdup(str); ++ if (str2 == NULL) ++ return nb_item; + cur = strtok_r(str2, ",", &tmp); + while (cur != NULL) { + parsed_items[nb_item] = get_ptype(cur); +@@ -4887,19 +4895,6 @@ cmd_tso_set_parsed(void *parsed_result, ports[res->port_id].tso_segsz); } cmd_config_queue_tx_offloads(&ports[res->port_id]); @@ -21232,7 +22059,7 @@ 
index b32dc8bfd4..3a451b9fa0 100644 cmd_reconfig_device_queue(res->port_id, 1, 1); } -@@ -4957,39 +4950,27 @@ struct cmd_tunnel_tso_set_result { +@@ -4957,39 +4952,27 @@ struct cmd_tunnel_tso_set_result { portid_t port_id; }; @@ -21286,7 +22113,7 @@ index b32dc8bfd4..3a451b9fa0 100644 } static void -@@ -4999,6 +4980,13 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, +@@ -4999,6 +4982,13 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, { struct cmd_tunnel_tso_set_result *res = parsed_result; struct rte_eth_dev_info dev_info; @@ -21300,7 +22127,7 @@ index b32dc8bfd4..3a451b9fa0 100644 if (port_id_is_invalid(res->port_id, ENABLED_WARN)) return; -@@ -5010,28 +4998,19 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, +@@ -5010,28 +5000,19 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, if (!strcmp(res->mode, "set")) ports[res->port_id].tunnel_tso_segsz = res->tso_segsz; @@ -21338,7 +22165,7 @@ index b32dc8bfd4..3a451b9fa0 100644 /* Below conditions are needed to make it work: * (1) tunnel TSO is supported by the NIC; -@@ -5044,14 +5023,23 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, +@@ -5044,14 +5025,23 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, * is not necessary for IPv6 tunneled pkts because there's no * checksum in IP header anymore. */ @@ -21367,7 +22194,7 @@ index b32dc8bfd4..3a451b9fa0 100644 } cmd_config_queue_tx_offloads(&ports[res->port_id]); -@@ -12917,32 +12905,25 @@ cmdline_read_from_file(const char *filename) +@@ -12917,32 +12907,25 @@ cmdline_read_from_file(const char *filename) printf("Read CLI commands from %s\n", filename); } @@ -21412,7 +22239,7 @@ index b32dc8bfd4..3a451b9fa0 100644 void diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c -index 88108498e0..6970f90307 100644 +index 88108498e0..698b4d9601 100644 --- a/dpdk/app/test-pmd/cmdline_flow.c +++ b/dpdk/app/test-pmd/cmdline_flow.c @@ -2940,6 +2940,7 @@ static const struct token token_list[] = { @@ -21423,7 +22250,29 @@ index 88108498e0..6970f90307 100644 }, [TABLE_PATTERN_TEMPLATE] = { .name = "pattern_template", -@@ -7737,15 +7738,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token, +@@ -2979,7 +2980,7 @@ static const struct token token_list[] = { + [QUEUE_DESTROY] = { + .name = "destroy", + .help = "destroy a flow rule", +- .next = NEXT(NEXT_ENTRY(QUEUE_DESTROY_ID), ++ .next = NEXT(NEXT_ENTRY(QUEUE_DESTROY_POSTPONE), + NEXT_ENTRY(COMMON_QUEUE_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, queue)), + .call = parse_qo_destroy, +@@ -4771,9 +4772,12 @@ static const struct token token_list[] = { + [ITEM_CONNTRACK] = { + .name = "conntrack", + .help = "conntrack state", ++ .priv = PRIV_ITEM(CONNTRACK, ++ sizeof(struct rte_flow_item_conntrack)), + .next = NEXT(NEXT_ENTRY(ITEM_NEXT), NEXT_ENTRY(COMMON_UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_conntrack, flags)), ++ .call = parse_vc, + }, + [ITEM_PORT_REPRESENTOR] = { + .name = "port_representor", +@@ -7737,15 +7741,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token, l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -21443,7 +22292,7 @@ index 88108498e0..6970f90307 100644 } action_encap_data->conf.size = header - action_encap_data->data; -@@ -7793,11 +7794,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token, +@@ -7793,11 +7797,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token, header = action_decap_data->data; if 
(l2_decap_conf.select_vlan) eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); @@ -21459,7 +22308,7 @@ index 88108498e0..6970f90307 100644 } action_decap_data->conf.size = header - action_decap_data->data; -@@ -7877,15 +7878,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, +@@ -7877,15 +7881,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -21479,7 +22328,7 @@ index 88108498e0..6970f90307 100644 } if (mplsogre_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -7972,15 +7973,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, +@@ -7972,15 +7976,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -21499,7 +22348,7 @@ index 88108498e0..6970f90307 100644 } if (mplsogre_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -8071,15 +8072,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, +@@ -8071,15 +8075,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -21519,7 +22368,7 @@ index 88108498e0..6970f90307 100644 } if (mplsoudp_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -8168,15 +8169,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token, +@@ -8168,15 +8172,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token, mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -21539,7 +22388,7 @@ index 88108498e0..6970f90307 100644 } if (mplsoudp_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -8993,6 +8994,11 @@ parse_table(struct context *ctx, const struct token *token, +@@ -8993,6 +8997,11 @@ parse_table(struct context *ctx, const struct token *token, case TABLE_TRANSFER: out->args.table.attr.flow_attr.transfer = 1; return len; @@ -21552,7 +22401,7 @@ index 88108498e0..6970f90307 100644 return -1; } diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c -index acccb6b035..6a9eb4609c 100644 +index acccb6b035..4ff0e72115 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c @@ -1875,6 +1875,7 @@ port_action_handle_update(portid_t port_id, uint32_t id, @@ -21581,7 +22430,17 @@ index acccb6b035..6a9eb4609c 100644 default: update = action; break; -@@ -2924,8 +2936,10 @@ port_queue_action_handle_update(portid_t port_id, +@@ -2688,8 +2700,7 @@ port_queue_flow_create(portid_t port_id, queueid_t queue_id, + flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, + pattern, pattern_idx, actions, actions_idx, job, &error); + if (!flow) { +- uint32_t flow_id = pf->id; +- port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id); ++ free(pf); + free(job); + return port_flow_complain(&error); + } +@@ -2924,8 +2935,10 @@ port_queue_action_handle_update(portid_t port_id, case RTE_FLOW_ACTION_TYPE_METER_MARK: rte_memcpy(&mtr_update.meter_mark, action->conf, sizeof(struct rte_flow_action_meter_mark)); @@ -21594,8 +22453,63 @@ index acccb6b035..6a9eb4609c 100644 
mtr_update.color_mode_valid = 1; mtr_update.init_color_valid = 1; mtr_update.state_valid = 1; +@@ -4231,9 +4244,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, + continue; + printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", + share_group, share_rxq); +- printf(" lcore %hhu Port %hu queue %hu\n", ++ printf(" lcore %u Port %hu queue %hu\n", + src_lc, src_port, src_rxq); +- printf(" lcore %hhu Port %hu queue %hu\n", ++ printf(" lcore %u Port %hu queue %hu\n", + lc_id, fs->rx_port, fs->rx_queue); + printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", + nb_rxq); +@@ -4414,7 +4427,6 @@ rss_fwd_config_setup(void) + queueid_t nb_q; + streamid_t sm_id; + int start; +- int end; + + nb_q = nb_rxq; + if (nb_q > nb_txq) +@@ -4422,7 +4434,7 @@ rss_fwd_config_setup(void) + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + cur_fwd_config.nb_fwd_ports = nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = +- (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); ++ (streamid_t) (nb_q / num_procs * cur_fwd_config.nb_fwd_ports); + + if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = +@@ -4444,7 +4456,6 @@ rss_fwd_config_setup(void) + * the 2~3 queue for secondary process. + */ + start = proc_id * nb_q / num_procs; +- end = start + nb_q / num_procs; + rxp = 0; + rxq = start; + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { +@@ -4463,8 +4474,6 @@ rss_fwd_config_setup(void) + continue; + rxp = 0; + rxq++; +- if (rxq >= end) +- rxq = start; + } + } + +@@ -4609,7 +4618,7 @@ icmp_echo_config_setup(void) + lcoreid_t lc_id; + uint16_t sm_id; + +- if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) ++ if ((lcoreid_t)(nb_txq * nb_fwd_ports) < nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) + (nb_txq * nb_fwd_ports); + else diff --git a/dpdk/app/test-pmd/csumonly.c b/dpdk/app/test-pmd/csumonly.c -index 1c24598515..4efb72be77 100644 +index 1c24598515..161bbd4e14 100644 --- a/dpdk/app/test-pmd/csumonly.c +++ b/dpdk/app/test-pmd/csumonly.c @@ -250,7 +250,7 @@ parse_gtp(struct rte_udp_hdr *udp_hdr, @@ -21607,7 +22521,103 @@ index 1c24598515..4efb72be77 100644 } /* Parse a vxlan header */ -@@ -1168,10 +1168,13 @@ tunnel_update: +@@ -568,21 +568,23 @@ static uint64_t + process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, + uint64_t tx_offloads, int tso_enabled, struct rte_mbuf *m) + { +- struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr; +- struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr; + struct rte_udp_hdr *udp_hdr; + uint64_t ol_flags = 0; + + if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) { +- ipv4_hdr->hdr_checksum = 0; + ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4; + +- if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ++ if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) { + ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM; +- else ++ } else { ++ struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr; ++ ++ ipv4_hdr->hdr_checksum = 0; + ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); +- } else ++ } ++ } else { + ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6; ++ } + + if (info->outer_l4_proto != IPPROTO_UDP) + return ol_flags; +@@ -595,13 +597,6 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, + + /* Skip SW outer UDP checksum generation if HW supports it */ + if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) { +- if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) +- udp_hdr->dgram_cksum +- = rte_ipv4_phdr_cksum(ipv4_hdr, 
ol_flags); +- else +- udp_hdr->dgram_cksum +- = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +- + ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM; + return ol_flags; + } +@@ -867,17 +862,29 @@ pkt_burst_checksum_forward(struct fwd_stream *fs) + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + inc_rx_burst_stats(fs, nb_rx); +- if (unlikely(nb_rx == 0)) +- return; ++ if (unlikely(nb_rx == 0)) { ++#ifndef RTE_LIB_GRO ++ return ; ++#else ++ gro_enable = gro_ports[fs->rx_port].enable; ++ /* ++ * Check if packets need to be flushed in the GRO context ++ * due to a timeout. ++ * ++ * Continue only in GRO heavyweight mode and if there are ++ * packets in the GRO context. ++ */ ++ if (!gro_enable || (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) || ++ (rte_gro_get_pkt_count(current_fwd_lcore()->gro_ctx) == 0)) ++ return ; ++#endif ++ } + + fs->rx_packets += nb_rx; + rx_bad_ip_csum = 0; + rx_bad_l4_csum = 0; + rx_bad_outer_l4_csum = 0; + rx_bad_outer_ip_csum = 0; +-#ifdef RTE_LIB_GRO +- gro_enable = gro_ports[fs->rx_port].enable; +-#endif + + txp = &ports[fs->tx_port]; + tx_offloads = txp->dev_conf.txmode.offloads; +@@ -1105,6 +1112,7 @@ tunnel_update: + } + + #ifdef RTE_LIB_GRO ++ gro_enable = gro_ports[fs->rx_port].enable; + if (unlikely(gro_enable)) { + if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { + nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx, +@@ -1124,6 +1132,8 @@ tunnel_update: + gro_pkts_num); + fs->gro_times = 0; + } ++ if (nb_rx == 0) ++ return; + } + + pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads); +@@ -1168,10 +1178,13 @@ tunnel_update: nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue, tx_pkts_burst, nb_rx); @@ -21622,7 +22632,7 @@ index 1c24598515..4efb72be77 100644 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst, nb_prep); -@@ -1179,12 +1182,12 @@ tunnel_update: +@@ -1179,12 +1192,12 @@ tunnel_update: /* * Retry if necessary */ @@ -21638,7 +22648,7 @@ index 1c24598515..4efb72be77 100644 } } fs->tx_packets += nb_tx; -@@ -1194,11 +1197,11 @@ tunnel_update: +@@ -1194,11 +1207,11 @@ tunnel_update: fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum; inc_tx_burst_stats(fs, nb_tx); @@ -21654,7 +22664,7 @@ index 1c24598515..4efb72be77 100644 get_end_cycles(fs, start_tsc); diff --git a/dpdk/app/test-pmd/ieee1588fwd.c b/dpdk/app/test-pmd/ieee1588fwd.c -index fc4e2d014c..896d5ef26a 100644 +index fc4e2d014c..8d9ce4d6ce 100644 --- a/dpdk/app/test-pmd/ieee1588fwd.c +++ b/dpdk/app/test-pmd/ieee1588fwd.c @@ -184,13 +184,13 @@ ieee1588_packet_fwd(struct fwd_stream *fs) @@ -21672,6 +22682,33 @@ index fc4e2d014c..896d5ef26a 100644 /* * Check the TX timestamp. 
+@@ -201,14 +201,23 @@ ieee1588_packet_fwd(struct fwd_stream *fs) + static int + port_ieee1588_fwd_begin(portid_t pi) + { +- rte_eth_timesync_enable(pi); +- return 0; ++ int ret; ++ ++ ret = rte_eth_timesync_enable(pi); ++ if (ret) ++ printf("Port %u enable PTP failed, ret = %d\n", pi, ret); ++ ++ return ret; + } + + static void + port_ieee1588_fwd_end(portid_t pi) + { +- rte_eth_timesync_disable(pi); ++ int ret; ++ ++ ret = rte_eth_timesync_disable(pi); ++ if (ret) ++ printf("Port %u disable PTP failed, ret = %d\n", pi, ret); + } + + static void diff --git a/dpdk/app/test-pmd/noisy_vnf.c b/dpdk/app/test-pmd/noisy_vnf.c index c65ec6f06a..abd99a0407 100644 --- a/dpdk/app/test-pmd/noisy_vnf.c @@ -21689,6 +22726,76 @@ index c65ec6f06a..abd99a0407 100644 ncf->prev_time = rte_get_timer_cycles(); } } +diff --git a/dpdk/app/test-pmd/parameters.c b/dpdk/app/test-pmd/parameters.c +index d597c209ba..d6f07df789 100644 +--- a/dpdk/app/test-pmd/parameters.c ++++ b/dpdk/app/test-pmd/parameters.c +@@ -100,10 +100,6 @@ usage(char* progname) + "of peer ports.\n"); + printf(" --eth-peer=X,M:M:M:M:M:M: set the MAC address of the X peer " + "port (0 <= X < %d).\n", RTE_MAX_ETHPORTS); +-#endif +-#ifdef RTE_LIB_LATENCYSTATS +- printf(" --latencystats=N: enable latency and jitter statistics " +- "monitoring on forwarding lcore id N.\n"); + #endif + printf(" --disable-crc-strip: disable CRC stripping by hardware.\n"); + printf(" --enable-scatter: enable scattered Rx.\n"); +@@ -167,8 +163,14 @@ usage(char* progname) + printf(" --disable-device-start: do not automatically start port\n"); + printf(" --no-lsc-interrupt: disable link status change interrupt.\n"); + printf(" --no-rmv-interrupt: disable device removal interrupt.\n"); ++#ifdef RTE_LIB_BITRATESTATS + printf(" --bitrate-stats=N: set the logical core N to perform " + "bit-rate calculation.\n"); ++#endif ++#ifdef RTE_LIB_LATENCYSTATS ++ printf(" --latencystats=N: enable latency and jitter statistics " ++ "monitoring on forwarding lcore id N.\n"); ++#endif + printf(" --print-event : " + "enable print of designated event or all of them.\n"); + printf(" --mask-event : " +@@ -761,7 +763,7 @@ launch_args_parse(int argc, char** argv) + n = strtoul(optarg, &end, 10); + if ((optarg[0] == '\0') || (end == NULL) || + (*end != '\0')) +- break; ++ rte_exit(EXIT_FAILURE, "Invalid stats-period value\n"); + + stats_period = n; + break; +@@ -862,8 +864,8 @@ launch_args_parse(int argc, char** argv) + } + if (!strcmp(lgopts[opt_idx].name, "nb-cores")) { + n = atoi(optarg); +- if (n > 0 && n <= nb_lcores) +- nb_fwd_lcores = (uint8_t) n; ++ if (n > 0 && (lcoreid_t)n <= nb_lcores) ++ nb_fwd_lcores = (lcoreid_t) n; + else + rte_exit(EXIT_FAILURE, + "nb-cores should be > 0 and <= %d\n", +@@ -1113,7 +1115,9 @@ launch_args_parse(int argc, char** argv) + 0, + &dev_info); + if (ret != 0) +- return; ++ rte_exit(EXIT_FAILURE, "Failed to get driver " ++ "recommended burst size, please provide a " ++ "value between 1 and %d\n", MAX_PKT_BURST); + + rec_nb_pkts = dev_info + .default_rxportconf.burst_size; +@@ -1465,7 +1469,7 @@ launch_args_parse(int argc, char** argv) + break; + default: + usage(argv[0]); +- fprintf(stderr, "Invalid option: %s\n", argv[optind]); ++ fprintf(stderr, "Invalid option: %s\n", argv[optind - 1]); + rte_exit(EXIT_FAILURE, + "Command line is incomplete or incorrect\n"); + break; diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c index 134d79a555..340c713c19 100644 --- a/dpdk/app/test-pmd/testpmd.c @@ -22050,7 +23157,7 @@ index 
134d79a555..340c713c19 100644 if (ret != 0) rte_exit(EXIT_FAILURE, diff --git a/dpdk/app/test-pmd/testpmd.h b/dpdk/app/test-pmd/testpmd.h -index 7d24d25970..022210a7a9 100644 +index 7d24d25970..f7f3587c83 100644 --- a/dpdk/app/test-pmd/testpmd.h +++ b/dpdk/app/test-pmd/testpmd.h @@ -34,6 +34,7 @@ @@ -22061,11 +23168,37 @@ index 7d24d25970..022210a7a9 100644 /* * It is used to allocate the memory for hash key. +@@ -83,7 +84,7 @@ extern uint8_t cl_quit; + /* Maximum number of pools supported per Rx queue */ + #define MAX_MEMPOOL 8 + +-typedef uint8_t lcoreid_t; ++typedef uint32_t lcoreid_t; + typedef uint16_t portid_t; + typedef uint16_t queueid_t; + typedef uint16_t streamid_t; diff --git a/dpdk/app/test/meson.build b/dpdk/app/test/meson.build -index f34d19e3c3..4e39c9e7cf 100644 +index f34d19e3c3..09cbe5e620 100644 --- a/dpdk/app/test/meson.build +++ b/dpdk/app/test/meson.build -@@ -206,6 +206,7 @@ fast_tests = [ +@@ -15,7 +15,6 @@ test_sources = files( + 'test_barrier.c', + 'test_bitops.c', + 'test_bitmap.c', +- 'test_bpf.c', + 'test_byteorder.c', + 'test_cksum.c', + 'test_cksum_perf.c', +@@ -161,8 +160,6 @@ fast_tests = [ + ['acl_autotest', true, true], + ['atomic_autotest', false, true], + ['bitmap_autotest', true, true], +- ['bpf_autotest', true, true], +- ['bpf_convert_autotest', true, true], + ['bitops_autotest', true, true], + ['byteorder_autotest', true, true], + ['cksum_autotest', true, true], +@@ -206,6 +203,7 @@ fast_tests = [ ['memzone_autotest', false, true], ['meter_autotest', true, true], ['multiprocess_autotest', false, false], @@ -22073,7 +23206,7 @@ index f34d19e3c3..4e39c9e7cf 100644 ['per_lcore_autotest', true, true], ['pflock_autotest', true, true], ['prefetch_autotest', true, true], -@@ -320,6 +321,10 @@ driver_test_names = [ +@@ -320,6 +318,10 @@ driver_test_names = [ dump_test_names = [] if not is_windows @@ -22084,6 +23217,46 @@ index f34d19e3c3..4e39c9e7cf 100644 driver_test_names += [ 'cryptodev_openssl_asym_autotest', 'eventdev_selftest_octeontx', +@@ -425,6 +427,14 @@ if dpdk_conf.has('RTE_HAS_LIBPCAP') + endif + endif + ++if arch_subdir != 'x86' or dpdk_conf.get('RTE_ARCH_64') ++ test_sources += 'test_bpf.c' ++ fast_tests += [ ++ ['bpf_autotest', true, true], ++ ['bpf_convert_autotest', true, true], ++ ] ++endif ++ + if cc.has_argument('-Wno-format-truncation') + cflags += '-Wno-format-truncation' + endif +@@ -469,15 +479,21 @@ dpdk_test = executable('dpdk-test', + driver_install_path), + install: true) + +-has_hugepage = run_command(py3, 'has_hugepage.py', check: true).stdout().strip() != '0' +-message('hugepage availability: @0@'.format(has_hugepage)) +- + # some perf tests (eg: memcpy perf autotest)take very long + # to complete, so timeout to 10 minutes + timeout_seconds = 600 + timeout_seconds_fast = 10 + + test_no_huge_args = ['--no-huge', '-m', '2048'] ++has_hugepage = run_command(py3, 'has_hugepage.py', check: true).stdout().strip() != '0' ++message('hugepage availability: @0@'.format(has_hugepage)) ++if not has_hugepage ++ if arch_subdir == 'ppc' ++ # On ppc64, without huge pages, PA would be the default but fails like: ++ # EAL: Cannot use IOVA as 'PA' since physical addresses are not available ++ test_no_huge_args += '--iova-mode=va' ++ endif ++endif + + foreach arg : fast_tests + test_args = [] diff --git a/dpdk/app/test/packet_burst_generator.c b/dpdk/app/test/packet_burst_generator.c index 6b42b9b83b..867a88da00 100644 --- a/dpdk/app/test/packet_burst_generator.c @@ -22160,6 +23333,112 @@ index 6b42b9b83b..867a88da00 100644 } pkt_seg->next = 
NULL; /* Last segment of packet. */ +diff --git a/dpdk/app/test/process.h b/dpdk/app/test/process.h +index 1f073b9c5c..e8e7e5ab60 100644 +--- a/dpdk/app/test/process.h ++++ b/dpdk/app/test/process.h +@@ -17,6 +17,7 @@ + #include + + #include /* strlcpy */ ++#include + + #ifdef RTE_EXEC_ENV_FREEBSD + #define self "curproc" +@@ -34,6 +35,34 @@ extern uint16_t flag_for_send_pkts; + #endif + #endif + ++#define PREFIX_ALLOW "--allow=" ++ ++static int ++add_parameter_allow(char **argv, int max_capacity) ++{ ++ struct rte_devargs *devargs; ++ int count = 0; ++ ++ RTE_EAL_DEVARGS_FOREACH(NULL, devargs) { ++ if (strlen(devargs->name) == 0) ++ continue; ++ ++ if (devargs->data == NULL || strlen(devargs->data) == 0) { ++ if (asprintf(&argv[count], PREFIX_ALLOW"%s", devargs->name) < 0) ++ break; ++ } else { ++ if (asprintf(&argv[count], PREFIX_ALLOW"%s,%s", ++ devargs->name, devargs->data) < 0) ++ break; ++ } ++ ++ if (++count == max_capacity) ++ break; ++ } ++ ++ return count; ++} ++ + /* + * launches a second copy of the test process using the given argv parameters, + * which should include argv[0] as the process name. To identify in the +@@ -43,8 +72,10 @@ extern uint16_t flag_for_send_pkts; + static inline int + process_dup(const char *const argv[], int numargs, const char *env_value) + { +- int num; +- char *argv_cpy[numargs + 1]; ++ int num = 0; ++ char **argv_cpy; ++ int allow_num; ++ int argv_num; + int i, status; + char path[32]; + #ifdef RTE_LIB_PDUMP +@@ -58,11 +89,21 @@ process_dup(const char *const argv[], int numargs, const char *env_value) + if (pid < 0) + return -1; + else if (pid == 0) { ++ allow_num = rte_devargs_type_count(RTE_DEVTYPE_ALLOWED); ++ argv_num = numargs + allow_num + 1; ++ argv_cpy = calloc(argv_num, sizeof(char *)); ++ if (!argv_cpy) ++ rte_panic("Memory allocation failed\n"); ++ + /* make a copy of the arguments to be passed to exec */ +- for (i = 0; i < numargs; i++) ++ for (i = 0; i < numargs; i++) { + argv_cpy[i] = strdup(argv[i]); +- argv_cpy[i] = NULL; +- num = numargs; ++ if (argv_cpy[i] == NULL) ++ rte_panic("Error dup args\n"); ++ } ++ if (allow_num > 0) ++ num = add_parameter_allow(&argv_cpy[i], allow_num); ++ num += numargs; + + #ifdef RTE_EXEC_ENV_LINUX + { +diff --git a/dpdk/app/test/test.c b/dpdk/app/test/test.c +index fb073ff795..5cf9f51c28 100644 +--- a/dpdk/app/test/test.c ++++ b/dpdk/app/test/test.c +@@ -343,11 +343,13 @@ unit_test_suite_runner(struct unit_test_suite *suite) + + if (test_success == TEST_SUCCESS) + suite->succeeded++; +- else if (test_success == TEST_SKIPPED) ++ else if (test_success == TEST_SKIPPED) { + suite->skipped++; +- else if (test_success == -ENOTSUP) ++ suite->executed--; ++ } else if (test_success == -ENOTSUP) { + suite->unsupported++; +- else ++ suite->executed--; ++ } else + suite->failed++; + } else if (test_success == -ENOTSUP) { + suite->unsupported++; diff --git a/dpdk/app/test/test.h b/dpdk/app/test/test.h index 85f57efbc6..6a4fa0b1d7 100644 --- a/dpdk/app/test/test.h @@ -22173,8 +23452,60 @@ index 85f57efbc6..6a4fa0b1d7 100644 #define TEST_CASE_NAMED_WITH_DATA(name, setup, teardown, testcase, data) \ { setup, teardown, NULL, testcase, name, 1, data } +diff --git a/dpdk/app/test/test_bpf.c b/dpdk/app/test/test_bpf.c +index f5af5e8a3f..51184341c2 100644 +--- a/dpdk/app/test/test_bpf.c ++++ b/dpdk/app/test/test_bpf.c +@@ -3341,6 +3341,7 @@ test_bpf_filter_sanity(pcap_t *pcap) + struct rte_ipv4_hdr ip_hdr; + } *hdr; + ++ memset(&mb, 0, sizeof(mb)); + dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen); + m = &mb; + +diff --git 
a/dpdk/app/test/test_cfgfile.c b/dpdk/app/test/test_cfgfile.c +index 2f596affee..a5e3d8699c 100644 +--- a/dpdk/app/test/test_cfgfile.c ++++ b/dpdk/app/test/test_cfgfile.c +@@ -168,7 +168,7 @@ test_cfgfile_invalid_section_header(void) + struct rte_cfgfile *cfgfile; + + cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/invalid_section.ini", 0); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } +@@ -185,7 +185,7 @@ test_cfgfile_invalid_comment(void) + + cfgfile = rte_cfgfile_load_with_params(CFG_FILES_ETC "/sample2.ini", 0, + ¶ms); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } +@@ -196,7 +196,7 @@ test_cfgfile_invalid_key_value_pair(void) + struct rte_cfgfile *cfgfile; + + cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty_key_value.ini", 0); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } +@@ -236,7 +236,7 @@ test_cfgfile_missing_section(void) + struct rte_cfgfile *cfgfile; + + cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/missing_section.ini", 0); +- TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur"); ++ TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur"); + + return 0; + } diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index d6ae762df9..b75edb2f2b 100644 +index d6ae762df9..2069ccad3b 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c @@ -136,6 +136,17 @@ security_proto_supported(enum rte_security_session_action_type action, @@ -22195,7 +23526,16 @@ index d6ae762df9..b75edb2f2b 100644 static struct rte_mbuf * setup_test_string(struct rte_mempool *mpool, const char *string, size_t len, uint8_t blocksize) -@@ -4761,7 +4772,6 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) +@@ -183,6 +194,8 @@ post_process_raw_dp_op(void *user_data, uint32_t index __rte_unused, + static struct crypto_testsuite_params testsuite_params = { NULL }; + struct crypto_testsuite_params *p_testsuite_params = &testsuite_params; + static struct crypto_unittest_params unittest_params; ++static bool enq_cb_called; ++static bool deq_cb_called; + + void + process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id, +@@ -4761,7 +4774,6 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) unsigned int plaintext_len; struct rte_cryptodev_info dev_info; @@ -22203,7 +23543,7 @@ index d6ae762df9..b75edb2f2b 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -4783,19 +4793,14 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) +@@ -4783,19 +4795,14 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) return TEST_SKIPPED; /* Check if device supports ZUC EEA3 */ @@ -22228,7 +23568,7 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; /* Create ZUC session */ -@@ -4853,7 +4858,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) +@@ -4853,7 +4860,7 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata) TEST_ASSERT_BUFFERS_ARE_EQUAL( ut_params->digest, tdata->digest.data, @@ -22237,7 +23577,7 @@ index d6ae762df9..b75edb2f2b 100644 "ZUC Generated auth tag not as expected"); return 0; } -@@ -6253,6 +6258,9 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, +@@ -6253,6 +6260,9 @@ test_zuc_auth_cipher(const struct wireless_test_data 
*tdata, tdata->digest.len) < 0) return TEST_SKIPPED; @@ -22247,7 +23587,7 @@ index d6ae762df9..b75edb2f2b 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -6415,7 +6423,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, +@@ -6415,7 +6425,7 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata, TEST_ASSERT_BUFFERS_ARE_EQUAL( ut_params->digest, tdata->digest.data, @@ -22256,7 +23596,7 @@ index d6ae762df9..b75edb2f2b 100644 "ZUC Generated auth tag not as expected"); } return 0; -@@ -6453,6 +6461,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6453,6 +6463,9 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, tdata->digest.len) < 0) return TEST_SKIPPED; @@ -22266,7 +23606,7 @@ index d6ae762df9..b75edb2f2b 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -6622,7 +6633,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, +@@ -6622,7 +6635,7 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata, TEST_ASSERT_BUFFERS_ARE_EQUAL( digest, tdata->digest.data, @@ -22275,7 +23615,7 @@ index d6ae762df9..b75edb2f2b 100644 "ZUC Generated auth tag not as expected"); } return 0; -@@ -6852,6 +6863,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, +@@ -6852,6 +6865,7 @@ snow3g_hash_test_vector_setup(const struct snow3g_test_data *pattern, static int test_snow3g_decryption_with_digest_test_case_1(void) { @@ -22283,7 +23623,7 @@ index d6ae762df9..b75edb2f2b 100644 struct snow3g_hash_test_data snow3g_hash_data; struct rte_cryptodev_info dev_info; struct crypto_testsuite_params *ts_params = &testsuite_params; -@@ -6870,8 +6882,9 @@ test_snow3g_decryption_with_digest_test_case_1(void) +@@ -6870,8 +6884,9 @@ test_snow3g_decryption_with_digest_test_case_1(void) */ snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data); @@ -22295,7 +23635,7 @@ index d6ae762df9..b75edb2f2b 100644 return test_snow3g_authentication_verify(&snow3g_hash_data); } -@@ -7626,6 +7639,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, +@@ -7626,6 +7641,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, if (global_api_test_type == CRYPTODEV_RAW_API_TEST) return TEST_SKIPPED; @@ -22305,7 +23645,7 @@ index d6ae762df9..b75edb2f2b 100644 rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info); uint64_t feat_flags = dev_info.feature_flags; -@@ -7648,6 +7664,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, +@@ -7648,6 +7666,9 @@ test_mixed_auth_cipher_sgl(const struct mixed_cipher_auth_test_data *tdata, } } @@ -22315,7 +23655,7 @@ index d6ae762df9..b75edb2f2b 100644 /* Create the session */ if (verify) retval = create_wireless_algo_cipher_auth_session( -@@ -8433,7 +8452,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) +@@ -8433,7 +8454,7 @@ test_authenticated_encryption(const struct aead_test_data *tdata) tdata->key.data, tdata->key.len, tdata->aad.len, tdata->auth_tag.len, tdata->iv.len); @@ -22324,7 +23664,25 @@ index d6ae762df9..b75edb2f2b 100644 return retval; if (tdata->aad.len > MBUF_SIZE) { -@@ -9809,7 +9828,7 @@ test_ipsec_ah_proto_all(const struct ipsec_test_flags *flags) +@@ -8595,7 +8616,7 @@ static int test_pdcp_proto(int i, int oop, enum rte_crypto_cipher_operation opc, + /* Out of place support */ + if (oop) { + /* +- * For out-op-place we need to alloc 
another mbuf ++ * For out-of-place we need to alloc another mbuf + */ + ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); + rte_pktmbuf_append(ut_params->obuf, output_vec_len); +@@ -8804,7 +8825,7 @@ test_pdcp_proto_SGL(int i, int oop, + /* Out of place support */ + if (oop) { + /* +- * For out-op-place we need to alloc another mbuf ++ * For out-of-place we need to alloc another mbuf + */ + ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); + rte_pktmbuf_append(ut_params->obuf, frag_size_oop); +@@ -9809,7 +9830,7 @@ test_ipsec_ah_proto_all(const struct ipsec_test_flags *flags) } static int @@ -22333,7 +23691,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9821,7 +9840,7 @@ test_ipsec_proto_display_list(const void *data __rte_unused) +@@ -9821,7 +9842,7 @@ test_ipsec_proto_display_list(const void *data __rte_unused) } static int @@ -22342,7 +23700,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9834,7 +9853,7 @@ test_ipsec_proto_ah_tunnel_ipv4(const void *data __rte_unused) +@@ -9834,7 +9855,7 @@ test_ipsec_proto_ah_tunnel_ipv4(const void *data __rte_unused) } static int @@ -22351,7 +23709,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9847,7 +9866,7 @@ test_ipsec_proto_ah_transport_ipv4(const void *data __rte_unused) +@@ -9847,7 +9868,7 @@ test_ipsec_proto_ah_transport_ipv4(const void *data __rte_unused) } static int @@ -22360,7 +23718,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9859,7 +9878,7 @@ test_ipsec_proto_iv_gen(const void *data __rte_unused) +@@ -9859,7 +9880,7 @@ test_ipsec_proto_iv_gen(const void *data __rte_unused) } static int @@ -22369,7 +23727,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9871,7 +9890,7 @@ test_ipsec_proto_sa_exp_pkts_soft(const void *data __rte_unused) +@@ -9871,7 +9892,7 @@ test_ipsec_proto_sa_exp_pkts_soft(const void *data __rte_unused) } static int @@ -22378,7 +23736,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9883,7 +9902,7 @@ test_ipsec_proto_sa_exp_pkts_hard(const void *data __rte_unused) +@@ -9883,7 +9904,7 @@ test_ipsec_proto_sa_exp_pkts_hard(const void *data __rte_unused) } static int @@ -22387,7 +23745,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9895,7 +9914,7 @@ test_ipsec_proto_err_icv_corrupt(const void *data __rte_unused) +@@ -9895,7 +9916,7 @@ test_ipsec_proto_err_icv_corrupt(const void *data __rte_unused) } static int @@ -22396,7 +23754,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9912,7 +9931,7 @@ test_ipsec_proto_udp_encap_custom_ports(const void *data __rte_unused) +@@ -9912,7 +9933,7 @@ test_ipsec_proto_udp_encap_custom_ports(const void *data __rte_unused) } static int @@ -22405,7 +23763,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9924,7 +9943,7 @@ test_ipsec_proto_udp_encap(const void *data __rte_unused) +@@ -9924,7 +9945,7 @@ test_ipsec_proto_udp_encap(const void *data __rte_unused) } static int @@ -22414,7 +23772,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9936,7 +9955,7 @@ test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) +@@ -9936,7 +9957,7 @@ test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused) } static int @@ -22423,7 +23781,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9948,7 +9967,7 @@ 
test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused) +@@ -9948,7 +9969,7 @@ test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused) } static int @@ -22432,7 +23790,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9961,7 +9980,7 @@ test_ipsec_proto_udp_ports_verify(const void *data __rte_unused) +@@ -9961,7 +9982,7 @@ test_ipsec_proto_udp_ports_verify(const void *data __rte_unused) } static int @@ -22441,7 +23799,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9973,7 +9992,7 @@ test_ipsec_proto_inner_ip_csum(const void *data __rte_unused) +@@ -9973,7 +9994,7 @@ test_ipsec_proto_inner_ip_csum(const void *data __rte_unused) } static int @@ -22450,7 +23808,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9985,7 +10004,7 @@ test_ipsec_proto_inner_l4_csum(const void *data __rte_unused) +@@ -9985,7 +10006,7 @@ test_ipsec_proto_inner_l4_csum(const void *data __rte_unused) } static int @@ -22459,7 +23817,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -9998,7 +10017,7 @@ test_ipsec_proto_tunnel_v4_in_v4(const void *data __rte_unused) +@@ -9998,7 +10019,7 @@ test_ipsec_proto_tunnel_v4_in_v4(const void *data __rte_unused) } static int @@ -22468,7 +23826,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10011,7 +10030,7 @@ test_ipsec_proto_tunnel_v6_in_v6(const void *data __rte_unused) +@@ -10011,7 +10032,7 @@ test_ipsec_proto_tunnel_v6_in_v6(const void *data __rte_unused) } static int @@ -22477,7 +23835,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10024,7 +10043,7 @@ test_ipsec_proto_tunnel_v4_in_v6(const void *data __rte_unused) +@@ -10024,7 +10045,7 @@ test_ipsec_proto_tunnel_v4_in_v6(const void *data __rte_unused) } static int @@ -22486,7 +23844,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10037,7 +10056,7 @@ test_ipsec_proto_tunnel_v6_in_v4(const void *data __rte_unused) +@@ -10037,7 +10058,7 @@ test_ipsec_proto_tunnel_v6_in_v4(const void *data __rte_unused) } static int @@ -22495,7 +23853,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10050,7 +10069,7 @@ test_ipsec_proto_transport_v4(const void *data __rte_unused) +@@ -10050,7 +10071,7 @@ test_ipsec_proto_transport_v4(const void *data __rte_unused) } static int @@ -22504,7 +23862,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags = { .l4_csum = true, -@@ -10061,7 +10080,7 @@ test_ipsec_proto_transport_l4_csum(const void *data __rte_unused) +@@ -10061,7 +10082,7 @@ test_ipsec_proto_transport_l4_csum(const void *data __rte_unused) } static int @@ -22513,7 +23871,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10073,7 +10092,7 @@ test_ipsec_proto_stats(const void *data __rte_unused) +@@ -10073,7 +10094,7 @@ test_ipsec_proto_stats(const void *data __rte_unused) } static int @@ -22522,7 +23880,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10086,7 +10105,7 @@ test_ipsec_proto_pkt_fragment(const void *data __rte_unused) +@@ -10086,7 +10107,7 @@ test_ipsec_proto_pkt_fragment(const void *data __rte_unused) } static int @@ -22531,7 +23889,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10098,7 +10117,7 @@ test_ipsec_proto_copy_df_inner_0(const void *data __rte_unused) +@@ -10098,7 +10119,7 @@ test_ipsec_proto_copy_df_inner_0(const void *data __rte_unused) } 
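/*
 * Editor's note, not part of the patch: the signature changes above all
 * follow one pattern -- each IPsec case drops its unused
 * "const void *data __rte_unused" parameter and becomes int (*)(void).
 * Calling a function through a function pointer of a different type is
 * undefined behaviour in C, so aligning the case signatures with the test
 * framework's slot type is a correctness fix, not just cleanup (that
 * rationale is inferred from the pattern, not stated in the patch).
 * A minimal sketch; run_case() is a hypothetical stand-in for the harness.
 */
typedef int (*testcase_fn)(void);

static int
example_case(void)	/* was: example_case(const void *data __rte_unused) */
{
	return 0;	/* TEST_SUCCESS */
}

static int
run_case(testcase_fn fn)
{
	return fn();	/* pointer and function types match: well defined */
}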
static int @@ -22540,7 +23898,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10110,7 +10129,7 @@ test_ipsec_proto_copy_df_inner_1(const void *data __rte_unused) +@@ -10110,7 +10131,7 @@ test_ipsec_proto_copy_df_inner_1(const void *data __rte_unused) } static int @@ -22549,7 +23907,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10122,7 +10141,7 @@ test_ipsec_proto_set_df_0_inner_1(const void *data __rte_unused) +@@ -10122,7 +10143,7 @@ test_ipsec_proto_set_df_0_inner_1(const void *data __rte_unused) } static int @@ -22558,7 +23916,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10134,7 +10153,7 @@ test_ipsec_proto_set_df_1_inner_0(const void *data __rte_unused) +@@ -10134,7 +10155,7 @@ test_ipsec_proto_set_df_1_inner_0(const void *data __rte_unused) } static int @@ -22567,7 +23925,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10146,7 +10165,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) +@@ -10146,7 +10167,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_0(const void *data __rte_unused) } static int @@ -22576,7 +23934,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10158,7 +10177,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) +@@ -10158,7 +10179,7 @@ test_ipsec_proto_ipv4_copy_dscp_inner_1(const void *data __rte_unused) } static int @@ -22585,7 +23943,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10174,7 +10193,7 @@ test_ipsec_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) +@@ -10174,7 +10195,7 @@ test_ipsec_proto_ipv4_set_dscp_0_inner_1(const void *data __rte_unused) } static int @@ -22594,7 +23952,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10190,7 +10209,7 @@ test_ipsec_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) +@@ -10190,7 +10211,7 @@ test_ipsec_proto_ipv4_set_dscp_1_inner_0(const void *data __rte_unused) } static int @@ -22603,7 +23961,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10204,7 +10223,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) +@@ -10204,7 +10225,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_0(const void *data __rte_unused) } static int @@ -22612,7 +23970,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10218,7 +10237,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) +@@ -10218,7 +10239,7 @@ test_ipsec_proto_ipv6_copy_dscp_inner_1(const void *data __rte_unused) } static int @@ -22621,7 +23979,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10236,7 +10255,7 @@ test_ipsec_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) +@@ -10236,7 +10257,7 @@ test_ipsec_proto_ipv6_set_dscp_0_inner_1(const void *data __rte_unused) } static int @@ -22630,7 +23988,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags; -@@ -10442,7 +10461,7 @@ test_PDCP_PROTO_all(void) +@@ -10442,7 +10463,7 @@ test_PDCP_PROTO_all(void) } static int @@ -22639,7 +23997,7 @@ index d6ae762df9..b75edb2f2b 100644 { struct ipsec_test_flags flags = { .dec_ttl_or_hop_limit = true -@@ -10452,7 +10471,7 @@ test_ipsec_proto_ipv4_ttl_decrement(const void *data __rte_unused) +@@ -10452,7 +10473,7 @@ test_ipsec_proto_ipv4_ttl_decrement(const void *data __rte_unused) } static int @@ -22648,7 +24006,7 @@ index d6ae762df9..b75edb2f2b 100644 { 
struct ipsec_test_flags flags = { .ipv6 = true, -@@ -11567,7 +11586,7 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata) +@@ -11567,7 +11588,7 @@ test_authenticated_decryption_oop(const struct aead_test_data *tdata) tdata->key.data, tdata->key.len, tdata->aad.len, tdata->auth_tag.len, tdata->iv.len); @@ -22657,7 +24015,7 @@ index d6ae762df9..b75edb2f2b 100644 return retval; /* alloc mbuf and set payload */ -@@ -11981,11 +12000,11 @@ test_stats(void) +@@ -11981,11 +12002,11 @@ test_stats(void) TEST_ASSERT((stats.enqueued_count == 1), "rte_cryptodev_stats_get returned unexpected enqueued stat"); TEST_ASSERT((stats.dequeued_count == 1), @@ -22672,7 +24030,7 @@ index d6ae762df9..b75edb2f2b 100644 /* invalid device but should ignore and not reset device stats*/ rte_cryptodev_stats_reset(ts_params->valid_devs[0] + 300); -@@ -11993,7 +12012,7 @@ test_stats(void) +@@ -11993,7 +12014,7 @@ test_stats(void) &stats), "rte_cryptodev_stats_get failed"); TEST_ASSERT((stats.enqueued_count == 1), @@ -22681,7 +24039,7 @@ index d6ae762df9..b75edb2f2b 100644 /* check that a valid reset clears stats */ rte_cryptodev_stats_reset(ts_params->valid_devs[0]); -@@ -12001,9 +12020,9 @@ test_stats(void) +@@ -12001,9 +12022,9 @@ test_stats(void) &stats), "rte_cryptodev_stats_get failed"); TEST_ASSERT((stats.enqueued_count == 0), @@ -22693,7 +24051,223 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SUCCESS; } -@@ -12990,7 +13009,7 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) +@@ -12588,6 +12609,7 @@ test_enq_callback(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, + RTE_SET_USED(ops); + RTE_SET_USED(user_param); + ++ enq_cb_called = true; + printf("crypto enqueue callback called\n"); + return nb_ops; + } +@@ -12601,21 +12623,58 @@ test_deq_callback(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, + RTE_SET_USED(ops); + RTE_SET_USED(user_param); + ++ deq_cb_called = true; + printf("crypto dequeue callback called\n"); + return nb_ops; + } + + /* +- * Thread using enqueue/dequeue callback with RCU. ++ * Process enqueue/dequeue NULL crypto request to verify callback with RCU. + */ + static int +-test_enqdeq_callback_thread(void *arg) ++test_enqdeq_callback_null_cipher(void) + { +- RTE_SET_USED(arg); +- /* DP thread calls rte_cryptodev_enqueue_burst()/ +- * rte_cryptodev_dequeue_burst() and invokes callback. 
+- */ +- test_null_burst_operation(); ++ struct crypto_testsuite_params *ts_params = &testsuite_params; ++ struct crypto_unittest_params *ut_params = &unittest_params; ++ ++ /* Setup Cipher Parameters */ ++ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ++ ut_params->cipher_xform.next = &ut_params->auth_xform; ++ ++ ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL; ++ ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; ++ ++ /* Setup Auth Parameters */ ++ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; ++ ut_params->auth_xform.next = NULL; ++ ++ ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_NULL; ++ ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; ++ ++ /* Create Crypto session */ ++ ut_params->sess = rte_cryptodev_sym_session_create(ts_params->valid_devs[0], ++ &ut_params->auth_xform, ts_params->session_mpool); ++ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed"); ++ ++ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC); ++ TEST_ASSERT_NOT_NULL(ut_params->op, "Failed to allocate symmetric crypto op"); ++ ++ /* Allocate mbuf */ ++ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); ++ TEST_ASSERT_NOT_NULL(ut_params->ibuf, "Failed to allocate mbuf"); ++ ++ /* Append some random data */ ++ TEST_ASSERT_NOT_NULL(rte_pktmbuf_append(ut_params->ibuf, sizeof(unsigned int)), ++ "no room to append data"); ++ ++ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess); ++ ++ ut_params->op->sym->m_src = ut_params->ibuf; ++ ++ /* Process crypto operation */ ++ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0], ut_params->op), ++ "failed to process sym crypto op"); ++ + return 0; + } + +@@ -12623,6 +12682,7 @@ static int + test_enq_callback_setup(void) + { + struct crypto_testsuite_params *ts_params = &testsuite_params; ++ struct rte_cryptodev_sym_capability_idx cap_idx; + struct rte_cryptodev_info dev_info; + struct rte_cryptodev_qp_conf qp_conf = { + .nb_descriptors = MAX_NUM_OPS_INFLIGHT +@@ -12630,6 +12690,19 @@ test_enq_callback_setup(void) + + struct rte_cryptodev_cb *cb; + uint16_t qp_id = 0; ++ int j = 0; ++ ++ /* Verify the crypto capabilities for which enqueue/dequeue is done. 
*/ ++ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; ++ cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; ++ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], ++ &cap_idx) == NULL) ++ return TEST_SKIPPED; ++ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ++ cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_NULL; ++ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], ++ &cap_idx) == NULL) ++ return TEST_SKIPPED; + + /* Stop the device in case it's started so it can be configured */ + rte_cryptodev_stop(ts_params->valid_devs[0]); +@@ -12653,9 +12726,16 @@ test_enq_callback_setup(void) + qp_conf.nb_descriptors, qp_id, + ts_params->valid_devs[0]); + ++ enq_cb_called = false; + /* Test with invalid crypto device */ + cb = rte_cryptodev_add_enq_callback(RTE_CRYPTO_MAX_DEVS, + qp_id, test_enq_callback, NULL); ++ if (rte_errno == ENOTSUP) { ++ RTE_LOG(ERR, USER1, "%s line %d: " ++ "rte_cryptodev_add_enq_callback() " ++ "Not supported, skipped\n", __func__, __LINE__); ++ return TEST_SKIPPED; ++ } + TEST_ASSERT_NULL(cb, "Add callback on qp %u on " + "cryptodev %u did not fail", + qp_id, RTE_CRYPTO_MAX_DEVS); +@@ -12685,12 +12765,11 @@ test_enq_callback_setup(void) + + rte_cryptodev_start(ts_params->valid_devs[0]); + +- /* Launch a thread */ +- rte_eal_remote_launch(test_enqdeq_callback_thread, NULL, +- rte_get_next_lcore(-1, 1, 0)); ++ TEST_ASSERT_SUCCESS(test_enqdeq_callback_null_cipher(), "Crypto Processing failed"); + +- /* Wait until reader exited. */ +- rte_eal_mp_wait_lcore(); ++ /* Wait until callback not called. */ ++ while (!enq_cb_called && (j++ < 10)) ++ rte_delay_ms(10); + + /* Test with invalid crypto device */ + TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback( +@@ -12715,6 +12794,8 @@ test_enq_callback_setup(void) + "qp %u on cryptodev %u", + qp_id, ts_params->valid_devs[0]); + ++ TEST_ASSERT(enq_cb_called == true, "Crypto enqueue callback not called"); ++ + return TEST_SUCCESS; + } + +@@ -12722,6 +12803,7 @@ static int + test_deq_callback_setup(void) + { + struct crypto_testsuite_params *ts_params = &testsuite_params; ++ struct rte_cryptodev_sym_capability_idx cap_idx; + struct rte_cryptodev_info dev_info; + struct rte_cryptodev_qp_conf qp_conf = { + .nb_descriptors = MAX_NUM_OPS_INFLIGHT +@@ -12729,6 +12811,19 @@ test_deq_callback_setup(void) + + struct rte_cryptodev_cb *cb; + uint16_t qp_id = 0; ++ int j = 0; ++ ++ /* Verify the crypto capabilities for which enqueue/dequeue is done. 
*/ ++ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; ++ cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; ++ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], ++ &cap_idx) == NULL) ++ return TEST_SKIPPED; ++ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; ++ cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_NULL; ++ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0], ++ &cap_idx) == NULL) ++ return TEST_SKIPPED; + + /* Stop the device in case it's started so it can be configured */ + rte_cryptodev_stop(ts_params->valid_devs[0]); +@@ -12752,9 +12847,16 @@ test_deq_callback_setup(void) + qp_conf.nb_descriptors, qp_id, + ts_params->valid_devs[0]); + ++ deq_cb_called = false; + /* Test with invalid crypto device */ + cb = rte_cryptodev_add_deq_callback(RTE_CRYPTO_MAX_DEVS, + qp_id, test_deq_callback, NULL); ++ if (rte_errno == ENOTSUP) { ++ RTE_LOG(ERR, USER1, "%s line %d: " ++ "rte_cryptodev_add_deq_callback() " ++ "Not supported, skipped\n", __func__, __LINE__); ++ return TEST_SKIPPED; ++ } + TEST_ASSERT_NULL(cb, "Add callback on qp %u on " + "cryptodev %u did not fail", + qp_id, RTE_CRYPTO_MAX_DEVS); +@@ -12784,12 +12886,11 @@ test_deq_callback_setup(void) + + rte_cryptodev_start(ts_params->valid_devs[0]); + +- /* Launch a thread */ +- rte_eal_remote_launch(test_enqdeq_callback_thread, NULL, +- rte_get_next_lcore(-1, 1, 0)); ++ TEST_ASSERT_SUCCESS(test_enqdeq_callback_null_cipher(), "Crypto processing failed"); + +- /* Wait until reader exited. */ +- rte_eal_mp_wait_lcore(); ++ /* Wait until callback not called. */ ++ while (!deq_cb_called && (j++ < 10)) ++ rte_delay_ms(10); + + /* Test with invalid crypto device */ + TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback( +@@ -12814,6 +12915,8 @@ test_deq_callback_setup(void) + "qp %u on cryptodev %u", + qp_id, ts_params->valid_devs[0]); + ++ TEST_ASSERT(deq_cb_called == true, "Crypto dequeue callback not called"); ++ + return TEST_SUCCESS; + } + +@@ -12990,7 +13093,7 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_GENERATE); @@ -22702,7 +24276,7 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13121,7 +13140,7 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) +@@ -13121,7 +13224,7 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_VERIFY); @@ -22711,7 +24285,7 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13250,7 +13269,7 @@ test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata, +@@ -13250,7 +13353,7 @@ test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata, retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_GENERATE); @@ -22720,7 +24294,7 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13867,7 +13886,7 @@ test_authentication_verify_fail_when_data_corruption( +@@ -13867,7 +13970,7 @@ test_authentication_verify_fail_when_data_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY); @@ -22729,7 +24303,7 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13954,6 +13973,8 @@ test_authentication_verify_GMAC_fail_when_corruption( +@@ -13954,6 +14057,8 @@ test_authentication_verify_GMAC_fail_when_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_CIPHER_OP_DECRYPT); @@ -22738,7 
+24312,7 @@ index d6ae762df9..b75edb2f2b 100644 if (retval < 0) return retval; -@@ -14044,8 +14065,7 @@ test_authenticated_decryption_fail_when_corruption( +@@ -14044,8 +14149,7 @@ test_authenticated_decryption_fail_when_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_CIPHER_OP_DECRYPT); @@ -22748,7 +24322,7 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -14450,8 +14470,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, +@@ -14450,8 +14554,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, &cap_idx) == NULL) return TEST_SKIPPED; @@ -22764,6 +24338,15 @@ index d6ae762df9..b75edb2f2b 100644 return TEST_SKIPPED; /* Detailed check for the particular SGL support flag */ +@@ -14514,7 +14623,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, + } + + /* +- * For out-op-place we need to alloc another mbuf ++ * For out-of-place we need to alloc another mbuf + */ + if (oop) { + ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool); diff --git a/dpdk/app/test/test_cryptodev_aes_test_vectors.h b/dpdk/app/test/test_cryptodev_aes_test_vectors.h index ea7b21ce53..f3686beeb5 100644 --- a/dpdk/app/test/test_cryptodev_aes_test_vectors.h @@ -22841,10 +24424,126 @@ index ea7b21ce53..f3686beeb5 100644 .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT, .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP | diff --git a/dpdk/app/test/test_cryptodev_asym.c b/dpdk/app/test/test_cryptodev_asym.c -index c58c7f488b..67659cd1a6 100644 +index c58c7f488b..4c86a2bcec 100644 --- a/dpdk/app/test/test_cryptodev_asym.c +++ b/dpdk/app/test/test_cryptodev_asym.c -@@ -1602,7 +1602,7 @@ error_exit: +@@ -54,11 +54,15 @@ union test_case_structure { + struct rsa_test_data_2 rsa_data; + }; + ++struct vector_details { ++ uint32_t vector_size; ++ const void *address; ++}; + struct test_cases_array { + uint32_t size; +- const void *address[TEST_VECTOR_SIZE]; ++ struct vector_details details[TEST_VECTOR_SIZE]; + }; +-static struct test_cases_array test_vector = {0, { NULL } }; ++static struct test_cases_array test_vector = {0, {} }; + + static uint32_t test_index; + +@@ -513,14 +517,14 @@ error_exit: + } + + static int +-test_one_case(const void *test_case, int sessionless) ++test_one_case(struct vector_details test_case, int sessionless) + { + int status = TEST_SUCCESS, i = 0; + char test_msg[ASYM_TEST_MSG_LEN + 1]; + + /* Map the case to union */ + union test_case_structure tc; +- memcpy(&tc, test_case, sizeof(tc)); ++ rte_memcpy(&tc, test_case.address, RTE_MIN(sizeof(tc), test_case.vector_size)); + + if (tc.modex.xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX + || tc.modex.xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { +@@ -572,7 +576,8 @@ load_test_vectors(void) + "TEST_VECTOR_SIZE too small\n"); + return -1; + } +- test_vector.address[test_vector.size] = &modex_test_case[i]; ++ test_vector.details[test_vector.size].address = &modex_test_case[i]; ++ test_vector.details[test_vector.size].vector_size = sizeof(modex_test_case[i]); + test_vector.size++; + } + /* Load MODINV vector*/ +@@ -583,7 +588,8 @@ load_test_vectors(void) + "TEST_VECTOR_SIZE too small\n"); + return -1; + } +- test_vector.address[test_vector.size] = &modinv_test_case[i]; ++ test_vector.details[test_vector.size].address = &modinv_test_case[i]; ++ test_vector.details[test_vector.size].vector_size = sizeof(modinv_test_case[i]); + test_vector.size++; + } + /* Load RSA vector*/ +@@ -594,7 +600,8 @@ load_test_vectors(void) + "TEST_VECTOR_SIZE too small\n"); + 
return -1; + } +- test_vector.address[test_vector.size] = &rsa_test_case_list[i]; ++ test_vector.details[test_vector.size].address = &rsa_test_case_list[i]; ++ test_vector.details[test_vector.size].vector_size = sizeof(rsa_test_case_list[i]); + test_vector.size++; + } + return 0; +@@ -619,12 +626,12 @@ test_one_by_one(void) + /* Go through all test cases */ + test_index = 0; + for (i = 0; i < test_vector.size; i++) { +- if (test_one_case(test_vector.address[i], 0) != TEST_SUCCESS) ++ if (test_one_case(test_vector.details[i], 0) != TEST_SUCCESS) + status = TEST_FAILED; + } + if (sessionless) { + for (i = 0; i < test_vector.size; i++) { +- if (test_one_case(test_vector.address[i], 1) ++ if (test_one_case(test_vector.details[i], 1) + != TEST_SUCCESS) + status = TEST_FAILED; + } +@@ -946,8 +953,6 @@ ut_setup_asym(void) + qp_id, ts_params->valid_devs[0]); + } + +- rte_cryptodev_stats_reset(ts_params->valid_devs[0]); +- + /* Start the device */ + TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->valid_devs[0]), + "Failed to start cryptodev %u", +@@ -960,9 +965,6 @@ static void + ut_teardown_asym(void) + { + struct crypto_testsuite_params_asym *ts_params = &testsuite_params; +- struct rte_cryptodev_stats stats; +- +- rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats); + + /* Stop the device */ + rte_cryptodev_stop(ts_params->valid_devs[0]); +@@ -1021,7 +1023,7 @@ test_capability(void) + RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) { + RTE_LOG(INFO, USER1, + "Device doesn't support asymmetric. Test Skipped\n"); +- return TEST_SUCCESS; ++ return TEST_SKIPPED; + } + + /* print xform capability */ +@@ -1036,6 +1038,7 @@ test_capability(void) + capa = rte_cryptodev_asym_capability_get(dev_id, + (const struct + rte_cryptodev_asym_capability_idx *) &idx); ++ TEST_ASSERT_NOT_NULL(capa, "Failed to get asymmetric capability"); + print_asym_capa(capa); + } + } +@@ -1602,7 +1605,7 @@ error_exit: } static int @@ -22853,7 +24552,7 @@ index c58c7f488b..67659cd1a6 100644 { int status; -@@ -2204,7 +2204,7 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { +@@ -2204,7 +2207,7 @@ static struct unit_test_suite cryptodev_openssl_asym_testsuite = { TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_capability), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, test_dsa), TEST_CASE_ST(ut_setup_asym, ut_teardown_asym, @@ -23956,6 +25655,19 @@ index 6fdc4cd9e3..56d4884529 100644 /* User Plane w/NULL enc. + SNOW f9 int. 
UL for 12-bit SN */ (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, +diff --git a/dpdk/app/test/test_eal_flags.c b/dpdk/app/test/test_eal_flags.c +index d2b91e2075..f4740ee6e5 100644 +--- a/dpdk/app/test/test_eal_flags.c ++++ b/dpdk/app/test/test_eal_flags.c +@@ -671,7 +671,7 @@ test_missing_c_flag(void) + launch_proc(argv26) == 0 || launch_proc(argv27) == 0 || + launch_proc(argv28) == 0 || launch_proc(argv30) == 0) { + printf("Error - " +- "process ran without error with invalid --lcore flag\n"); ++ "process ran without error with invalid --lcores flag\n"); + return -1; + } + diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c index a38e389abd..3d720fe68b 100644 --- a/dpdk/app/test/test_event_crypto_adapter.c @@ -23975,6 +25687,21 @@ index a38e389abd..3d720fe68b 100644 ret = rte_vdev_init( RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); +diff --git a/dpdk/app/test/test_event_eth_tx_adapter.c b/dpdk/app/test/test_event_eth_tx_adapter.c +index c19a87a86a..2aeb28d8e9 100644 +--- a/dpdk/app/test/test_event_eth_tx_adapter.c ++++ b/dpdk/app/test/test_event_eth_tx_adapter.c +@@ -482,6 +482,10 @@ tx_adapter_service(void) + int internal_port; + uint32_t cap; + ++ /* Initialize mbufs */ ++ for (i = 0; i < RING_SIZE; i++) ++ rte_pktmbuf_reset(&bufs[i]); ++ + memset(&dev_conf, 0, sizeof(dev_conf)); + err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); diff --git a/dpdk/app/test/test_event_timer_adapter.c b/dpdk/app/test/test_event_timer_adapter.c index 1a440dfd10..12d5936c60 100644 --- a/dpdk/app/test/test_event_timer_adapter.c @@ -24376,6 +26103,338 @@ index 1a440dfd10..12d5936c60 100644 TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n"); rte_mempool_put(eventdev_test_mempool, evtim); +diff --git a/dpdk/app/test/test_eventdev.c b/dpdk/app/test/test_eventdev.c +index 336529038e..7fbd99cb1c 100644 +--- a/dpdk/app/test/test_eventdev.c ++++ b/dpdk/app/test/test_eventdev.c +@@ -33,9 +33,15 @@ testsuite_setup(void) + uint8_t count; + count = rte_event_dev_count(); + if (!count) { ++ int ret; ++ + printf("Failed to find a valid event device," +- " testing with event_skeleton device\n"); +- return rte_vdev_init("event_skeleton", NULL); ++ " trying with event_skeleton device\n"); ++ ret = rte_vdev_init("event_skeleton", NULL); ++ if (ret != 0) { ++ printf("No event device, skipping\n"); ++ return TEST_SKIPPED; ++ } + } + return TEST_SUCCESS; + } +diff --git a/dpdk/app/test/test_fbarray.c b/dpdk/app/test/test_fbarray.c +index a691bf4458..8a3a3d77ab 100644 +--- a/dpdk/app/test/test_fbarray.c ++++ b/dpdk/app/test/test_fbarray.c +@@ -21,23 +21,41 @@ struct fbarray_testsuite_params { + }; + + static struct fbarray_testsuite_params param; ++static struct fbarray_testsuite_params unaligned; + + #define FBARRAY_TEST_ARR_NAME "fbarray_autotest" + #define FBARRAY_TEST_LEN 256 ++#define FBARRAY_UNALIGNED_TEST_ARR_NAME "fbarray_unaligned_autotest" ++#define FBARRAY_UNALIGNED_TEST_LEN 60 + #define FBARRAY_TEST_ELT_SZ (sizeof(int)) + + static int autotest_setup(void) + { +- return rte_fbarray_init(¶m.arr, FBARRAY_TEST_ARR_NAME, ++ int ret; ++ ++ ret = rte_fbarray_init(¶m.arr, FBARRAY_TEST_ARR_NAME, + FBARRAY_TEST_LEN, FBARRAY_TEST_ELT_SZ); ++ if (ret) { ++ printf("Failed to initialize test array\n"); ++ return -1; ++ } ++ ret = rte_fbarray_init(&unaligned.arr, FBARRAY_UNALIGNED_TEST_ARR_NAME, ++ FBARRAY_UNALIGNED_TEST_LEN, FBARRAY_TEST_ELT_SZ); ++ if (ret) { 
++ printf("Failed to initialize unaligned test array\n"); ++ rte_fbarray_destroy(¶m.arr); ++ return -1; ++ } ++ return 0; + } + + static void autotest_teardown(void) + { + rte_fbarray_destroy(¶m.arr); ++ rte_fbarray_destroy(&unaligned.arr); + } + +-static int init_array(void) ++static int init_aligned(void) + { + int i; + for (i = param.start; i <= param.end; i++) { +@@ -47,11 +65,35 @@ static int init_array(void) + return 0; + } + +-static void reset_array(void) ++static int init_unaligned(void) ++{ ++ int i; ++ for (i = unaligned.start; i <= unaligned.end; i++) { ++ if (rte_fbarray_set_used(&unaligned.arr, i)) ++ return -1; ++ } ++ return 0; ++} ++ ++static void reset_aligned(void) + { + int i; + for (i = 0; i < FBARRAY_TEST_LEN; i++) + rte_fbarray_set_free(¶m.arr, i); ++ /* reset param as well */ ++ param.start = -1; ++ param.end = -1; ++} ++ ++static void reset_unaligned(void) ++{ ++ int i; ++ for (i = 0; i < FBARRAY_UNALIGNED_TEST_LEN; i++) ++ rte_fbarray_set_free(&unaligned.arr, i); ++ /* reset param as well */ ++ unaligned.start = -1; ++ unaligned.end = -1; ++ + } + + static int first_msk_test_setup(void) +@@ -59,7 +101,7 @@ static int first_msk_test_setup(void) + /* put all within first mask */ + param.start = 3; + param.end = 10; +- return init_array(); ++ return init_aligned(); + } + + static int cross_msk_test_setup(void) +@@ -67,7 +109,7 @@ static int cross_msk_test_setup(void) + /* put all within second and third mask */ + param.start = 70; + param.end = 160; +- return init_array(); ++ return init_aligned(); + } + + static int multi_msk_test_setup(void) +@@ -75,7 +117,7 @@ static int multi_msk_test_setup(void) + /* put all within first and last mask */ + param.start = 3; + param.end = FBARRAY_TEST_LEN - 20; +- return init_array(); ++ return init_aligned(); + } + + static int last_msk_test_setup(void) +@@ -83,7 +125,7 @@ static int last_msk_test_setup(void) + /* put all within last mask */ + param.start = FBARRAY_TEST_LEN - 20; + param.end = FBARRAY_TEST_LEN - 1; +- return init_array(); ++ return init_aligned(); + } + + static int full_msk_test_setup(void) +@@ -91,16 +133,31 @@ static int full_msk_test_setup(void) + /* fill entire mask */ + param.start = 0; + param.end = FBARRAY_TEST_LEN - 1; +- return init_array(); ++ return init_aligned(); + } + +-static int empty_msk_test_setup(void) ++static int lookahead_test_setup(void) + { +- /* do not fill anything in */ +- reset_array(); +- param.start = -1; +- param.end = -1; +- return 0; ++ /* set index 64 as used */ ++ param.start = 64; ++ param.end = 64; ++ return init_aligned(); ++} ++ ++static int lookbehind_test_setup(void) ++{ ++ /* set index 63 as used */ ++ param.start = 63; ++ param.end = 63; ++ return init_aligned(); ++} ++ ++static int unaligned_test_setup(void) ++{ ++ unaligned.start = 0; ++ /* leave one free bit at the end */ ++ unaligned.end = FBARRAY_UNALIGNED_TEST_LEN - 2; ++ return init_unaligned(); + } + + static int test_invalid(void) +@@ -454,7 +511,7 @@ static int test_basic(void) + if (check_free()) + return TEST_FAILED; + +- reset_array(); ++ reset_aligned(); + + return TEST_SUCCESS; + } +@@ -697,6 +754,26 @@ static int test_find(void) + return TEST_SUCCESS; + } + ++static int test_find_unaligned(void) ++{ ++ TEST_ASSERT_EQUAL((int)unaligned.arr.count, unaligned.end - unaligned.start + 1, ++ "Wrong element count\n"); ++ /* ensure space is free before start */ ++ if (ensure_correct(&unaligned.arr, 0, unaligned.start - 1, false)) ++ return TEST_FAILED; ++ /* ensure space is occupied where it's supposed to be */ 
++ if (ensure_correct(&unaligned.arr, unaligned.start, unaligned.end, true)) ++ return TEST_FAILED; ++ /* ensure space after end is free as well */ ++ if (ensure_correct(&unaligned.arr, unaligned.end + 1, FBARRAY_UNALIGNED_TEST_LEN - 1, ++ false)) ++ return TEST_FAILED; ++ /* test if find_biggest API's work correctly */ ++ if (test_biggest(&unaligned.arr, unaligned.start, unaligned.end)) ++ return TEST_FAILED; ++ return TEST_SUCCESS; ++} ++ + static int test_empty(void) + { + TEST_ASSERT_EQUAL((int)param.arr.count, 0, "Wrong element count\n"); +@@ -709,6 +786,87 @@ static int test_empty(void) + return TEST_SUCCESS; + } + ++static int test_lookahead(void) ++{ ++ int ret; ++ ++ /* run regular test first */ ++ ret = test_find(); ++ if (ret != TEST_SUCCESS) ++ return ret; ++ ++ /* test if we can find free chunk while not starting with 0 */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(¶m.arr, 1, param.start), ++ param.start + 1, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} ++ ++static int test_lookbehind(void) ++{ ++ int ret, free_len = 2; ++ ++ /* run regular test first */ ++ ret = test_find(); ++ if (ret != TEST_SUCCESS) ++ return ret; ++ ++ /* test if we can find free chunk while crossing mask boundary */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(¶m.arr, param.start + 1, free_len), ++ param.start - free_len, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} ++ ++static int test_lookahead_mask(void) ++{ ++ /* ++ * There is a certain type of lookahead behavior we want to test here, ++ * namely masking of bits that were scanned with lookahead but that we ++ * know do not match our criteria. This is achieved in following steps: ++ * ++ * 0. Look for a big enough chunk of free space (say, 62 elements) ++ * 1. Trigger lookahead by breaking a run somewhere inside mask 0 ++ * (indices 0-63) ++ * 2. Fail lookahead by breaking the run somewhere inside mask 1 ++ * (indices 64-127) ++ * 3. Ensure that we can still find free space in mask 1 afterwards ++ */ ++ ++ /* break run on first mask */ ++ rte_fbarray_set_used(¶m.arr, 61); ++ /* break run on second mask */ ++ rte_fbarray_set_used(¶m.arr, 70); ++ ++ /* we expect to find free space at 71 */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(¶m.arr, 0, 62), ++ 71, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} ++ ++static int test_lookbehind_mask(void) ++{ ++ /* ++ * There is a certain type of lookbehind behavior we want to test here, ++ * namely masking of bits that were scanned with lookbehind but that we ++ * know do not match our criteria. This is achieved in two steps: ++ * ++ * 0. Look for a big enough chunk of free space (say, 62 elements) ++ * 1. Trigger lookbehind by breaking a run somewhere inside mask 2 ++ * (indices 128-191) ++ * 2. Fail lookbehind by breaking the run somewhere inside mask 1 ++ * (indices 64-127) ++ * 3. 
Ensure that we can still find free space in mask 1 afterwards ++ */ ++ ++ /* break run on mask 2 */ ++ rte_fbarray_set_used(¶m.arr, 130); ++ /* break run on mask 1 */ ++ rte_fbarray_set_used(¶m.arr, 70); ++ ++ /* start from 190, we expect to find free space at 8 */ ++ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(¶m.arr, 190, 62), ++ 8, "Free chunk index is wrong\n"); ++ return TEST_SUCCESS; ++} + + static struct unit_test_suite fbarray_test_suite = { + .suite_name = "fbarray autotest", +@@ -717,12 +875,19 @@ static struct unit_test_suite fbarray_test_suite = { + .unit_test_cases = { + TEST_CASE(test_invalid), + TEST_CASE(test_basic), +- TEST_CASE_ST(first_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(cross_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(multi_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(last_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(full_msk_test_setup, reset_array, test_find), +- TEST_CASE_ST(empty_msk_test_setup, reset_array, test_empty), ++ TEST_CASE_ST(first_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(cross_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(multi_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(last_msk_test_setup, reset_aligned, test_find), ++ TEST_CASE_ST(full_msk_test_setup, reset_aligned, test_find), ++ /* empty test does not need setup */ ++ TEST_CASE_ST(NULL, reset_aligned, test_empty), ++ TEST_CASE_ST(lookahead_test_setup, reset_aligned, test_lookahead), ++ TEST_CASE_ST(lookbehind_test_setup, reset_aligned, test_lookbehind), ++ /* setup for these tests is more complex so do it in test func */ ++ TEST_CASE_ST(NULL, reset_aligned, test_lookahead_mask), ++ TEST_CASE_ST(NULL, reset_aligned, test_lookbehind_mask), ++ TEST_CASE_ST(unaligned_test_setup, reset_unaligned, test_find_unaligned), + TEST_CASES_END() + } + }; diff --git a/dpdk/app/test/test_hash_readwrite.c b/dpdk/app/test/test_hash_readwrite.c index 6373e62d33..9cc5f3487c 100644 --- a/dpdk/app/test/test_hash_readwrite.c @@ -24495,7 +26554,7 @@ index de40e50611..ff081dd931 100644 } rte_free(mem); diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c -index 53fe898a38..7a2f8a9980 100644 +index 53fe898a38..7444be9399 100644 --- a/dpdk/app/test/test_mbuf.c +++ b/dpdk/app/test/test_mbuf.c @@ -1167,38 +1167,16 @@ test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool) @@ -24601,7 +26660,25 @@ index 53fe898a38..7a2f8a9980 100644 printf("Error with bad-refcnt(MAX) mbuf test\n"); return -1; } -@@ -2744,6 +2722,7 @@ test_nb_segs_and_next_reset(void) +@@ -2367,16 +2345,13 @@ test_pktmbuf_ext_shinfo_init_helper(struct rte_mempool *pktmbuf_pool) + GOTO_FAIL("%s: External buffer is not attached to mbuf\n", + __func__); + +- /* allocate one more mbuf */ ++ /* allocate one more mbuf, it is attached to the same external buffer */ + clone = rte_pktmbuf_clone(m, pktmbuf_pool); + if (clone == NULL) + GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__); + if (rte_pktmbuf_pkt_len(clone) != 0) + GOTO_FAIL("%s: Bad packet length\n", __func__); + +- /* attach the same external buffer to the cloned mbuf */ +- rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len, +- ret_shinfo); + if (clone->ol_flags != RTE_MBUF_F_EXTERNAL) + GOTO_FAIL("%s: External buffer is not attached to mbuf\n", + __func__); +@@ -2744,6 +2719,7 @@ test_nb_segs_and_next_reset(void) /* split m0 chain in two, between m1 and m2 */ m0->nb_segs = 2; @@ -24609,7 +26686,7 @@ index 53fe898a38..7a2f8a9980 100644 m1->next = NULL; 
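/*
 * Editor's note, not part of the patch: when the test here splits the
 * m0 -> m1 -> m2 chain by hand, every field that describes the chain
 * (next and nb_segs on each head) must stay mutually consistent, or
 * rte_pktmbuf_free() will walk and free the wrong segments.  A minimal
 * sketch of that invariant for a two-way split; the helper name and the
 * assumption that the detached tail is a single segment are illustrative.
 */
#include <rte_mbuf.h>

static void
split_after(struct rte_mbuf *head, struct rte_mbuf *last_kept,
	    struct rte_mbuf *tail, uint16_t kept_segs)
{
	last_kept->next = NULL;		/* cut the chain after last_kept */
	head->nb_segs = kept_segs;	/* head keeps kept_segs segments */
	tail->nb_segs = 1;		/* tail becomes a standalone mbuf */
}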
m2->nb_segs = 1; -@@ -2764,6 +2743,7 @@ test_nb_segs_and_next_reset(void) +@@ -2764,6 +2740,7 @@ test_nb_segs_and_next_reset(void) m2->nb_segs != 1 || m2->next != NULL) GOTO_FAIL("nb_segs or next was not reset properly"); @@ -24617,6 +26694,32 @@ index 53fe898a38..7a2f8a9980 100644 return 0; fail: +diff --git a/dpdk/app/test/test_power.c b/dpdk/app/test/test_power.c +index b7b5561348..a1b32adf58 100644 +--- a/dpdk/app/test/test_power.c ++++ b/dpdk/app/test/test_power.c +@@ -142,7 +142,7 @@ test_power(void) + /* Test setting a valid environment */ + ret = rte_power_set_env(envs[i]); + if (ret != 0) { +- printf("Unexpectedly unsucceeded on setting a valid environment\n"); ++ printf("Unexpectedly unsuccessful on setting a valid environment\n"); + return -1; + } + +diff --git a/dpdk/app/test/test_power_intel_uncore.c b/dpdk/app/test/test_power_intel_uncore.c +index 31163af84e..38c72fb371 100644 +--- a/dpdk/app/test/test_power_intel_uncore.c ++++ b/dpdk/app/test/test_power_intel_uncore.c +@@ -17,7 +17,7 @@ test_power_intel_uncore(void) + #include + #include + +-#define MAX_UNCORE_FREQS 32 ++#define MAX_UNCORE_FREQS 64 + + #define VALID_PKG 0 + #define VALID_DIE 0 diff --git a/dpdk/app/test/test_reorder.c b/dpdk/app/test/test_reorder.c index f0714a5c18..7b5e590bac 100644 --- a/dpdk/app/test/test_reorder.c @@ -25360,6 +27463,382 @@ index 003537e200..d0a4b948e4 100644 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +diff --git a/dpdk/buildtools/meson.build b/dpdk/buildtools/meson.build +index e1c600e40f..c4f6fe9271 100644 +--- a/dpdk/buildtools/meson.build ++++ b/dpdk/buildtools/meson.build +@@ -6,7 +6,11 @@ check_symbols = find_program('check-symbols.sh') + ldflags_ibverbs_static = find_program('options-ibverbs-static.sh') + objdump = find_program('objdump', 'llvm-objdump') + +-python3 = import('python').find_installation(required: false) ++python3_required_modules = [] ++if host_machine.system() != 'windows' ++ python3_required_modules = ['elftools'] ++endif ++python3 = import('python').find_installation('python3', required: false, modules: python3_required_modules) + if python3.found() + py3 = [python3] + else +@@ -39,16 +43,3 @@ else + pmdinfogen += 'elf' + endif + +-# TODO: starting from Meson 0.51.0 use +-# python3 = import('python').find_installation('python', +-# modules : python3_required_modules) +-python3_required_modules = [] +-if host_machine.system() != 'windows' +- python3_required_modules = ['elftools'] +-endif +-foreach module : python3_required_modules +- script = 'import importlib.util; import sys; exit(importlib.util.find_spec("@0@") is None)' +- if run_command(py3, '-c', script.format(module), check: false).returncode() != 0 +- error('missing python module: @0@'.format(module)) +- endif +-endforeach +diff --git a/dpdk/buildtools/pmdinfogen.py b/dpdk/buildtools/pmdinfogen.py +index 2a44f17bda..dfb89500c0 100755 +--- a/dpdk/buildtools/pmdinfogen.py ++++ b/dpdk/buildtools/pmdinfogen.py +@@ -6,6 +6,7 @@ + import argparse + import ctypes + import json ++import re + import sys + import tempfile + +@@ -66,11 +67,11 @@ class ELFImage: + return [symbol] + return None + +- def find_by_prefix(self, prefix): +- prefix = prefix.encode("utf-8") if self._legacy_elftools else prefix ++ def find_by_pattern(self, pattern): ++ pattern = pattern.encode("utf-8") if self._legacy_elftools else pattern + for i in range(self._symtab.num_symbols()): + symbol = self._symtab.get_symbol(i) +- if 
symbol.name.startswith(prefix): ++ if re.match(pattern, symbol.name): + yield ELFSymbol(self._image, symbol) + + +@@ -97,9 +98,9 @@ class COFFImage: + def is_big_endian(self): + return False + +- def find_by_prefix(self, prefix): ++ def find_by_pattern(self, pattern): + for symbol in self._image.symbols: +- if symbol.name.startswith(prefix): ++ if re.match(pattern, symbol.name): + yield COFFSymbol(self._image, symbol) + + def find_by_name(self, name): +@@ -199,7 +200,7 @@ class Driver: + + def load_drivers(image): + drivers = [] +- for symbol in image.find_by_prefix("this_pmd_name"): ++ for symbol in image.find_by_pattern("^this_pmd_name[0-9]+$"): + drivers.append(Driver.load(image, symbol)) + return drivers + +diff --git a/dpdk/buildtools/subproject/meson.build b/dpdk/buildtools/subproject/meson.build +index 3192efaa40..203c5d36c6 100644 +--- a/dpdk/buildtools/subproject/meson.build ++++ b/dpdk/buildtools/subproject/meson.build +@@ -2,18 +2,23 @@ + # Copyright(c) 2022 Intel Corporation + + message('DPDK subproject linking: ' + get_option('default_library')) ++subproject_cflags = ['-include', 'rte_config.h'] + machine_args ++if is_freebsd ++ subproject_cflags += ['-D__BSD_VISIBLE'] ++endif + if get_option('default_library') == 'static' + dpdk_dep = declare_dependency( + version: meson.project_version(), + dependencies: dpdk_static_lib_deps, ++ compile_args: subproject_cflags, + # static library deps in DPDK build don't include "link_with" parameters, + # so explicitly link-in both libs and drivers +- link_with: dpdk_static_libraries, +- link_whole: dpdk_drivers, ++ link_whole: dpdk_static_libraries + dpdk_drivers, + link_args: dpdk_extra_ldflags) + else + dpdk_dep = declare_dependency( + version: meson.project_version(), ++ compile_args: subproject_cflags, + # shared library deps include all necessary linking parameters + dependencies: dpdk_shared_lib_deps) + endif +diff --git a/dpdk/config/arm/arm32_armv8_linux_gcc b/dpdk/config/arm/arm32_armv8_linux_gcc +index 269a60ba19..abcb182b16 100644 +--- a/dpdk/config/arm/arm32_armv8_linux_gcc ++++ b/dpdk/config/arm/arm32_armv8_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'arm-linux-gnueabihf-g++'] + ar = 'arm-linux-gnueabihf-gcc-ar' + strip = 'arm-linux-gnueabihf-strip' + pkgconfig = 'arm-linux-gnueabihf-pkg-config' ++pkg-config = 'arm-linux-gnueabihf-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_armada_linux_gcc b/dpdk/config/arm/arm64_armada_linux_gcc +index 635b4946a3..8f36d895da 100644 +--- a/dpdk/config/arm/arm64_armada_linux_gcc ++++ b/dpdk/config/arm/arm64_armada_linux_gcc +@@ -5,6 +5,7 @@ ar = 'aarch64-linux-gnu-ar' + as = 'aarch64-linux-gnu-as' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu +index 86ae43937b..e34fabebe5 100644 +--- a/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu ++++ b/dpdk/config/arm/arm64_armv8_linux_clang_ubuntu +@@ -6,6 +6,7 @@ strip = 'llvm-strip' + llvm-config = 'llvm-config' + pcap-config = 'llvm-config' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + + [host_machine] + system = 'linux' +diff --git a/dpdk/config/arm/arm64_armv8_linux_gcc b/dpdk/config/arm/arm64_armv8_linux_gcc +index 529694b49d..a9b136cf48 100644 +--- a/dpdk/config/arm/arm64_armv8_linux_gcc ++++ b/dpdk/config/arm/arm64_armv8_linux_gcc +@@ 
-4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_bluefield_linux_gcc b/dpdk/config/arm/arm64_bluefield_linux_gcc +index 1286227915..bcffb6534b 100644 +--- a/dpdk/config/arm/arm64_bluefield_linux_gcc ++++ b/dpdk/config/arm/arm64_bluefield_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_centriq2400_linux_gcc b/dpdk/config/arm/arm64_centriq2400_linux_gcc +index bc8737e072..33cb5ef675 100644 +--- a/dpdk/config/arm/arm64_centriq2400_linux_gcc ++++ b/dpdk/config/arm/arm64_centriq2400_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_cn10k_linux_gcc b/dpdk/config/arm/arm64_cn10k_linux_gcc +index 05d2d64cf2..586af83031 100644 +--- a/dpdk/config/arm/arm64_cn10k_linux_gcc ++++ b/dpdk/config/arm/arm64_cn10k_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_cn9k_linux_gcc b/dpdk/config/arm/arm64_cn9k_linux_gcc +index 7416454de0..7e77ee06ae 100644 +--- a/dpdk/config/arm/arm64_cn9k_linux_gcc ++++ b/dpdk/config/arm/arm64_cn9k_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_dpaa_linux_gcc b/dpdk/config/arm/arm64_dpaa_linux_gcc +index 8465b5097b..bf0eab18e2 100644 +--- a/dpdk/config/arm/arm64_dpaa_linux_gcc ++++ b/dpdk/config/arm/arm64_dpaa_linux_gcc +@@ -5,6 +5,7 @@ ar = 'aarch64-linux-gnu-ar' + as = 'aarch64-linux-gnu-as' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_emag_linux_gcc b/dpdk/config/arm/arm64_emag_linux_gcc +index 248169ed68..3e8c15f8a4 100644 +--- a/dpdk/config/arm/arm64_emag_linux_gcc ++++ b/dpdk/config/arm/arm64_emag_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_ft2000plus_linux_gcc b/dpdk/config/arm/arm64_ft2000plus_linux_gcc +index ae9f779056..4ecb251d3f 100644 +--- a/dpdk/config/arm/arm64_ft2000plus_linux_gcc ++++ b/dpdk/config/arm/arm64_ft2000plus_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 
'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_graviton2_linux_gcc b/dpdk/config/arm/arm64_graviton2_linux_gcc +index fdb298bb11..b5f681bc27 100644 +--- a/dpdk/config/arm/arm64_graviton2_linux_gcc ++++ b/dpdk/config/arm/arm64_graviton2_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_graviton3_linux_gcc b/dpdk/config/arm/arm64_graviton3_linux_gcc +index 19b422075d..77b5168836 100644 +--- a/dpdk/config/arm/arm64_graviton3_linux_gcc ++++ b/dpdk/config/arm/arm64_graviton3_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_kunpeng920_linux_gcc b/dpdk/config/arm/arm64_kunpeng920_linux_gcc +index 193fb48a61..6e7d8ac667 100644 +--- a/dpdk/config/arm/arm64_kunpeng920_linux_gcc ++++ b/dpdk/config/arm/arm64_kunpeng920_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_kunpeng930_linux_gcc b/dpdk/config/arm/arm64_kunpeng930_linux_gcc +index e4281ceb4f..61fe482a2e 100644 +--- a/dpdk/config/arm/arm64_kunpeng930_linux_gcc ++++ b/dpdk/config/arm/arm64_kunpeng930_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_n1sdp_linux_gcc b/dpdk/config/arm/arm64_n1sdp_linux_gcc +index 2806a4241b..c3c12098d8 100644 +--- a/dpdk/config/arm/arm64_n1sdp_linux_gcc ++++ b/dpdk/config/arm/arm64_n1sdp_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_n2_linux_gcc b/dpdk/config/arm/arm64_n2_linux_gcc +index 7404bd197b..89200861c4 100644 +--- a/dpdk/config/arm/arm64_n2_linux_gcc ++++ b/dpdk/config/arm/arm64_n2_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_stingray_linux_gcc b/dpdk/config/arm/arm64_stingray_linux_gcc +index 08148b5c3d..358f3b43d3 100644 +--- a/dpdk/config/arm/arm64_stingray_linux_gcc ++++ b/dpdk/config/arm/arm64_stingray_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_thunderx2_linux_gcc 
b/dpdk/config/arm/arm64_thunderx2_linux_gcc +index 32ae938e95..124a97da01 100644 +--- a/dpdk/config/arm/arm64_thunderx2_linux_gcc ++++ b/dpdk/config/arm/arm64_thunderx2_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_thunderxt83_linux_gcc b/dpdk/config/arm/arm64_thunderxt83_linux_gcc +index e9d9e62d44..433c3cb4e3 100644 +--- a/dpdk/config/arm/arm64_thunderxt83_linux_gcc ++++ b/dpdk/config/arm/arm64_thunderxt83_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_thunderxt88_linux_gcc b/dpdk/config/arm/arm64_thunderxt88_linux_gcc +index c6e5a5656a..81975aea14 100644 +--- a/dpdk/config/arm/arm64_thunderxt88_linux_gcc ++++ b/dpdk/config/arm/arm64_thunderxt88_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] +diff --git a/dpdk/config/arm/arm64_tys2500_linux_gcc b/dpdk/config/arm/arm64_tys2500_linux_gcc +index fce85fb0d8..24e6539a15 100644 +--- a/dpdk/config/arm/arm64_tys2500_linux_gcc ++++ b/dpdk/config/arm/arm64_tys2500_linux_gcc +@@ -4,6 +4,7 @@ cpp = ['ccache', 'aarch64-linux-gnu-g++'] + ar = 'aarch64-linux-gnu-gcc-ar' + strip = 'aarch64-linux-gnu-strip' + pkgconfig = 'aarch64-linux-gnu-pkg-config' ++pkg-config = 'aarch64-linux-gnu-pkg-config' + pcap-config = '' + + [host_machine] diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build index 6442ec9596..5028c74613 100644 --- a/dpdk/config/arm/meson.build @@ -25426,10 +27905,30 @@ index 6442ec9596..5028c74613 100644 else warning('Configuration compiler option ' + diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build -index 6d9ffd4f4b..265aaa995c 100644 +index 6d9ffd4f4b..c755083db0 100644 --- a/dpdk/config/meson.build +++ b/dpdk/config/meson.build -@@ -122,7 +122,7 @@ if cpu_instruction_set == 'generic' +@@ -91,13 +91,14 @@ else + cpu_instruction_set = 'generic' + endif + endif ++ if platform == 'native' ++ if cpu_instruction_set == 'auto' ++ cpu_instruction_set = 'native' ++ endif ++ endif + endif + +-if platform == 'native' +- if cpu_instruction_set == 'auto' +- cpu_instruction_set = 'native' +- endif +-elif platform == 'generic' ++if platform == 'generic' + if cpu_instruction_set == 'auto' + cpu_instruction_set = 'generic' + endif +@@ -122,7 +123,7 @@ if cpu_instruction_set == 'generic' elif host_machine.cpu_family().startswith('ppc') cpu_instruction_set = 'power8' elif host_machine.cpu_family().startswith('riscv') @@ -25438,7 +27937,7 @@ index 6d9ffd4f4b..265aaa995c 100644 endif endif -@@ -139,7 +139,7 @@ endif +@@ -139,13 +140,16 @@ endif toolchain = cc.get_id() dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain) @@ -25447,7 +27946,16 @@ index 6d9ffd4f4b..265aaa995c 100644 dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8) dpdk_conf.set('RTE_ARCH_32', cc.sizeof('void *') == 4) -@@ -191,7 +191,7 @@ if find_libnuma + + if not is_windows + add_project_link_arguments('-Wl,--no-as-needed', language: 'c') 
++ if cc.has_link_argument('-Wl,--undefined-version') ++ add_project_link_arguments('-Wl,--undefined-version', language: 'c') ++ endif + endif + + # use pthreads if available for the platform +@@ -191,7 +195,7 @@ if find_libnuma endif has_libfdt = 0 @@ -25456,7 +27964,7 @@ index 6d9ffd4f4b..265aaa995c 100644 if fdt_dep.found() and cc.has_header('fdt.h') dpdk_conf.set10('RTE_HAS_LIBFDT', true) has_libfdt = 1 -@@ -199,11 +199,12 @@ if fdt_dep.found() and cc.has_header('fdt.h') +@@ -199,11 +203,12 @@ if fdt_dep.found() and cc.has_header('fdt.h') dpdk_extra_ldflags += '-lfdt' endif @@ -25471,7 +27979,7 @@ index 6d9ffd4f4b..265aaa995c 100644 libarchive = dependency('libarchive', required: false, method: 'pkg-config') if libarchive.found() -@@ -365,7 +366,7 @@ if max_numa_nodes == 'detect' +@@ -365,7 +370,7 @@ if max_numa_nodes == 'detect' error('Discovery of max_numa_nodes not supported for cross-compilation.') endif # overwrite the default value with discovered values @@ -25492,6 +28000,18 @@ index 3c4876d434..7b8c85e948 100644 #define RTE_MAX_VFIO_CONTAINERS 64 /* bsd module defines */ +diff --git a/dpdk/config/x86/cross-mingw b/dpdk/config/x86/cross-mingw +index cddebda5b5..11597eaa26 100644 +--- a/dpdk/config/x86/cross-mingw ++++ b/dpdk/config/x86/cross-mingw +@@ -5,6 +5,7 @@ ld = 'x86_64-w64-mingw32-ld' + ar = 'x86_64-w64-mingw32-ar' + strip = 'x86_64-w64-mingw32-strip' + pkgconfig = 'x86_64-w64-mingw32-pkg-config' ++pkg-config = 'x86_64-w64-mingw32-pkg-config' + objdump = 'x86_64-w64-mingw32-objdump' + + [host_machine] diff --git a/dpdk/devtools/check-git-log.sh b/dpdk/devtools/check-git-log.sh index 01d8aa0717..2ee7f2db64 100755 --- a/dpdk/devtools/check-git-log.sh @@ -25601,6 +28121,38 @@ index f0886c3bd1..dd8ebab447 100644 HTML_DYNAMIC_SECTIONS = YES HTML_EXTRA_STYLESHEET = @TOPDIR@/doc/api/custom.css SEARCHENGINE = YES +diff --git a/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst b/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst +index 9d71585e9e..950c5dfb5a 100644 +--- a/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst ++++ b/dpdk/doc/guides/bbdevs/fpga_5gnr_fec.rst +@@ -165,7 +165,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure: + uint8_t dl_bandwidth; + uint8_t ul_load_balance; + uint8_t dl_load_balance; +- uint16_t flr_time_out; + }; + + - ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and +@@ -191,10 +190,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure: + If all hardware queues exceeds the watermark, no code blocks will be + streamed in from UL/DL code block FIFO. + +-- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The +- time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for +- the FLR time out then set this setting to 0x262=610. +- + + An example configuration code calling the function ``rte_fpga_5gnr_fec_configure()`` is shown + below: +@@ -219,7 +214,7 @@ below: + /* setup FPGA PF */ + ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf); + TEST_ASSERT_SUCCESS(ret, +- "Failed to configure 4G FPGA PF for bbdev %s", ++ "Failed to configure 5GNR FPGA PF for bbdev %s", + info->dev_name); + + diff --git a/dpdk/doc/guides/conf.py b/dpdk/doc/guides/conf.py index a55ce38800..0f7ff5282d 100644 --- a/dpdk/doc/guides/conf.py @@ -25613,6 +28165,31 @@ index a55ce38800..0f7ff5282d 100644 print('.. 
table:: ' + table_name + '\n', file=outfile) print_table_header(outfile, num_cols, header_names, title) print_table_body(outfile, num_cols, ini_files, ini_data, default_features) +diff --git a/dpdk/doc/guides/cryptodevs/overview.rst b/dpdk/doc/guides/cryptodevs/overview.rst +index d754b0cfc6..b068d0d19c 100644 +--- a/dpdk/doc/guides/cryptodevs/overview.rst ++++ b/dpdk/doc/guides/cryptodevs/overview.rst +@@ -20,17 +20,17 @@ Supported Feature Flags + - "OOP SGL In SGL Out" feature flag stands for + "Out-of-place Scatter-gather list Input, Scatter-gather list Output", + which means PMD supports different scatter-gather styled input and output buffers +- (i.e. both can consists of multiple segments). ++ (i.e. both can consist of multiple segments). + + - "OOP SGL In LB Out" feature flag stands for + "Out-of-place Scatter-gather list Input, Linear Buffers Output", +- which means PMD supports input from scatter-gathered styled buffers, ++ which means PMD supports input from scatter-gather styled buffers, + outputting linear buffers (i.e. single segment). + + - "OOP LB In SGL Out" feature flag stands for + "Out-of-place Linear Buffers Input, Scatter-gather list Output", + which means PMD supports input from linear buffer, outputting +- scatter-gathered styled buffers. ++ scatter-gather styled buffers. + + - "OOP LB In LB Out" feature flag stands for + "Out-of-place Linear Buffers Input, Linear Buffers Output", diff --git a/dpdk/doc/guides/cryptodevs/qat.rst b/dpdk/doc/guides/cryptodevs/qat.rst index d1e64475c4..b1b893a251 100644 --- a/dpdk/doc/guides/cryptodevs/qat.rst @@ -25629,6 +28206,18 @@ index d1e64475c4..b1b893a251 100644 +-----+-----+-----+-----+----------+---------------+---------------+------------+--------+------+--------+--------+ * Note: Symmetric mixed crypto algorithms feature on Gen 2 works only with IDZ driver version 4.9.0+ +diff --git a/dpdk/doc/guides/dmadevs/hisilicon.rst b/dpdk/doc/guides/dmadevs/hisilicon.rst +index 8c1f0f8886..974bc49376 100644 +--- a/dpdk/doc/guides/dmadevs/hisilicon.rst ++++ b/dpdk/doc/guides/dmadevs/hisilicon.rst +@@ -13,7 +13,6 @@ Supported Kunpeng SoCs + ---------------------- + + * Kunpeng 920 +-* Kunpeng 930 + + + Device Setup diff --git a/dpdk/doc/guides/gpus/cuda.rst b/dpdk/doc/guides/gpus/cuda.rst index 114e3bc8cb..6520c17c3e 100644 --- a/dpdk/doc/guides/gpus/cuda.rst @@ -25703,7 +28292,7 @@ index 114e3bc8cb..6520c17c3e 100644 l2fwd-nv is not intended to be used for performance (testpmd is the good candidate for this). diff --git a/dpdk/doc/guides/linux_gsg/enable_func.rst b/dpdk/doc/guides/linux_gsg/enable_func.rst -index 829084d80e..2344d97403 100644 +index 829084d80e..5511640cb8 100644 --- a/dpdk/doc/guides/linux_gsg/enable_func.rst +++ b/dpdk/doc/guides/linux_gsg/enable_func.rst @@ -55,12 +55,12 @@ Refer to the `documentation `_ +-section to learn how these limits affect EAL. ++See :ref:`Hugepage Mapping ` section to learn how these limits affect EAL. 
+ + Device Control + ~~~~~~~~~~~~~~ diff --git a/dpdk/doc/guides/linux_gsg/sys_reqs.rst b/dpdk/doc/guides/linux_gsg/sys_reqs.rst index a7e8261e22..dfeaf4e1c5 100644 --- a/dpdk/doc/guides/linux_gsg/sys_reqs.rst @@ -25766,23 +28365,388 @@ index 293eab8787..871d14142c 100644 Listed below are the rte_flow actions supported: +diff --git a/dpdk/doc/guides/nics/features.rst b/dpdk/doc/guides/nics/features.rst +index 1a1dc16c1e..4332ff0a31 100644 +--- a/dpdk/doc/guides/nics/features.rst ++++ b/dpdk/doc/guides/nics/features.rst +@@ -34,6 +34,17 @@ Supports getting the speed capabilities that the current device is capable of. + * **[related] API**: ``rte_eth_dev_info_get()``. + + ++.. _nic_features_link_speeds_config: ++ ++Link speed configuration ++------------------------ ++ ++Supports configurating fixed speed and link autonegotiation. ++ ++* **[uses] user config**: ``dev_conf.link_speeds:RTE_ETH_LINK_SPEED_*``. ++* **[related] API**: ``rte_eth_dev_configure()``. ++ ++ + .. _nic_features_link_status: + + Link status +@@ -740,6 +751,19 @@ Supports congestion management. + ``rte_eth_cman_config_set()``, ``rte_eth_cman_config_get()``. + + ++.. _nic_features_traffic_manager: ++ ++Traffic manager ++--------------- ++ ++Supports Traffic manager. ++ ++* **[implements] rte_tm_ops**: ``capabilities_get``, ``shaper_profile_add``, ++ ``hierarchy_commit`` and so on. ++* **[related] API**: ``rte_tm_capabilities_get()``, ``rte_tm_shaper_profile_add()``, ++ ``rte_tm_hierarchy_commit()`` and so on. ++ ++ + .. _nic_features_fw_version: + + FW version +diff --git a/dpdk/doc/guides/nics/features/atlantic.ini b/dpdk/doc/guides/nics/features/atlantic.ini +index ef4155027c..29969c1493 100644 +--- a/dpdk/doc/guides/nics/features/atlantic.ini ++++ b/dpdk/doc/guides/nics/features/atlantic.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Queue start/stop = Y +diff --git a/dpdk/doc/guides/nics/features/bnxt.ini b/dpdk/doc/guides/nics/features/bnxt.ini +index 50a0b5bfa6..e7bb800f08 100644 +--- a/dpdk/doc/guides/nics/features/bnxt.ini ++++ b/dpdk/doc/guides/nics/features/bnxt.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +diff --git a/dpdk/doc/guides/nics/features/cnxk.ini b/dpdk/doc/guides/nics/features/cnxk.ini +index f81628da77..f0ead29134 100644 +--- a/dpdk/doc/guides/nics/features/cnxk.ini ++++ b/dpdk/doc/guides/nics/features/cnxk.ini +@@ -28,6 +28,7 @@ RSS key update = Y + RSS reta update = Y + Inner RSS = Y + Congestion management = Y ++Traffic manager = Y + Inline protocol = Y + Flow control = Y + Scattered Rx = Y +diff --git a/dpdk/doc/guides/nics/features/default.ini b/dpdk/doc/guides/nics/features/default.ini +index 510cc6679d..013c360ddf 100644 +--- a/dpdk/doc/guides/nics/features/default.ini ++++ b/dpdk/doc/guides/nics/features/default.ini +@@ -8,6 +8,7 @@ + ; + [Features] + Speed capabilities = ++Link speed configuration = + Link status = + Link status event = + Removal event = +@@ -42,6 +43,7 @@ VLAN filter = + Flow control = + Rate limitation = + Congestion management = ++Traffic manager = + Inline crypto = + Inline protocol = + CRC offload = +diff --git a/dpdk/doc/guides/nics/features/dpaa.ini b/dpdk/doc/guides/nics/features/dpaa.ini +index a382c7160c..b136ed191a 100644 +--- a/dpdk/doc/guides/nics/features/dpaa.ini ++++ b/dpdk/doc/guides/nics/features/dpaa.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities 
= Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Burst mode info = Y +diff --git a/dpdk/doc/guides/nics/features/dpaa2.ini b/dpdk/doc/guides/nics/features/dpaa2.ini +index 26dc8c2178..f02da463d9 100644 +--- a/dpdk/doc/guides/nics/features/dpaa2.ini ++++ b/dpdk/doc/guides/nics/features/dpaa2.ini +@@ -17,6 +17,7 @@ Unicast MAC filter = Y + RSS hash = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + VLAN offload = Y + L3 checksum offload = Y + L4 checksum offload = Y +diff --git a/dpdk/doc/guides/nics/features/hns3.ini b/dpdk/doc/guides/nics/features/hns3.ini +index 338b4e6864..8b623d3077 100644 +--- a/dpdk/doc/guides/nics/features/hns3.ini ++++ b/dpdk/doc/guides/nics/features/hns3.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -28,6 +29,7 @@ RSS reta update = Y + DCB = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + FEC = Y +diff --git a/dpdk/doc/guides/nics/features/i40e.ini b/dpdk/doc/guides/nics/features/i40e.ini +index e241dad047..ef7514c44b 100644 +--- a/dpdk/doc/guides/nics/features/i40e.ini ++++ b/dpdk/doc/guides/nics/features/i40e.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -27,6 +28,7 @@ SR-IOV = Y + DCB = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + QinQ offload = P diff --git a/dpdk/doc/guides/nics/features/iavf.ini b/dpdk/doc/guides/nics/features/iavf.ini -index 9db2865b71..5cdf0ddee6 100644 +index 9db2865b71..c7f8ace499 100644 --- a/dpdk/doc/guides/nics/features/iavf.ini +++ b/dpdk/doc/guides/nics/features/iavf.ini -@@ -21,7 +21,7 @@ RSS key update = Y +@@ -20,8 +20,10 @@ RSS hash = Y + RSS key update = Y RSS reta update = Y VLAN filter = Y ++Traffic manager = Y ++Inline crypto = Y CRC offload = Y -VLAN offload = Y +VLAN offload = P L3 checksum offload = P L4 checksum offload = P Timestamp offload = P +@@ -30,7 +32,6 @@ Inner L4 checksum = P + Packet type parsing = Y + Rx descriptor status = Y + Tx descriptor status = Y +-Inline crypto = Y + Basic stats = Y + Multiprocess aware = Y + FreeBSD = Y +diff --git a/dpdk/doc/guides/nics/features/ice.ini b/dpdk/doc/guides/nics/features/ice.ini +index 13f8871dcc..62869ef0a0 100644 +--- a/dpdk/doc/guides/nics/features/ice.ini ++++ b/dpdk/doc/guides/nics/features/ice.ini +@@ -8,6 +8,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -26,6 +27,7 @@ RSS hash = Y + RSS key update = Y + RSS reta update = Y + VLAN filter = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + QinQ offload = P +diff --git a/dpdk/doc/guides/nics/features/ice_dcf.ini b/dpdk/doc/guides/nics/features/ice_dcf.ini +index 3b11622d4c..0e86338990 100644 +--- a/dpdk/doc/guides/nics/features/ice_dcf.ini ++++ b/dpdk/doc/guides/nics/features/ice_dcf.ini +@@ -22,6 +22,7 @@ Promiscuous mode = Y + Allmulticast mode = Y + Unicast MAC filter = Y + VLAN filter = Y ++Traffic manager = Y + VLAN offload = Y + Extended stats = Y + Basic stats = Y +diff --git a/dpdk/doc/guides/nics/features/igb.ini b/dpdk/doc/guides/nics/features/igb.ini +index 7b4af6f86c..ee2408f3ee 100644 +--- a/dpdk/doc/guides/nics/features/igb.ini ++++ b/dpdk/doc/guides/nics/features/igb.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities 
= P ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +diff --git a/dpdk/doc/guides/nics/features/igc.ini b/dpdk/doc/guides/nics/features/igc.ini +index b5deea3f61..a43b8eaefd 100644 +--- a/dpdk/doc/guides/nics/features/igc.ini ++++ b/dpdk/doc/guides/nics/features/igc.ini +@@ -4,6 +4,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + FW version = Y +diff --git a/dpdk/doc/guides/nics/features/ionic.ini b/dpdk/doc/guides/nics/features/ionic.ini +index af0fc5462a..64b2316288 100644 +--- a/dpdk/doc/guides/nics/features/ionic.ini ++++ b/dpdk/doc/guides/nics/features/ionic.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Fast mbuf free = Y +diff --git a/dpdk/doc/guides/nics/features/ipn3ke.ini b/dpdk/doc/guides/nics/features/ipn3ke.ini +index 1f6b780273..e412978820 100644 +--- a/dpdk/doc/guides/nics/features/ipn3ke.ini ++++ b/dpdk/doc/guides/nics/features/ipn3ke.ini +@@ -25,6 +25,7 @@ SR-IOV = Y + DCB = Y + VLAN filter = Y + Flow control = Y ++Traffic manager = Y + CRC offload = Y + VLAN offload = Y + QinQ offload = Y +diff --git a/dpdk/doc/guides/nics/features/ixgbe.ini b/dpdk/doc/guides/nics/features/ixgbe.ini +index 8590ac857f..cb9331dbcd 100644 +--- a/dpdk/doc/guides/nics/features/ixgbe.ini ++++ b/dpdk/doc/guides/nics/features/ixgbe.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -27,6 +28,7 @@ DCB = Y + VLAN filter = Y + Flow control = Y + Rate limitation = Y ++Traffic manager = Y + Inline crypto = Y + CRC offload = P + VLAN offload = P +diff --git a/dpdk/doc/guides/nics/features/mvpp2.ini b/dpdk/doc/guides/nics/features/mvpp2.ini +index 653c9d08cb..ccc2c2d4f8 100644 +--- a/dpdk/doc/guides/nics/features/mvpp2.ini ++++ b/dpdk/doc/guides/nics/features/mvpp2.ini +@@ -12,8 +12,9 @@ Allmulticast mode = Y + Unicast MAC filter = Y + Multicast MAC filter = Y + RSS hash = Y +-Flow control = Y + VLAN filter = Y ++Flow control = Y ++Traffic manager = Y + CRC offload = Y + L3 checksum offload = Y + L4 checksum offload = Y +diff --git a/dpdk/doc/guides/nics/features/ngbe.ini b/dpdk/doc/guides/nics/features/ngbe.ini +index 2701c5f051..1dfd92e96b 100644 +--- a/dpdk/doc/guides/nics/features/ngbe.ini ++++ b/dpdk/doc/guides/nics/features/ngbe.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Free Tx mbuf on demand = Y +diff --git a/dpdk/doc/guides/nics/features/octeontx.ini b/dpdk/doc/guides/nics/features/octeontx.ini +index fa1e18b120..46ae8318a9 100644 +--- a/dpdk/doc/guides/nics/features/octeontx.ini ++++ b/dpdk/doc/guides/nics/features/octeontx.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Lock-free Tx queue = Y +diff --git a/dpdk/doc/guides/nics/features/sfc.ini b/dpdk/doc/guides/nics/features/sfc.ini +index f5ac644278..1cb294384a 100644 +--- a/dpdk/doc/guides/nics/features/sfc.ini ++++ b/dpdk/doc/guides/nics/features/sfc.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +diff --git a/dpdk/doc/guides/nics/features/thunderx.ini b/dpdk/doc/guides/nics/features/thunderx.ini +index b33bb37c82..2ab8db7239 100644 +--- 
a/dpdk/doc/guides/nics/features/thunderx.ini ++++ b/dpdk/doc/guides/nics/features/thunderx.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Queue start/stop = Y +diff --git a/dpdk/doc/guides/nics/features/txgbe.ini b/dpdk/doc/guides/nics/features/txgbe.ini +index e21083052c..be0af3dfad 100644 +--- a/dpdk/doc/guides/nics/features/txgbe.ini ++++ b/dpdk/doc/guides/nics/features/txgbe.ini +@@ -5,6 +5,7 @@ + ; + [Features] + Speed capabilities = Y ++Link speed configuration = Y + Link status = Y + Link status event = Y + Rx interrupt = Y +@@ -26,6 +27,7 @@ DCB = Y + VLAN filter = Y + Flow control = Y + Rate limitation = Y ++Traffic manager = Y + Inline crypto = Y + CRC offload = P + VLAN offload = P diff --git a/dpdk/doc/guides/nics/hns3.rst b/dpdk/doc/guides/nics/hns3.rst -index 791c9cc2ed..380024600b 100644 +index 791c9cc2ed..836a91f86e 100644 --- a/dpdk/doc/guides/nics/hns3.rst +++ b/dpdk/doc/guides/nics/hns3.rst +@@ -6,7 +6,7 @@ HNS3 Poll Mode Driver + + The hns3 PMD (**librte_net_hns3**) provides poll mode driver support + for the inbuilt HiSilicon Network Subsystem(HNS) network engine +-found in the HiSilicon Kunpeng 920 SoC and Kunpeng 930 SoC . ++found in the HiSilicon Kunpeng 920 SoC (HIP08) and Kunpeng 930 SoC (HIP09/HIP10). + + Features + -------- @@ -30,7 +30,6 @@ Features of the HNS3 PMD are: - DCB - Scattered and gather for TX and RX @@ -26081,7 +29045,7 @@ index 005c0b2ca7..341146c4e7 100644 ------------------------------ diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst -index 51f51259e3..239e297d2a 100644 +index 51f51259e3..b047d7db58 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst @@ -455,8 +455,12 @@ Limitations @@ -26099,6 +29063,17 @@ index 51f51259e3..239e297d2a 100644 - For ConnectX-5 trusted device, the application metadata with SET_TAG index 0 is not supported before ``RTE_FLOW_ACTION_TYPE_SAMPLE`` action. +@@ -557,8 +561,8 @@ Limitations + + - Cannot co-exist with ASO meter, ASO age action in a single flow rule. + - Flow rules insertion rate and memory consumption need more optimization. +- - 256 ports maximum. +- - 4M connections maximum with ``dv_flow_en`` 1 mode. 16M with ``dv_flow_en`` 2. ++ - 16 ports maximum. ++ - 32M connections maximum. + + - Multi-thread flow insertion: + @@ -1121,6 +1125,9 @@ for an additional list of options shared with other mlx5 drivers. - 0. If representor matching is disabled, then there will be no implicit item added. As a result, ingress flow rules will match traffic @@ -26237,6 +29212,19 @@ index aadd60b5d4..dca4e789b3 100644 2. Boot OCTEON CN9K/CN10K with debugfs supported kernel. 3. Verify ``debugfs`` mounted by default "mount | grep -i debugfs" or mount it manually by using. +diff --git a/dpdk/doc/guides/platform/mlx5.rst b/dpdk/doc/guides/platform/mlx5.rst +index 3cc1dd29e2..a8dcba9683 100644 +--- a/dpdk/doc/guides/platform/mlx5.rst ++++ b/dpdk/doc/guides/platform/mlx5.rst +@@ -228,7 +228,7 @@ DevX SDK Installation + The DevX SDK must be installed on the machine building the Windows PMD. + Additional information can be found at + `How to Integrate Windows DevX in Your Development Environment +-`_. ++`_. + The minimal supported WinOF2 version is 2.60. 
+ + diff --git a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst index 01aad842a9..2b513bbf82 100644 --- a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst @@ -26927,6 +29915,103 @@ index 9b522458c8..0000000000 - d="m 32.13263,137.96494 1.19624,93.60569 156.25849,0.0883" - style="fill:none;stroke:#41719c;stroke-width:0.56864393px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#marker5421)" /> \ No newline at end of file +diff --git a/dpdk/doc/guides/prog_guide/img/mbuf1.svg b/dpdk/doc/guides/prog_guide/img/mbuf1.svg +index a08bf3b6c0..111a874c00 100644 +--- a/dpdk/doc/guides/prog_guide/img/mbuf1.svg ++++ b/dpdk/doc/guides/prog_guide/img/mbuf1.svg +@@ -487,7 +487,7 @@ + sodipodi:role="line" + id="tspan5256" + x="59.842155" +- y="282.37683">m->pkt.next = NULL ++ y="282.37683">m->next = NULL + m->pkt.next = NULL ++ y="628.45935">m->next = NULL + m->pkt.next = mseg3 ++ y="628.45935">m->next = mseg3 + m->pkt.next = mseg2 ++ y="628.45935">m->next = mseg2 + l2_len = len(out_eth) + mb->l3_len = len(out_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM + set out_ip checksum to 0 in the packet + + This is supported on hardware advertising RTE_ETH_TX_OFFLOAD_IPV4_CKSUM. +@@ -143,7 +143,7 @@ a vxlan-encapsulated tcp packet: + + mb->l2_len = len(out_eth) + mb->l3_len = len(out_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM | RTE_MBUF_F_TX_UDP_CKSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM + set out_ip checksum to 0 in the packet + set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum() + +@@ -154,7 +154,7 @@ a vxlan-encapsulated tcp packet: + + mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth) + mb->l3_len = len(in_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM + set in_ip checksum to 0 in the packet + + This is similar to case 1), but l2_len is different. It is supported +@@ -165,7 +165,7 @@ a vxlan-encapsulated tcp packet: + + mb->l2_len = len(out_eth + out_ip + out_udp + vxlan + in_eth) + mb->l3_len = len(in_ip) +- mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CSUM | RTE_MBUF_F_TX_TCP_CKSUM ++ mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM + set in_ip checksum to 0 in the packet + set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum() + diff --git a/dpdk/doc/guides/prog_guide/multi_proc_support.rst b/dpdk/doc/guides/prog_guide/multi_proc_support.rst index 815e8bdc43..df234548a7 100644 --- a/dpdk/doc/guides/prog_guide/multi_proc_support.rst @@ -26956,6 +30041,32 @@ index 815e8bdc43..df234548a7 100644 The rte part of the filenames of each of the above is configurable using the file-prefix parameter. 
In addition to specifying the file-prefix parameter, +diff --git a/dpdk/doc/guides/prog_guide/packet_framework.rst b/dpdk/doc/guides/prog_guide/packet_framework.rst +index 3d4e3b66cc..b263f23f17 100644 +--- a/dpdk/doc/guides/prog_guide/packet_framework.rst ++++ b/dpdk/doc/guides/prog_guide/packet_framework.rst +@@ -512,7 +512,7 @@ the number of L2 or L3 cache memory misses is greatly reduced, hence one of the + This is because the cost of L2/L3 cache memory miss on memory read accesses is high, as usually due to data dependency between instructions, + the CPU execution units have to stall until the read operation is completed from L3 cache memory or external DRAM memory. + By using prefetch instructions, the latency of memory read accesses is hidden, +-provided that it is preformed early enough before the respective data structure is actually used. ++provided that it is performed early enough before the respective data structure is actually used. + + By splitting the processing into several stages that are executed on different packets (the packets from the input burst are interlaced), + enough work is created to allow the prefetch instructions to complete successfully (before the prefetched data structures are actually accessed) and +diff --git a/dpdk/doc/guides/prog_guide/profile_app.rst b/dpdk/doc/guides/prog_guide/profile_app.rst +index 14292d4c25..a6b5fb4d5e 100644 +--- a/dpdk/doc/guides/prog_guide/profile_app.rst ++++ b/dpdk/doc/guides/prog_guide/profile_app.rst +@@ -59,7 +59,7 @@ addition to the standard events, ``perf`` can be used to profile arm64 + specific PMU (Performance Monitor Unit) events through raw events (``-e`` + ``-rXX``). + +-For more derails refer to the ++For more details refer to the + `ARM64 specific PMU events enumeration `_. + + diff --git a/dpdk/doc/guides/prog_guide/rte_flow.rst b/dpdk/doc/guides/prog_guide/rte_flow.rst index 3e6242803d..d0b7833a2f 100644 --- a/dpdk/doc/guides/prog_guide/rte_flow.rst @@ -27226,10 +30337,10 @@ index 2bb115d13f..f8befc6594 100644 +- 3rd Generation Intel® Xeon® Scalable Processors. +- 2nd Generation Intel® Xeon® Scalable Processors. 
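The mbuf offload documentation corrected above (RTE_MBUF_F_TX_IP_CKSUM rather than RTE_MBUF_F_TX_IP_CSUM) boils down to the following minimal C sketch. It is illustrative only and not part of the patch: it assumes an already-parsed Ethernet/IPv4/TCP frame on a port configured with RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and RTE_ETH_TX_OFFLOAD_TCP_CKSUM, and the helper name prepare_ipv4_tcp_tx_csum is hypothetical.

/* Illustrative sketch only -- not part of the patch. Mirrors the recipe in
 * the corrected offload documentation: set l2_len/l3_len, request hardware
 * IP and TCP checksums, zero the IP checksum field and seed the TCP
 * checksum with the pseudo-header checksum. */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

static void
prepare_ipv4_tcp_tx_csum(struct rte_mbuf *mb)
{
	struct rte_ipv4_hdr *ip;
	struct rte_tcp_hdr *tcp;

	/* header lengths the NIC needs to locate the checksum fields */
	mb->l2_len = sizeof(struct rte_ether_hdr);
	ip = rte_pktmbuf_mtod_offset(mb, struct rte_ipv4_hdr *, mb->l2_len);
	mb->l3_len = rte_ipv4_hdr_len(ip);
	tcp = rte_pktmbuf_mtod_offset(mb, struct rte_tcp_hdr *,
				      mb->l2_len + mb->l3_len);

	/* request hardware IP and TCP checksum computation */
	mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
			RTE_MBUF_F_TX_TCP_CKSUM;

	/* as documented: zero the IP checksum, seed TCP with pseudo-header */
	ip->hdr_checksum = 0;
	tcp->cksum = rte_ipv4_phdr_cksum(ip, mb->ol_flags);
}

On hardware without these offload capabilities, the same fields can instead be filled in software with rte_ipv4_cksum() and rte_ipv4_udptcp_cksum().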
diff --git a/dpdk/doc/guides/rel_notes/release_22_11.rst b/dpdk/doc/guides/rel_notes/release_22_11.rst -index 26e0560725..7ce5c436c0 100644 +index 26e0560725..db8d9405d2 100644 --- a/dpdk/doc/guides/rel_notes/release_22_11.rst +++ b/dpdk/doc/guides/rel_notes/release_22_11.rst -@@ -805,3 +805,1094 @@ Tested Platforms +@@ -805,3 +805,1738 @@ Tested Platforms ~~~~~~~~~~~~~ * drivers: fix symbol exports when map is omitted @@ -28324,6 +31435,650 @@ index 26e0560725..7ce5c436c0 100644 +~~~~~~~~~~~~~~~~~~~~ + + ++ ++22.11.5 Release Notes ++--------------------- ++ ++ ++22.11.5 Fixes ++~~~~~~~~~~~~~ ++ ++* app/crypto-perf: add missing op resubmission ++* app/crypto-perf: fix copy segment size ++* app/crypto-perf: fix data comparison ++* app/crypto-perf: fix encrypt operation verification ++* app/crypto-perf: fix next segment mbuf ++* app/crypto-perf: fix out-of-place mbuf size ++* app/crypto-perf: verify strdup return ++* app/dumpcap: verify strdup return ++* app/pdump: verify strdup return ++* app/testpmd: fix async flow create failure handling ++* app/testpmd: fix burst option parsing ++* app/testpmd: fix crash in multi-process forwarding ++* app/testpmd: fix error message for invalid option ++* app/testpmd: fix GRO packets flush on timeout ++* app/testpmd: fix --stats-period option check ++* app/testpmd: hide --bitrate-stats in help if disabled ++* app/testpmd: return if no packets in GRO heavy weight mode ++* app/testpmd: verify strdup return ++* baseband/acc: fix common logs ++* baseband/acc: fix logtypes register ++* baseband/fpga_5gnr_fec: use a better random generator ++* build: fix linker warnings about undefined symbols ++* build: fix reasons conflict ++* build: link static libs with whole-archive in subproject ++* build: pass cflags in subproject ++* bus/dpaa: verify strdup return ++* bus/fslmc: verify strdup return ++* bus/ifpga: remove dead code ++* bus/vdev: fix devargs in secondary process ++* bus/vdev: verify strdup return ++* ci: update versions of actions in GHA ++* common/cnxk: fix link config for SDP ++* common/cnxk: fix mbox region copy ++* common/cnxk: fix mbox struct attributes ++* common/cnxk: fix memory leak in CPT init ++* common/cnxk: fix possible out-of-bounds access ++* common/cnxk: fix RSS RETA configuration ++* common/cnxk: fix Tx MTU configuration ++* common/cnxk: fix VLAN check for inner header ++* common/cnxk: remove CN9K inline IPsec FP opcodes ++* common/cnxk: remove dead code ++* common/mlx5: fix calloc parameters ++* common/mlx5: fix duplicate read of general capabilities ++* common/sfc_efx/base: use C11 static assert ++* config: fix CPU instruction set for cross-build ++* cryptodev: remove unused extern variable ++* crypto/ipsec_mb: fix incorrectly setting cipher keys ++* crypto/qat: fix crash with CCM null AAD pointer ++* dmadev: fix calloc parameters ++* dma/dpaa2: fix logtype register ++* dma/idxd: verify strdup return ++* doc: add --latencystats option in testpmd guide ++* doc: add link speeds configuration in features table ++* doc: add traffic manager in features table ++* doc: fix commands in eventdev test tool guide ++* doc: fix configuration in baseband 5GNR driver guide ++* doc: fix default IP fragments maximum in programmer guide ++* doc: fix typo in packet framework guide ++* doc: fix typo in profiling guide ++* doc: fix typos in cryptodev overview ++* doc: update link to Windows DevX in mlx5 guide ++* drivers/net: fix buffer overflow for packet types list ++* eal: verify strdup return ++* eal/x86: add AMD vendor check for TSC calibration ++* 
ethdev: fix NVGRE encap flow action description ++* event/cnxk: fix dequeue timeout configuration ++* event/cnxk: verify strdup return ++* eventdev/crypto: fix enqueueing ++* eventdev: fix calloc parameters ++* eventdev: fix Doxygen processing of vector struct ++* eventdev: improve Doxygen comments on configure struct ++* event/dlb2: remove superfluous memcpy ++* event/opdl: fix compile-time check ++* examples/ipsec-secgw: fix cryptodev to SA mapping ++* examples/ipsec-secgw: fix Rx queue ID in Rx callback ++* examples/ipsec-secgw: fix typo in error message ++* examples/ipsec-secgw: fix width of variables ++* examples/l3fwd: fix Rx over not ready port ++* examples/packet_ordering: fix Rx with reorder mode disabled ++* examples/qos_sched: fix memory leak in args parsing ++* examples/vhost: verify strdup return ++* hash: remove some dead code ++* kernel/freebsd: fix module build on FreeBSD 14 ++* lib: add newline in logs ++* lib: remove redundant newline from logs ++* lib: use dedicated logtypes and macros ++* net: add macros for VLAN metadata parsing ++* net/af_xdp: fix leak on XSK configuration failure ++* net/af_xdp: fix memzone leak on config failure ++* net/bnx2x: fix calloc parameters ++* net/bnx2x: fix warnings about memcpy lengths ++* net/bnxt: fix 50G and 100G forced speed ++* net/bnxt: fix array overflow ++* net/bnxt: fix backward firmware compatibility ++* net/bnxt: fix deadlock in ULP timer callback ++* net/bnxt: fix null pointer dereference ++* net/bnxt: fix number of Tx queues being created ++* net/bnxt: fix speed change from 200G to 25G on Thor ++* net/bnxt: modify locking for representor Tx ++* net/bonding: fix flow count query ++* net/cnxk: add cookies check for multi-segment offload ++* net/cnxk: fix buffer size configuration ++* net/cnxk: fix flow RSS configuration ++* net/cnxk: fix mbuf fields in multi-segment Tx ++* net/cnxk: fix MTU limit ++* net/ena/base: limit exponential backoff ++* net/ena/base: restructure interrupt handling ++* net/ena: fix fast mbuf free ++* net/ena: fix mbuf double free in fast free mode ++* net/failsafe: fix memory leak in args parsing ++* net: fix TCP/UDP checksum with padding data ++* net/hns3: enable PFC for all user priorities ++* net/hns3: fix disable command with firmware ++* net/hns3: fix reset level comparison ++* net/hns3: fix VF multiple count on one reset ++* net/hns3: refactor handle mailbox function ++* net/hns3: refactor PF mailbox message struct ++* net/hns3: refactor send mailbox function ++* net/hns3: refactor VF mailbox message struct ++* net/hns3: remove QinQ insert support for VF ++* net/hns3: support new device ++* net/i40e: remove incorrect 16B descriptor read block ++* net/i40e: remove redundant judgment in flow parsing ++* net/iavf: fix memory leak on security context error ++* net/iavf: remove error logs for VLAN offloading ++* net/iavf: remove incorrect 16B descriptor read block ++* net/ice: fix link update ++* net/ice: fix memory leaks ++* net/ice: fix tunnel TSO capabilities ++* net/ice: fix version for experimental symbols ++* net/ice: remove incorrect 16B descriptor read block ++* net/ionic: fix device close ++* net/ionic: fix missing volatile type for cqe pointers ++* net/ionic: fix RSS query ++* net/ixgbe: fix memoy leak after device init failure ++* net/ixgbe: increase VF reset timeout ++* net/ixgbevf: fix RSS init for x550 NICs ++* net/mana: fix memory leak on MR allocation ++* net/mana: handle MR cache expansion failure ++* net/mana: prevent values overflow returned from RDMA layer ++* net/memif: fix extra 
mbuf refcnt update in zero copy Tx ++* net/mlx5: fix age position in hairpin split ++* net/mlx5: fix async flow create error handling ++* net/mlx5: fix condition of LACP miss flow ++* net/mlx5: fix connection tracking action validation ++* net/mlx5: fix conntrack action handle representation ++* net/mlx5: fix counters map in bonding mode ++* net/mlx5: fix DR context release ordering ++* net/mlx5: fix drop action release timing ++* net/mlx5: fix error packets drop in regular Rx ++* net/mlx5: fix flow configure validation ++* net/mlx5: fix flow counter cache starvation ++* net/mlx5: fix GENEVE option item translation ++* net/mlx5: fix GENEVE TLV option management ++* net/mlx5: fix HWS meter actions availability ++* net/mlx5: fix incorrect counter cache dereference ++* net/mlx5: fix IP-in-IP tunnels recognition ++* net/mlx5: fix jump action validation ++* net/mlx5: fix meter policy priority ++* net/mlx5: fix rollback on failed flow configure ++* net/mlx5: fix stats query crash in secondary process ++* net/mlx5: fix template clean up of FDB control flow rule ++* net/mlx5: fix use after free when releasing Tx queues ++* net/mlx5: fix VLAN handling in meter split ++* net/mlx5: fix VLAN ID in flow modify ++* net/mlx5: fix warning about copy length ++* net/mlx5/hws: check not supported fields in VXLAN ++* net/mlx5/hws: enable multiple integrity items ++* net/mlx5/hws: fix port ID for root table ++* net/mlx5/hws: fix tunnel protocol checks ++* net/mlx5/hws: fix VLAN inner type ++* net/mlx5/hws: fix VLAN item in non-relaxed mode ++* net/mlx5: prevent ioctl failure log flooding ++* net/mlx5: prevent querying aged flows on uninit port ++* net/mlx5: remove device status check in flow creation ++* net/mlx5: remove duplication of L3 flow item validation ++* net/mlx5: remove GENEVE options length limitation ++* net/netvsc: fix VLAN metadata parsing ++* net/nfp: fix calloc parameters ++* net/nfp: fix device close ++* net/nfp: fix device resource freeing ++* net/nfp: fix resource leak for CoreNIC firmware ++* net/nfp: fix resource leak for exit of CoreNIC firmware ++* net/nfp: fix resource leak for exit of flower firmware ++* net/nfp: fix resource leak for flower firmware ++* net/nfp: fix resource leak for PF initialization ++* net/nfp: fix switch domain free check ++* net/nfp: free switch domain ID on close ++* net/tap: do not overwrite flow API errors ++* net/tap: fix traffic control handle calculation ++* net/tap: log Netlink extended ack unavailability ++* net/thunderx: fix DMAC control register update ++* net/virtio: remove duplicate queue xstats ++* net/vmxnet3: fix initialization on FreeBSD ++* net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD ++* pipeline: fix calloc parameters ++* rawdev: fix calloc parameters ++* regexdev: fix logtype register ++* Revert "build: add libarchive to optional external dependencies" ++* telemetry: fix connected clients count ++* telemetry: fix empty JSON dictionaries ++* test/bpf: fix mbuf init in some filter test ++* test/cfgfile: fix typo in error messages ++* test: do not count skipped tests as executed ++* test/event: fix crash in Tx adapter freeing ++* test/event: skip test if no driver is present ++* test: fix probing in secondary process ++* test/mbuf: fix external mbuf case with assert enabled ++* test/power: fix typo in error message ++* test: verify strdup return ++* vdpa/mlx5: fix queue enable drain CQ ++* version: 22.11.5-rc1 ++* vhost: fix deadlock during vDPA SW live migration ++* vhost: fix memory leak in Virtio Tx split path ++* vhost: fix 
virtqueue access check in vhost-user setup
++
++22.11.5 Validation
++~~~~~~~~~~~~~~~~~~
++
++* Red Hat(R) Testing
++
++ * Platform
++
++ * RHEL 9
++ * Kernel 5.14
++ * Qemu 8.2.0
++ * libvirt 10.0.0
++ * X540-AT2 NIC(ixgbe, 10G)
++
++ * Functionality
++
++ * Guest with device assignment(PF) throughput testing(1G hugepage size)
++ * Guest with device assignment(PF) throughput testing(2M hugepage size)
++ * Guest with device assignment(VF) throughput testing
++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing
++ * PVP vhost-user 2Q throughput testing
++ * PVP vhost-user 1Q cross numa node throughput testing
++ * Guest with vhost-user 2 queues throughput testing
++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect
++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect
++ * PVP reconnect with dpdk-client, qemu-server: PASS
++ * PVP 1Q live migration testing
++ * PVP 1Q cross numa node live migration testing
++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing
++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M)
++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing
++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing
++ * Host PF + DPDK testing
++ * Host VF + DPDK testing
++
++
++* Intel(R) Testing
++
++ * Basic Intel(R) NIC testing
++ * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu20.04, Ubuntu22.04, Fedora38, RHEL8.7, RHEL9.2, FreeBSD13.1, SUSE15, Centos7.9, openEuler22.03-SP1,OpenAnolis8.8 etc.
++ * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++ * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc.
++ * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc.
++ * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc.
++ * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc.
++
++ * Basic cryptodev and virtio testing
++ * Virtio: both function and performance test are covered. Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMAWARE ESXI 8.0, etc.
++ * Cryptodev:
++ * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc.
++ * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc.
++
++
++* Nvidia(R) Testing
++
++ * Basic functionality via testpmd/example applications
++
++ * Tx/Rx
++ * xstats
++ * Timestamps
++ * Link status
++ * RTE flow and flow_director
++ * RSS
++ * VLAN filtering, stripping and insertion
++ * Checksum/TSO
++ * ptype
++ * link_status_interrupt example application
++ * l3fwd-power example application
++ * Multi-process example applications
++ * Hardware LRO tests
++ * Buffer Split tests
++ * Tx scheduling tests
++
++ * Build tests
++
++ * Ubuntu 20.04.6 with MLNX_OFED_LINUX-24.01-0.3.3.1.
++ * Ubuntu 20.04.6 with rdma-core master (4b08a22).
++ * Ubuntu 20.04.6 with rdma-core v28.0.
++ * Fedora 38 with rdma-core v44.0.
++ * Fedora 40 (Rawhide) with rdma-core v48.0.
++ * OpenSUSE Leap 15.5 with rdma-core v42.0.
++ * Windows Server 2019 with Clang 16.0.6.
++ ++ * BlueField-2 ++ ++ * DOCA 2.6.0 ++ * fw 24.40.1000 ++ ++ * ConnectX-7 ++ ++ * Ubuntu 20.04 ++ * Driver MLNX_OFED_LINUX-24.01-0.3.3.1 ++ * fw 28.40.1000 ++ ++ * ConnectX-6 Dx ++ ++ * Ubuntu 20.04 ++ * Driver MLNX_OFED_LINUX-24.01-0.3.3.1 ++ * fw 22.40.1000 ++ ++22.11.6 Release Notes ++--------------------- ++ ++ ++22.11.6 Fixes ++~~~~~~~~~~~~~ ++ ++* app/bbdev: fix interrupt tests ++* app/crypto-perf: fix result for asymmetric ++* app/crypto-perf: remove redundant local variable ++* app/dumpcap: handle SIGTERM and SIGHUP ++* app/pdump: handle SIGTERM and SIGHUP ++* app/testpmd: add postpone option to async flow destroy ++* app/testpmd: fix build on signed comparison ++* app/testpmd: fix help string of BPF load command ++* app/testpmd: fix lcore ID restriction ++* app/testpmd: fix outer IP checksum offload ++* app/testpmd: fix parsing for connection tracking item ++* app/testpmd: handle IEEE1588 init failure ++* baseband/acc: fix memory barrier ++* baseband/la12xx: forbid secondary process ++* bpf: disable on 32-bit x86 ++* bpf: fix load hangs with six IPv6 addresses ++* bpf: fix MOV instruction evaluation ++* buildtools: fix build with clang 17 and ASan ++* build: use builtin helper for python dependencies ++* bus/dpaa: fix bus scan for DMA devices ++* bus/dpaa: fix memory leak in bus scan ++* bus/dpaa: remove redundant file descriptor check ++* bus/pci: fix build with musl 1.2.4 / Alpine 3.19 ++* bus/pci: fix FD in secondary process ++* bus/pci: fix UIO resource mapping in secondary process ++* bus/vdev: fix device reinitialization ++* bus/vdev: revert fix devargs in secondary process ++* common/dpaax/caamflib: fix PDCP AES-AES watchdog error ++* common/dpaax/caamflib: fix PDCP-SDAP watchdog error ++* common/dpaax: fix IOVA table cleanup ++* common/dpaax: fix node array overrun ++* common/idpf: fix flex descriptor mask ++* common/mlx5: fix PRM structs ++* common/mlx5: fix unsigned/signed mismatch ++* common/mlx5: remove unneeded field when modify RQ table ++* config: fix warning for cross build with meson >= 1.3.0 ++* crypto/cnxk: fix minimal input normalization ++* cryptodev: fix build without crypto callbacks ++* cryptodev: validate crypto callbacks from next node ++* crypto/dpaa2_sec: fix event queue user context ++* crypto/dpaa_sec: fix IPsec descriptor ++* crypto/ipsec_mb: fix function comment ++* crypto/openssl: fix GCM and CCM thread unsafe contexts ++* crypto/openssl: make per-QP auth context clones ++* crypto/openssl: make per-QP cipher context clones ++* crypto/openssl: optimize 3DES-CTR context init ++* crypto/openssl: set cipher padding once ++* crypto/qat: fix GEN4 write ++* crypto/qat: fix log message typo ++* crypto/qat: fix placement of OOP offset ++* dmadev: fix structure alignment ++* dma/hisilicon: remove support for HIP09 platform ++* dma/idxd: fix setup with Ubuntu 24.04 ++* doc: add baseline mode in l3fwd-power guide ++* doc: fix link to hugepage mapping from Linux guide ++* doc: fix mbuf flags ++* doc: fix testpmd ring size command ++* doc: fix typo in l2fwd-crypto guide ++* doc: remove empty section from testpmd guide ++* doc: remove reference to mbuf pkt field ++* eal: fix logs for '--lcores' ++* eal/linux: lower log level on allocation attempt failure ++* eal/unix: support ZSTD compression for firmware ++* eal/windows: install sched.h file ++* ethdev: fix device init without socket-local memory ++* ethdev: fix GENEVE option item conversion ++* eventdev/crypto: fix opaque field handling ++* event/sw: fix warning from useless snprintf ++* 
examples/fips_validation: fix dereference and out-of-bound ++* examples: fix lcore ID restriction ++* examples: fix port ID restriction ++* examples: fix queue ID restriction ++* examples/ipsec-secgw: fix SA salt endianness ++* examples/ipsec-secgw: revert SA salt endianness ++* examples/l3fwd: fix crash in ACL mode for mixed traffic ++* examples/l3fwd: fix crash on multiple sockets ++* fbarray: fix finding for unaligned length ++* fbarray: fix incorrect lookahead behavior ++* fbarray: fix incorrect lookbehind behavior ++* fbarray: fix lookahead ignore mask handling ++* fbarray: fix lookbehind ignore mask handling ++* graph: fix ID collisions ++* hash: check name when creating a hash ++* hash: fix RCU reclamation size ++* hash: fix return code description in Doxygen ++* kni: fix build with Linux 6.8 ++* latencystats: fix literal float suffix ++* malloc: fix multi-process wait condition handling ++* mbuf: fix dynamic fields copy ++* mempool: replace GCC pragma with cast ++* net/af_packet: align Rx/Tx structs to cache line ++* net/af_xdp: count mbuf allocation failures ++* net/af_xdp: fix port ID in Rx mbuf ++* net/af_xdp: fix stats reset ++* net/af_xdp: remove unused local statistic ++* net/ark: fix index arithmetic ++* net/axgbe: check only minimum speed for cables ++* net/axgbe: delay AN timeout during KR training ++* net/axgbe: disable interrupts during device removal ++* net/axgbe: disable RRC for yellow carp devices ++* net/axgbe: enable PLL control for fixed PHY modes only ++* net/axgbe: fix connection for SFP+ active cables ++* net/axgbe: fix fluctuations for 1G Bel Fuse SFP ++* net/axgbe: fix linkup in PHY status ++* net/axgbe: fix MDIO access for non-zero ports and CL45 PHYs ++* net/axgbe: fix SFP codes check for DAC cables ++* net/axgbe: fix Tx flow on 30H HW ++* net/axgbe: reset link when link never comes back ++* net/axgbe: update DMA coherency values ++* net/bonding: fix failover time of LACP with mode 4 ++* net/cnxk: fix outbound security with higher packet burst ++* net/cnxk: fix promiscuous state after MAC change ++* net/cnxk: fix RSS config ++* net/dpaa: forbid MTU configuration for shared interface ++* net/e1000/base: fix link power down ++* net/ena: fix bad checksum handling ++* net/ena: fix checksum handling ++* net/ena: fix return value check ++* net: fix outer UDP checksum in Intel prepare helper ++* net/fm10k: fix cleanup during init failure ++* net/hns3: check Rx DMA address alignmnent ++* net/hns3: disable SCTP verification tag for RSS hash input ++* net/hns3: fix double free for Rx/Tx queue ++* net/hns3: fix offload flag of IEEE 1588 ++* net/hns3: fix Rx timestamp flag ++* net/hns3: fix uninitialized variable in FEC query ++* net/hns3: fix variable overflow ++* net/i40e: fix outer UDP checksum offload for X710 ++* net/iavf: remove outer UDP checksum offload for X710 VF ++* net/ice/base: fix board type definition ++* net/ice/base: fix check for existing switch rule ++* net/ice/base: fix GCS descriptor field offsets ++* net/ice/base: fix masking when reading context ++* net/ice/base: fix memory leak in firmware version check ++* net/ice/base: fix pointer to variable outside scope ++* net/ice/base: fix potential TLV length overflow ++* net/ice/base: fix preparing PHY for timesync command ++* net/ice/base: fix return type of bitmap hamming weight ++* net/ice/base: fix sign extension ++* net/ice/base: fix size when allocating children arrays ++* net/ice/base: fix temporary failures reading NVM ++* net/ice: fix check for outer UDP checksum offload ++* net/ice: fix memory 
leaks in raw pattern parsing ++* net/ice: fix return value for raw pattern parsing ++* net/ice: fix sizing of filter hash table ++* net/ionic: fix mbuf double-free when emptying array ++* net/ixgbe/base: fix 5G link speed reported on VF ++* net/ixgbe/base: fix PHY ID for X550 ++* net/ixgbe/base: revert advertising for X550 2.5G/5G ++* net/ixgbe: do not create delayed interrupt handler twice ++* net/ixgbe: do not update link status in secondary process ++* net/mlx5: break flow resource release loop ++* net/mlx5: fix access to flow template operations ++* net/mlx5: fix Arm build with GCC 9.1 ++* net/mlx5: fix crash on counter pool destroy ++* net/mlx5: fix disabling E-Switch default flow rules ++* net/mlx5: fix end condition of reading xstats ++* net/mlx5: fix flow template indirect action failure ++* net/mlx5: fix hash Rx queue release in flow sample ++* net/mlx5: fix indexed pool with invalid index ++* net/mlx5: fix MTU configuration ++* net/mlx5: fix start without duplicate flow patterns ++* net/mlx5: fix uplink port probing in bonding mode ++* net/mlx5/hws: add template match none flag ++* net/mlx5/hws: decrease log level for creation failure ++* net/mlx5/hws: fix action template dump ++* net/mlx5/hws: fix deletion of action vport ++* net/mlx5/hws: fix function comment ++* net/mlx5/hws: fix port ID on root item convert ++* net/mlx5/hws: fix spinlock release on context open ++* net/mlx5/hws: remove unused variable ++* net/mlx5: support jump in meter hierarchy ++* net/nfp: adapt reverse sequence card ++* net/nfp: disable ctrl VNIC queues on close ++* net/nfp: fix allocation of switch domain ++* net/nfp: fix disabling 32-bit build ++* net/nfp: fix IPv6 TTL and DSCP flow action ++* net/nfp: fix representor port queue release ++* net/nfp: forbid offload flow rules with empty action list ++* net/nfp: remove redundant function call ++* net/ngbe: add special config for YT8531SH-CA PHY ++* net/ngbe: fix hotplug remove ++* net/ngbe: fix memory leaks ++* net/ngbe: fix MTU range ++* net/ngbe: keep PHY power down while device probing ++* net/tap: fix file descriptor check in isolated flow ++* net/txgbe: fix flow filters in VT mode ++* net/txgbe: fix hotplug remove ++* net/txgbe: fix memory leaks ++* net/txgbe: fix MTU range ++* net/txgbe: fix Rx interrupt ++* net/txgbe: fix tunnel packet parsing ++* net/txgbe: fix Tx hang on queue disable ++* net/txgbe: fix VF promiscuous and allmulticast ++* net/txgbe: reconfigure more MAC Rx registers ++* net/txgbe: restrict configuration of VLAN strip offload ++* net/virtio: fix MAC table update ++* net/virtio-user: add memcpy check ++* net/vmxnet3: fix init logs ++* pcapng: add memcpy check ++* power: increase the number of UNCORE frequencies ++* telemetry: fix connection parameter parsing ++* telemetry: lower log level on socket error ++* test/crypto: fix allocation comment ++* test/crypto: fix asymmetric capability test ++* test/crypto: fix enqueue/dequeue callback case ++* test/crypto: fix vector global buffer overflow ++* test/crypto: remove unused stats in setup ++* test: force IOVA mode on PPC64 without huge pages ++* usertools/devbind: fix indentation ++* vdpa/sfc: remove dead code ++* version: 22.11.6-rc1 ++* vhost: cleanup resubmit info before inflight setup ++* vhost: fix build with GCC 13 ++ ++22.11.6 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* Red Hat(R) Testing ++ ++ * Platform ++ ++ * RHEL 9 ++ * Kernel 5.14 ++ * Qemu 7.2.0 ++ * libvirt 9.0 ++ * openvswitch 3.1 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) 
throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP reconnect with dpdk-client, qemu-server: PASS ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* Intel(R) Testing ++ ++ * Basic Intel(R) NIC testing ++ * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu24.04, Ubuntu22.04, Fedora40, RHEL9.3, RHEL9.4, FreeBSD14, SUSE15, Centos7.9, openEuler22.03-SP1, OpenAnolis8.8 etc. ++ * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc. ++ * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc. ++ * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc. ++ ++ * Basic cryptodev and virtio testing ++ * Virtio: both function and performance tests are covered, such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc. ++ * Cryptodev: ++ * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc. ++ * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc. ++ ++ ++* Nvidia(R) Testing ++ ++ * Basic functionality via testpmd/example applications ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow and flow_director ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Buffer Split tests ++ * Tx scheduling tests ++ ++ * Build tests ++ ++ * Debian 12 with MLNX_OFED_LINUX-24.04-0.7.0.0. ++ * Ubuntu 20.04.6 with MLNX_OFED_LINUX-24.07-0.6.1.0. ++ * Ubuntu 20.04.6 with rdma-core master (dd9c687). ++ * Ubuntu 20.04.6 with rdma-core v28.0. ++ * Fedora 38 with rdma-core v48.0. ++ * Fedora 42 (Rawhide) with rdma-core v51.0. ++ * OpenSUSE Leap 15.6 with rdma-core v49.1.
++ ++ * BlueField-2 ++ ++ * DOCA 2.8.0 ++ * fw 24.42.1000 ++ ++ * ConnectX-7 ++ ++ * Ubuntu 20.04 ++ * Driver MLNX_OFED_LINUX-24.07-0.6.1.0 ++ * fw 28.42.1000 ++ ++ * ConnectX-6 Dx ++ ++ * Ubuntu 20.04 ++ * Driver MLNX_OFED_LINUX-24.07-0.6.1.0 ++ * fw 22.42.1000 diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst index 3ada3575ba..51621b692f 100644 --- a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst @@ -28346,6 +32101,19 @@ index 3ada3575ba..51621b692f 100644 To compile the sample application see :doc:`compiling`. +diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst +index ce49eab96f..7ff304d05c 100644 +--- a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst ++++ b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst +@@ -30,7 +30,7 @@ Compiling the Application + + To compile the sample application see :doc:`compiling`. + +-The application is located in the ``l2fwd-crypt`` sub-directory. ++The application is located in the ``l2fwd-crypto`` sub-directory. + + Running the Application + ----------------------- diff --git a/dpdk/doc/guides/sample_app_ug/l3_forward.rst b/dpdk/doc/guides/sample_app_ug/l3_forward.rst index 94b22da01e..1cc2c1dd1d 100644 --- a/dpdk/doc/guides/sample_app_ug/l3_forward.rst @@ -28361,6 +32129,20 @@ index 94b22da01e..1cc2c1dd1d 100644 During the initialization phase route rules for IPv4 and IPv6 are read from rule files. Compiling the Application +diff --git a/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst b/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst +index fa3ef67c08..d2192eaa53 100644 +--- a/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst ++++ b/dpdk/doc/guides/sample_app_ug/l3_forward_power_man.rst +@@ -350,6 +350,9 @@ will use automatic PMD power management. + This mode is limited to one queue per core, + and has three available power management schemes: + ++``baseline`` ++ This mode will not enable any power saving features. ++ + ``monitor`` + This will use ``rte_power_monitor()`` function to enter + a power-optimized state (subject to platform support). diff --git a/dpdk/doc/guides/sample_app_ug/pipeline.rst b/dpdk/doc/guides/sample_app_ug/pipeline.rst index 49d50136bc..7c86bf484a 100644 --- a/dpdk/doc/guides/sample_app_ug/pipeline.rst @@ -28388,6 +32170,60 @@ index cb9c4f2169..51e69fc20d 100644 1. help: show help message 2. list: list all available vdpa devices +diff --git a/dpdk/doc/guides/testpmd_app_ug/run_app.rst b/dpdk/doc/guides/testpmd_app_ug/run_app.rst +index 074f910fc9..954e084fe5 100644 +--- a/dpdk/doc/guides/testpmd_app_ug/run_app.rst ++++ b/dpdk/doc/guides/testpmd_app_ug/run_app.rst +@@ -418,6 +418,10 @@ The command line options are: + + Set the logical core N to perform bitrate calculation. + ++* ``--latencystats=N`` ++ ++ Set the logical core N to perform latency and jitter calculations. ++ + * ``--print-event `` + + Enable printing the occurrence of the designated event. 
Using all will +diff --git a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +index 0037506a79..b46c574325 100644 +--- a/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst ++++ b/dpdk/doc/guides/testpmd_app_ug/testpmd_funcs.rst +@@ -1995,7 +1995,7 @@ port config - queue ring size + + Configure a rx/tx queue ring size:: + +- testpmd> port (port_id) (rxq|txq) (queue_id) ring_size (value) ++ testpmd> port config (port_id) (rxq|txq) (queue_id) ring_size (value) + + Only take effect after command that (re-)start the port or command that setup specific queue. + +@@ -2843,14 +2843,6 @@ where: + * ``red`` enable 1, disable 0 marking IP ecn for yellow marked packets with ecn of 2'b01 or 2'b10 + to ecn of 2'b11 when IP is caring TCP or SCTP + +-Filter Functions +----------------- +- +-This section details the available filter functions that are available. +- +-Note these functions interface the deprecated legacy filtering framework, +-superseded by *rte_flow*. See `Flow rules management`_. +- + .. _testpmd_rte_flow: + + Flow rules management +@@ -2860,10 +2852,6 @@ Control of the generic flow API (*rte_flow*) is fully exposed through the + ``flow`` command (configuration, validation, creation, destruction, queries + and operation modes). + +-Considering *rte_flow* overlaps with all `Filter Functions`_, using both +-features simultaneously may cause undefined side-effects and is therefore +-not recommended. +- + ``flow`` syntax + ~~~~~~~~~~~~~~~ + diff --git a/dpdk/doc/guides/tools/cryptoperf.rst b/dpdk/doc/guides/tools/cryptoperf.rst index c77e253417..f30784674d 100644 --- a/dpdk/doc/guides/tools/cryptoperf.rst @@ -28411,10 +32247,176 @@ index a9217de4ee..fdb9030171 100644 - jq '.[] | select(.pci_ids[] | .vendor == "15b3" and .device == "1013").kmod' + jq '.[] | select(.pci_ids[]? | .vendor == "15b3" and .device == "1013").kmod' "* ib_uverbs & mlx5_core & mlx5_ib" +diff --git a/dpdk/doc/guides/tools/testeventdev.rst b/dpdk/doc/guides/tools/testeventdev.rst +index cd278e8998..7c1f4d64cf 100644 +--- a/dpdk/doc/guides/tools/testeventdev.rst ++++ b/dpdk/doc/guides/tools/testeventdev.rst +@@ -295,7 +295,7 @@ Example command to run order queue test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0x1f -s 0x10 --vdev=event_sw0 -- \ + --test=order_queue --plcores 1 --wlcores 2,3 + + +@@ -358,7 +358,7 @@ Example command to run order ``all types queue`` test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev=event_octeontx -- \ ++ sudo /app/dpdk-test-eventdev -c 0x1f -- \ + --test=order_atq --plcores 1 --wlcores 2,3 + + +@@ -462,14 +462,14 @@ Example command to run perf queue test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ + --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 + + Example command to run perf queue test with producer enqueuing a burst of events: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ + --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 \ + --prod_enq_burst_sz=32 + +@@ -477,15 +477,15 @@ Example command to run perf queue test with ethernet ports: + + .. 
code-block:: console + +- sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \ ++ sudo build/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ + --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --prod_type_ethdev + + Example command to run perf queue test with event timer adapter: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev="event_octeontx" -- \ +- --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ ++ sudo /app/dpdk-test-eventdev -c 0xfff1 \ ++ -- --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ + --prod_type_timerdev --fwd_latency + + PERF_ATQ Test +@@ -572,15 +572,15 @@ Example command to run perf ``all types queue`` test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev=event_octeontx -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -- \ + --test=perf_atq --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 + + Example command to run perf ``all types queue`` test with event timer adapter: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev --vdev="event_octeontx" -- \ +- --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ ++ sudo /app/dpdk-test-eventdev -c 0xfff1 \ ++ -- --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ + --stlist=a --prod_type_timerdev --fwd_latency + + +@@ -804,13 +804,13 @@ Example command to run pipeline atq test: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -- \ + --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a + + Example command to run pipeline atq test with vector events: + + .. code-block:: console + +- sudo /app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \ ++ sudo /app/dpdk-test-eventdev -c 0xf -- \ + --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a \ + --enable_vector --vector_size 512 +diff --git a/dpdk/drivers/baseband/acc/acc_common.c b/dpdk/drivers/baseband/acc/acc_common.c +new file mode 100644 +index 0000000000..f8d2b19570 +--- /dev/null ++++ b/dpdk/drivers/baseband/acc/acc_common.c +@@ -0,0 +1,7 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (c) 2023 Red Hat, Inc. ++ */ ++ ++#include ++ ++RTE_LOG_REGISTER_SUFFIX(acc_common_logtype, common, INFO); +diff --git a/dpdk/drivers/baseband/acc/acc_common.h b/dpdk/drivers/baseband/acc/acc_common.h +index c076dc72cc..7ea3cc9a02 100644 +--- a/dpdk/drivers/baseband/acc/acc_common.h ++++ b/dpdk/drivers/baseband/acc/acc_common.h +@@ -131,9 +131,11 @@ + #define ACC_LIM_31 20 /* 0.31 */ + #define ACC_MAX_E (128 * 1024 - 2) + ++extern int acc_common_logtype; ++ + /* Helper macro for logging */ + #define rte_acc_log(level, fmt, ...) 
\ +- rte_log(RTE_LOG_ ## level, RTE_LOG_NOTICE, fmt "\n", \ ++ rte_log(RTE_LOG_ ## level, acc_common_logtype, fmt "\n", \ + ##__VA_ARGS__) + + /* ACC100 DMA Descriptor triplet */ +@@ -962,6 +964,9 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n, + req_elem_addr, + (void *)q->mmio_reg_enqueue); + ++ q->aq_enqueued++; ++ q->sw_ring_head += enq_batch_size; ++ + rte_wmb(); + + #ifdef RTE_BBDEV_OFFLOAD_COST +@@ -976,8 +981,6 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n, + rte_rdtsc_precise() - start_time; + #endif + +- q->aq_enqueued++; +- q->sw_ring_head += enq_batch_size; + n -= enq_batch_size; + + } while (n); +diff --git a/dpdk/drivers/baseband/acc/meson.build b/dpdk/drivers/baseband/acc/meson.build +index 77c393b533..1cbb06d107 100644 +--- a/dpdk/drivers/baseband/acc/meson.build ++++ b/dpdk/drivers/baseband/acc/meson.build +@@ -3,6 +3,6 @@ + + deps += ['bbdev', 'bus_pci'] + +-sources = files('rte_acc100_pmd.c', 'rte_acc200_pmd.c') ++sources = files('acc_common.c', 'rte_acc100_pmd.c', 'rte_acc200_pmd.c') + + headers = files('rte_acc_cfg.h') diff --git a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c -index ba8247d47e..56a11e98b8 100644 +index ba8247d47e..955c0236e4 100644 --- a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +++ b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +@@ -26,9 +26,9 @@ + #include "acc200_cfg.h" + + #ifdef RTE_LIBRTE_BBDEV_DEBUG +-RTE_LOG_REGISTER_DEFAULT(acc100_logtype, DEBUG); ++RTE_LOG_REGISTER_SUFFIX(acc100_logtype, acc100, DEBUG); + #else +-RTE_LOG_REGISTER_DEFAULT(acc100_logtype, NOTICE); ++RTE_LOG_REGISTER_SUFFIX(acc100_logtype, acc100, NOTICE); + #endif + + /* Calculate the offset of the enqueue register */ @@ -622,6 +622,7 @@ acc100_dev_close(struct rte_bbdev *dev) rte_free(d->tail_ptrs); rte_free(d->info_ring); @@ -28479,9 +32481,21 @@ index ba8247d47e..56a11e98b8 100644 if (unlikely(ops == 0)) return 0; diff --git a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c -index c5123cfef0..4fc078fe26 100644 +index c5123cfef0..8bda3a8e07 100644 --- a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c +++ b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c +@@ -24,9 +24,9 @@ + #include "acc200_pmd.h" + + #ifdef RTE_LIBRTE_BBDEV_DEBUG +-RTE_LOG_REGISTER_DEFAULT(acc200_logtype, DEBUG); ++RTE_LOG_REGISTER_SUFFIX(acc200_logtype, acc200, DEBUG); + #else +-RTE_LOG_REGISTER_DEFAULT(acc200_logtype, NOTICE); ++RTE_LOG_REGISTER_SUFFIX(acc200_logtype, acc200, NOTICE); + #endif + + /* Calculate the offset of the enqueue register. 
*/ @@ -1848,6 +1848,9 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, r = op->turbo_enc.tb_params.r; @@ -28677,10 +32691,18 @@ index c5123cfef0..4fc078fe26 100644 q->aq_dequeued += aq_dequeued; diff --git a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c -index d520d5238f..0dfeba08e1 100644 +index d520d5238f..171aed4d86 100644 --- a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +++ b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c -@@ -569,17 +569,21 @@ static int +@@ -16,6 +16,7 @@ + #ifdef RTE_BBDEV_OFFLOAD_COST + #include + #endif ++#include + + #include + #include +@@ -569,17 +570,21 @@ static int fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id) { struct fpga_5gnr_fec_device *d = dev->data->dev_private; @@ -28707,7 +32729,7 @@ index d520d5238f..0dfeba08e1 100644 /* Clear queue head and tail variables */ q->tail = q->head_free_desc = 0; -@@ -887,9 +891,11 @@ check_desc_error(uint32_t error_code) { +@@ -887,9 +892,11 @@ check_desc_error(uint32_t error_code) { static inline uint16_t get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index) { @@ -28720,6 +32742,29 @@ index d520d5238f..0dfeba08e1 100644 if (n_cb == n) { if (rv_index == 1) return (bg == 1 ? K0_1_1 : K0_1_2) * z_c; +@@ -1496,7 +1503,7 @@ fpga_mutex_acquisition(struct fpga_queue *q) + { + uint32_t mutex_ctrl, mutex_read, cnt = 0; + /* Assign a unique id for the duration of the DDR access */ +- q->ddr_mutex_uuid = rand(); ++ q->ddr_mutex_uuid = rte_rand(); + /* Request and wait for acquisition of the mutex */ + mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1; + do { +diff --git a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +index bb754a5395..1a56e73abd 100644 +--- a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c ++++ b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +@@ -1084,6 +1084,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + if (vdev == NULL) + return -EINVAL; + diff --git a/dpdk/drivers/baseband/turbo_sw/meson.build b/dpdk/drivers/baseband/turbo_sw/meson.build index 417ec63394..aeb9a76f9e 100644 --- a/dpdk/drivers/baseband/turbo_sw/meson.build @@ -28741,6 +32786,38 @@ index 417ec63394..aeb9a76f9e 100644 ext_deps += dep_turbo ext_deps += dependency('flexran_sdk_crc', required: true) ext_deps += dependency('flexran_sdk_rate_matching', required: true) +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/process.c b/dpdk/drivers/bus/dpaa/base/qbman/process.c +index 3504ec97db..3e4622f606 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/process.c ++++ b/dpdk/drivers/bus/dpaa/base/qbman/process.c +@@ -1,7 +1,7 @@ + /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2011-2016 Freescale Semiconductor Inc. +- * Copyright 2017,2020 NXP ++ * Copyright 2017,2020,2022,2024 NXP + * + */ + #include +@@ -27,15 +27,16 @@ static int check_fd(void) + { + int ret; + +- if (fd >= 0) +- return 0; + ret = pthread_mutex_lock(&fd_init_lock); + assert(!ret); ++ + /* check again with the lock held */ + if (fd < 0) + fd = open(PROCESS_PATH, O_RDWR); ++ + ret = pthread_mutex_unlock(&fd_init_lock); + assert(!ret); ++ + return (fd >= 0) ? 
0 : -ENODEV; + } + diff --git a/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/dpdk/drivers/bus/dpaa/base/qbman/qman.c index 3949bf8712..83db0a534e 100644 --- a/dpdk/drivers/bus/dpaa/base/qbman/qman.c @@ -28771,6 +32848,69 @@ index 3949bf8712..83db0a534e 100644 fq_state_change(p, fq, msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, &swapped_msg); +diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c +index e57159f5d8..aaf2a5f43e 100644 +--- a/dpdk/drivers/bus/dpaa/dpaa_bus.c ++++ b/dpdk/drivers/bus/dpaa/dpaa_bus.c +@@ -187,6 +187,7 @@ dpaa_create_device_list(void) + if (dev->intr_handle == NULL) { + DPAA_BUS_LOG(ERR, "Failed to allocate intr handle"); + ret = -ENOMEM; ++ free(dev); + goto cleanup; + } + +@@ -220,7 +221,7 @@ dpaa_create_device_list(void) + + if (dpaa_sec_available()) { + DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available"); +- return 0; ++ goto qdma_dpaa; + } + + /* Creating SEC Devices */ +@@ -238,6 +239,7 @@ dpaa_create_device_list(void) + if (dev->intr_handle == NULL) { + DPAA_BUS_LOG(ERR, "Failed to allocate intr handle"); + ret = -ENOMEM; ++ free(dev); + goto cleanup; + } + +@@ -259,6 +261,7 @@ dpaa_create_device_list(void) + + rte_dpaa_bus.device_count += i; + ++qdma_dpaa: + /* Creating QDMA Device */ + for (i = 0; i < RTE_DPAA_QDMA_DEVICES; i++) { + dev = calloc(1, sizeof(struct rte_dpaa_device)); +@@ -791,6 +794,10 @@ dpaa_bus_dev_iterate(const void *start, const char *str, + + /* Now that name=device_name format is available, split */ + dup = strdup(str); ++ if (dup == NULL) { ++ DPAA_BUS_DEBUG("Dup string (%s) failed!\n", str); ++ return NULL; ++ } + dev_name = dup + strlen("name="); + + if (start != NULL) { +diff --git a/dpdk/drivers/bus/fslmc/fslmc_bus.c b/dpdk/drivers/bus/fslmc/fslmc_bus.c +index 57bfb5111a..89f0f329c0 100644 +--- a/dpdk/drivers/bus/fslmc/fslmc_bus.c ++++ b/dpdk/drivers/bus/fslmc/fslmc_bus.c +@@ -634,6 +634,10 @@ fslmc_bus_dev_iterate(const void *start, const char *str, + + /* Now that name=device_name format is available, split */ + dup = strdup(str); ++ if (dup == NULL) { ++ DPAA2_BUS_DEBUG("Dup string (%s) failed!\n", str); ++ return NULL; ++ } + dev_name = dup + strlen("name="); + + if (start != NULL) { diff --git a/dpdk/drivers/bus/fslmc/mc/mc_sys.c b/dpdk/drivers/bus/fslmc/mc/mc_sys.c index ab9a074835..76fdcd5c8a 100644 --- a/dpdk/drivers/bus/fslmc/mc/mc_sys.c @@ -28821,8 +32961,46 @@ index bb943b58b5..07e316b38e 100644 if (rawdev->dev_ops && rawdev->dev_ops->firmware_load && rawdev->dev_ops->firmware_load(rawdev, +diff --git a/dpdk/drivers/bus/ifpga/ifpga_logs.h b/dpdk/drivers/bus/ifpga/ifpga_logs.h +index 873e0a4f2f..248bccba5d 100644 +--- a/dpdk/drivers/bus/ifpga/ifpga_logs.h ++++ b/dpdk/drivers/bus/ifpga/ifpga_logs.h +@@ -9,10 +9,6 @@ + + extern int ifpga_bus_logtype; + +-#define IFPGA_LOG(level, fmt, args...) \ +- rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \ +- __func__, ##args) +- + #define IFPGA_BUS_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \ + __func__, ##args) +diff --git a/dpdk/drivers/bus/pci/linux/pci_uio.c b/dpdk/drivers/bus/pci/linux/pci_uio.c +index d52125e49b..81a1ed6fa0 100644 +--- a/dpdk/drivers/bus/pci/linux/pci_uio.c ++++ b/dpdk/drivers/bus/pci/linux/pci_uio.c +@@ -245,7 +245,7 @@ pci_uio_alloc_resource(struct rte_pci_device *dev, + } + snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num); + +- /* save fd if in primary process */ ++ /* save fd */ + fd = open(devname, O_RDWR); + if (fd < 0) { + RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", +@@ -283,6 +283,9 @@ pci_uio_alloc_resource(struct rte_pci_device *dev, + } + } + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + /* allocate the mapping details for secondary processes*/ + *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0); + if (*uio_res == NULL) { diff --git a/dpdk/drivers/bus/pci/linux/pci_vfio.c b/dpdk/drivers/bus/pci/linux/pci_vfio.c -index fab3483d9f..fe83e1a04e 100644 +index fab3483d9f..44c132ae8a 100644 --- a/dpdk/drivers/bus/pci/linux/pci_vfio.c +++ b/dpdk/drivers/bus/pci/linux/pci_vfio.c @@ -2,6 +2,7 @@ @@ -28833,6 +33011,132 @@ index fab3483d9f..fe83e1a04e 100644 #include #include #include +@@ -52,7 +53,7 @@ pci_vfio_read_config(const struct rte_intr_handle *intr_handle, + if (vfio_dev_fd < 0) + return -1; + +- return pread64(vfio_dev_fd, buf, len, ++ return pread(vfio_dev_fd, buf, len, + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs); + } + +@@ -65,7 +66,7 @@ pci_vfio_write_config(const struct rte_intr_handle *intr_handle, + if (vfio_dev_fd < 0) + return -1; + +- return pwrite64(vfio_dev_fd, buf, len, ++ return pwrite(vfio_dev_fd, buf, len, + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs); + } + +@@ -79,7 +80,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + uint8_t cap_id, cap_offset; + + /* read PCI capability pointer from config space */ +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_CAPABILITY_LIST); + if (ret != sizeof(reg)) { +@@ -94,7 +95,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + while (cap_offset) { + + /* read PCI capability ID */ +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset); + if (ret != sizeof(reg)) { +@@ -108,7 +109,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + + /* if we haven't reached MSI-X, check next capability */ + if (cap_id != PCI_CAP_ID_MSIX) { +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset); + if (ret != sizeof(reg)) { +@@ -125,7 +126,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + /* else, read table offset */ + else { + /* table offset resides in the next 4 bytes */ +- ret = pread64(fd, ®, sizeof(reg), ++ ret = pread(fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset + 4); + if (ret != sizeof(reg)) { +@@ -134,7 +135,7 @@ pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table) + return -1; + } + +- ret = pread64(fd, &flags, sizeof(flags), ++ ret = pread(fd, &flags, sizeof(flags), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + cap_offset + 2); + if (ret != sizeof(flags)) { +@@ -161,7 +162,7 @@ pci_vfio_enable_bus_memory(int dev_fd) + uint16_t cmd; + int ret; + +- ret = pread64(dev_fd, 
&cmd, sizeof(cmd), ++ ret = pread(dev_fd, &cmd, sizeof(cmd), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + +@@ -174,7 +175,7 @@ pci_vfio_enable_bus_memory(int dev_fd) + return 0; + + cmd |= PCI_COMMAND_MEMORY; +- ret = pwrite64(dev_fd, &cmd, sizeof(cmd), ++ ret = pwrite(dev_fd, &cmd, sizeof(cmd), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + +@@ -193,7 +194,7 @@ pci_vfio_set_bus_master(int dev_fd, bool op) + uint16_t reg; + int ret; + +- ret = pread64(dev_fd, ®, sizeof(reg), ++ ret = pread(dev_fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + if (ret != sizeof(reg)) { +@@ -207,7 +208,7 @@ pci_vfio_set_bus_master(int dev_fd, bool op) + else + reg &= ~(PCI_COMMAND_MASTER); + +- ret = pwrite64(dev_fd, ®, sizeof(reg), ++ ret = pwrite(dev_fd, ®, sizeof(reg), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_COMMAND); + +@@ -463,7 +464,7 @@ pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index) + uint32_t ioport_bar; + int ret; + +- ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar), ++ ret = pread(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar), + VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + + PCI_BASE_ADDRESS_0 + bar_index*4); + if (ret != sizeof(ioport_bar)) { +@@ -1132,7 +1133,7 @@ pci_vfio_ioport_read(struct rte_pci_ioport *p, + if (vfio_dev_fd < 0) + return; + +- if (pread64(vfio_dev_fd, data, ++ if (pread(vfio_dev_fd, data, + len, p->base + offset) <= 0) + RTE_LOG(ERR, EAL, + "Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n", +@@ -1149,7 +1150,7 @@ pci_vfio_ioport_write(struct rte_pci_ioport *p, + if (vfio_dev_fd < 0) + return; + +- if (pwrite64(vfio_dev_fd, data, ++ if (pwrite(vfio_dev_fd, data, + len, p->base + offset) <= 0) + RTE_LOG(ERR, EAL, + "Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n", diff --git a/dpdk/drivers/bus/pci/pci_common.c b/dpdk/drivers/bus/pci/pci_common.c index bc3a7f39fe..756e308fdf 100644 --- a/dpdk/drivers/bus/pci/pci_common.c @@ -28880,6 +33184,103 @@ index bc3a7f39fe..756e308fdf 100644 struct rte_pci_bus rte_pci_bus = { .bus = { .scan = rte_pci_scan, +diff --git a/dpdk/drivers/bus/pci/pci_common_uio.c b/dpdk/drivers/bus/pci/pci_common_uio.c +index 76c661f054..a06378b239 100644 +--- a/dpdk/drivers/bus/pci/pci_common_uio.c ++++ b/dpdk/drivers/bus/pci/pci_common_uio.c +@@ -26,7 +26,7 @@ EAL_REGISTER_TAILQ(rte_uio_tailq) + static int + pci_uio_map_secondary(struct rte_pci_device *dev) + { +- int fd, i, j; ++ int fd, i = 0, j, res_idx; + struct mapped_pci_resource *uio_res; + struct mapped_pci_res_list *uio_res_list = + RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list); +@@ -37,7 +37,15 @@ pci_uio_map_secondary(struct rte_pci_device *dev) + if (rte_pci_addr_cmp(&uio_res->pci_addr, &dev->addr)) + continue; + +- for (i = 0; i != uio_res->nb_maps; i++) { ++ /* Map all BARs */ ++ for (res_idx = 0; res_idx != PCI_MAX_RESOURCE; res_idx++) { ++ /* skip empty BAR */ ++ if (dev->mem_resource[res_idx].phys_addr == 0) ++ continue; ++ ++ if (i >= uio_res->nb_maps) ++ return -1; ++ + /* + * open devname, to mmap it + */ +@@ -71,7 +79,9 @@ pci_uio_map_secondary(struct rte_pci_device *dev) + } + return -1; + } +- dev->mem_resource[i].addr = mapaddr; ++ dev->mem_resource[res_idx].addr = mapaddr; ++ ++ i++; + } + return 0; + } +@@ -96,15 +106,15 @@ pci_uio_map_resource(struct rte_pci_device *dev) + if (rte_intr_dev_fd_set(dev->intr_handle, -1)) + return -1; + +- /* secondary processes - use already recorded details */ +- if 
(rte_eal_process_type() != RTE_PROC_PRIMARY) +- return pci_uio_map_secondary(dev); +- + /* allocate uio resource */ + ret = pci_uio_alloc_resource(dev, &uio_res); + if (ret) + return ret; + ++ /* secondary processes - use already recorded details */ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return pci_uio_map_secondary(dev); ++ + /* Map all BARs */ + for (i = 0; i != PCI_MAX_RESOURCE; i++) { + /* skip empty BAR */ +@@ -220,6 +230,18 @@ pci_uio_unmap_resource(struct rte_pci_device *dev) + if (uio_res == NULL) + return; + ++ /* close fd */ ++ if (rte_intr_fd_get(dev->intr_handle) >= 0) ++ close(rte_intr_fd_get(dev->intr_handle)); ++ uio_cfg_fd = rte_intr_dev_fd_get(dev->intr_handle); ++ if (uio_cfg_fd >= 0) { ++ close(uio_cfg_fd); ++ rte_intr_dev_fd_set(dev->intr_handle, -1); ++ } ++ ++ rte_intr_fd_set(dev->intr_handle, -1); ++ rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN); ++ + /* secondary processes - just free maps */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return pci_uio_unmap(uio_res); +@@ -231,16 +253,4 @@ pci_uio_unmap_resource(struct rte_pci_device *dev) + + /* free uio resource */ + rte_free(uio_res); +- +- /* close fd if in primary process */ +- if (rte_intr_fd_get(dev->intr_handle) >= 0) +- close(rte_intr_fd_get(dev->intr_handle)); +- uio_cfg_fd = rte_intr_dev_fd_get(dev->intr_handle); +- if (uio_cfg_fd >= 0) { +- close(uio_cfg_fd); +- rte_intr_dev_fd_set(dev->intr_handle, -1); +- } +- +- rte_intr_fd_set(dev->intr_handle, -1); +- rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN); + } diff --git a/dpdk/drivers/bus/pci/rte_bus_pci.h b/dpdk/drivers/bus/pci/rte_bus_pci.h index b193114fe5..76cbf49ab8 100644 --- a/dpdk/drivers/bus/pci/rte_bus_pci.h @@ -28918,10 +33319,45 @@ index 161ab86d3b..f262af3316 100644 rte_pci_unregister; }; diff --git a/dpdk/drivers/bus/vdev/vdev.c b/dpdk/drivers/bus/vdev/vdev.c -index 41bc07dde7..7974b27295 100644 +index 41bc07dde7..ec7abe7cda 100644 --- a/dpdk/drivers/bus/vdev/vdev.c +++ b/dpdk/drivers/bus/vdev/vdev.c -@@ -578,18 +578,19 @@ vdev_cleanup(void) +@@ -247,6 +247,10 @@ alloc_devargs(const char *name, const char *args) + devargs->data = strdup(args); + else + devargs->data = strdup(""); ++ if (devargs->data == NULL) { ++ free(devargs); ++ return NULL; ++ } + devargs->args = devargs->data; + + ret = strlcpy(devargs->name, name, sizeof(devargs->name)); +@@ -272,6 +276,7 @@ insert_vdev(const char *name, const char *args, + return -EINVAL; + + devargs = alloc_devargs(name, args); ++ + if (!devargs) + return -ENOMEM; + +@@ -283,7 +288,6 @@ insert_vdev(const char *name, const char *args, + + dev->device.bus = &rte_vdev_bus; + dev->device.numa_node = SOCKET_ID_ANY; +- dev->device.name = devargs->name; + + if (find_vdev(name)) { + /* +@@ -298,6 +302,7 @@ insert_vdev(const char *name, const char *args, + if (init) + rte_devargs_insert(&devargs); + dev->device.devargs = devargs; ++ dev->device.name = devargs->name; + TAILQ_INSERT_TAIL(&vdev_device_list, dev, next); + + if (p_dev) +@@ -578,18 +583,19 @@ vdev_cleanup(void) int ret = 0; if (dev->device.driver == NULL) @@ -28944,7 +33380,7 @@ index 41bc07dde7..7974b27295 100644 } diff --git a/dpdk/drivers/common/cnxk/cnxk_security.c b/dpdk/drivers/common/cnxk/cnxk_security.c -index 85105472a1..bdb5433d13 100644 +index 85105472a1..dd19ea9ab9 100644 --- a/dpdk/drivers/common/cnxk/cnxk_security.c +++ b/dpdk/drivers/common/cnxk/cnxk_security.c @@ -274,6 +274,14 @@ ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa) @@ -28984,6 +33420,265 @@ index 
85105472a1..bdb5433d13 100644 /* Outer header flow label source */ if (!ipsec_xfrm->options.copy_flabel) { sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = +@@ -598,235 +614,6 @@ cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa) + return !!sa->w2.s.valid; + } + +-static inline int +-ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm, +- struct rte_crypto_sym_xform *crypto_xfrm) +-{ +- if (crypto_xfrm->next == NULL) +- return -EINVAL; +- +- if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { +- if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH || +- crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) +- return -EINVAL; +- } else { +- if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER || +- crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH) +- return -EINVAL; +- } +- +- return 0; +-} +- +-static int +-onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt, +- uint8_t *cipher_key, uint8_t *hmac_opad_ipad, +- struct rte_security_ipsec_xform *ipsec_xfrm, +- struct rte_crypto_sym_xform *crypto_xfrm) +-{ +- struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm; +- int rc, length, auth_key_len; +- const uint8_t *key = NULL; +- uint8_t ccm_flag = 0; +- +- /* Set direction */ +- switch (ipsec_xfrm->direction) { +- case RTE_SECURITY_IPSEC_SA_DIR_INGRESS: +- ctl->direction = ROC_IE_SA_DIR_INBOUND; +- auth_xfrm = crypto_xfrm; +- cipher_xfrm = crypto_xfrm->next; +- break; +- case RTE_SECURITY_IPSEC_SA_DIR_EGRESS: +- ctl->direction = ROC_IE_SA_DIR_OUTBOUND; +- cipher_xfrm = crypto_xfrm; +- auth_xfrm = crypto_xfrm->next; +- break; +- default: +- return -EINVAL; +- } +- +- /* Set protocol - ESP vs AH */ +- switch (ipsec_xfrm->proto) { +- case RTE_SECURITY_IPSEC_SA_PROTO_ESP: +- ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP; +- break; +- case RTE_SECURITY_IPSEC_SA_PROTO_AH: +- return -ENOTSUP; +- default: +- return -EINVAL; +- } +- +- /* Set mode - transport vs tunnel */ +- switch (ipsec_xfrm->mode) { +- case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT: +- ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT; +- break; +- case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL: +- ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL; +- break; +- default: +- return -EINVAL; +- } +- +- /* Set encryption algorithm */ +- if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) { +- length = crypto_xfrm->aead.key.length; +- +- switch (crypto_xfrm->aead.algo) { +- case RTE_CRYPTO_AEAD_AES_GCM: +- ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM; +- ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL; +- memcpy(salt, &ipsec_xfrm->salt, 4); +- key = crypto_xfrm->aead.key.data; +- break; +- case RTE_CRYPTO_AEAD_AES_CCM: +- ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CCM; +- ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL; +- ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN; +- *salt = ccm_flag; +- memcpy(PLT_PTR_ADD(salt, 1), &ipsec_xfrm->salt, 3); +- key = crypto_xfrm->aead.key.data; +- break; +- default: +- return -ENOTSUP; +- } +- +- } else { +- rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm); +- if (rc) +- return rc; +- +- switch (cipher_xfrm->cipher.algo) { +- case RTE_CRYPTO_CIPHER_AES_CBC: +- ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC; +- break; +- case RTE_CRYPTO_CIPHER_AES_CTR: +- ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR; +- break; +- default: +- return -ENOTSUP; +- } +- +- switch (auth_xfrm->auth.algo) { +- case RTE_CRYPTO_AUTH_SHA1_HMAC: +- ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1; +- break; +- default: +- return -ENOTSUP; +- } +- auth_key_len = auth_xfrm->auth.key.length; +- if (auth_key_len < 20 || auth_key_len > 64) +- return -ENOTSUP; +- 
+- key = cipher_xfrm->cipher.key.data; +- length = cipher_xfrm->cipher.key.length; +- +- ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad); +- } +- +- switch (length) { +- case ROC_CPT_AES128_KEY_LEN: +- ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128; +- break; +- case ROC_CPT_AES192_KEY_LEN: +- ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192; +- break; +- case ROC_CPT_AES256_KEY_LEN: +- ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256; +- break; +- default: +- return -EINVAL; +- } +- +- memcpy(cipher_key, key, length); +- +- if (ipsec_xfrm->options.esn) +- ctl->esn_en = 1; +- +- ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi); +- return 0; +-} +- +-int +-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa, +- struct rte_security_ipsec_xform *ipsec_xfrm, +- struct rte_crypto_sym_xform *crypto_xfrm) +-{ +- struct roc_ie_onf_sa_ctl *ctl = &sa->ctl; +- int rc; +- +- rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key, +- sa->hmac_key, ipsec_xfrm, +- crypto_xfrm); +- if (rc) +- return rc; +- +- rte_wmb(); +- +- /* Enable SA */ +- ctl->valid = 1; +- return 0; +-} +- +-int +-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa, +- struct rte_security_ipsec_xform *ipsec_xfrm, +- struct rte_crypto_sym_xform *crypto_xfrm) +-{ +- struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel; +- struct roc_ie_onf_sa_ctl *ctl = &sa->ctl; +- int rc; +- +- /* Fill common params */ +- rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key, +- sa->hmac_key, ipsec_xfrm, +- crypto_xfrm); +- if (rc) +- return rc; +- +- if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) +- goto skip_tunnel_info; +- +- /* Tunnel header info */ +- switch (tunnel->type) { +- case RTE_SECURITY_IPSEC_TUNNEL_IPV4: +- memcpy(&sa->ip_src, &tunnel->ipv4.src_ip, +- sizeof(struct in_addr)); +- memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip, +- sizeof(struct in_addr)); +- break; +- case RTE_SECURITY_IPSEC_TUNNEL_IPV6: +- return -ENOTSUP; +- default: +- return -EINVAL; +- } +- +- /* Update udp encap ports */ +- if (ipsec_xfrm->options.udp_encap == 1) { +- sa->udp_src = 4500; +- sa->udp_dst = 4500; +- } +- +-skip_tunnel_info: +- rte_wmb(); +- +- /* Enable SA */ +- ctl->valid = 1; +- return 0; +-} +- +-bool +-cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa) +-{ +- return !!sa->ctl.valid; +-} +- +-bool +-cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa) +-{ +- return !!sa->ctl.valid; +-} +- + uint8_t + cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo, + enum rte_crypto_auth_algorithm a_algo, +diff --git a/dpdk/drivers/common/cnxk/cnxk_security.h b/dpdk/drivers/common/cnxk/cnxk_security.h +index 4e477ec53f..77fcd82b12 100644 +--- a/dpdk/drivers/common/cnxk/cnxk_security.h ++++ b/dpdk/drivers/common/cnxk/cnxk_security.h +@@ -47,18 +47,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa, + bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa); + bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa); + +-/* [CN9K, CN10K) */ +-int __roc_api +-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa, +- struct rte_security_ipsec_xform *ipsec_xfrm, +- struct rte_crypto_sym_xform *crypto_xfrm); +-int __roc_api +-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa, +- struct rte_security_ipsec_xform *ipsec_xfrm, +- struct rte_crypto_sym_xform *crypto_xfrm); +-bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa); +-bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa 
*sa); +- + /* [CN9K] */ + int __roc_api + cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec, diff --git a/dpdk/drivers/common/cnxk/cnxk_security_ar.h b/dpdk/drivers/common/cnxk/cnxk_security_ar.h index deb38db0d0..d0151a752c 100644 --- a/dpdk/drivers/common/cnxk/cnxk_security_ar.h @@ -29021,11 +33716,53 @@ index 849735921c..ca8170624d 100644 deps = ['eal', 'pci', 'bus_pci', 'mbuf', 'security'] sources = files( 'roc_ae.c', +diff --git a/dpdk/drivers/common/cnxk/roc_cpt.c b/dpdk/drivers/common/cnxk/roc_cpt.c +index fb97ec89b2..c9adfe6a02 100644 +--- a/dpdk/drivers/common/cnxk/roc_cpt.c ++++ b/dpdk/drivers/common/cnxk/roc_cpt.c +@@ -656,7 +656,7 @@ roc_cpt_dev_init(struct roc_cpt *roc_cpt) + rc = dev_init(dev, pci_dev); + if (rc) { + plt_err("Failed to init roc device"); +- goto fail; ++ return rc; + } + + cpt->pci_dev = pci_dev; +@@ -688,6 +688,7 @@ roc_cpt_dev_init(struct roc_cpt *roc_cpt) + return 0; + + fail: ++ dev_fini(dev, pci_dev); + return rc; + } + diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c -index 59128a3552..33865f43fa 100644 +index 59128a3552..67fffc89fe 100644 --- a/dpdk/drivers/common/cnxk/roc_dev.c +++ b/dpdk/drivers/common/cnxk/roc_dev.c -@@ -969,6 +969,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) +@@ -190,9 +190,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg) + vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz); + if (vf_msg) { + mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg); +- memcpy((uint8_t *)vf_msg + +- sizeof(struct mbox_msghdr), &linfo, +- sizeof(struct cgx_link_user_info)); ++ mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr), &linfo, ++ sizeof(struct cgx_link_user_info)); + + vf_msg->rc = msg->rc; + vf_msg->pcifunc = msg->pcifunc; +@@ -467,6 +466,8 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg) + size_t size; + + size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN); ++ if (size < sizeof(struct mbox_msghdr)) ++ return; + /* Send UP message to all VF's */ + for (vf = 0; vf < vf_mbox->ndevs; vf++) { + /* VF active */ +@@ -969,6 +970,7 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) case PCI_DEVID_CNXK_RVU_AF_VF: case PCI_DEVID_CNXK_RVU_VF: case PCI_DEVID_CNXK_RVU_SDP_VF: @@ -29051,6 +33788,77 @@ index 93c8318a3d..0e2f803077 100644 if (dpi_mz == NULL) { plt_err("dpi memzone reserve failed"); rc = -ENOMEM; +diff --git a/dpdk/drivers/common/cnxk/roc_ie_on.h b/dpdk/drivers/common/cnxk/roc_ie_on.h +index 057ff95362..585522e7d3 100644 +--- a/dpdk/drivers/common/cnxk/roc_ie_on.h ++++ b/dpdk/drivers/common/cnxk/roc_ie_on.h +@@ -268,66 +268,6 @@ struct roc_ie_on_inb_sa { + #define ROC_IE_ON_UCC_L2_HDR_INFO_ERR 0xCF + #define ROC_IE_ON_UCC_L2_HDR_LEN_ERR 0xE0 + +-struct roc_ie_onf_sa_ctl { +- uint32_t spi; +- uint64_t exp_proto_inter_frag : 8; +- uint64_t rsvd_41_40 : 2; +- /* Disable SPI, SEQ data in RPTR for Inbound inline */ +- uint64_t spi_seq_dis : 1; +- uint64_t esn_en : 1; +- uint64_t rsvd_44_45 : 2; +- uint64_t encap_type : 2; +- uint64_t enc_type : 3; +- uint64_t rsvd_48 : 1; +- uint64_t auth_type : 4; +- uint64_t valid : 1; +- uint64_t direction : 1; +- uint64_t outer_ip_ver : 1; +- uint64_t inner_ip_ver : 1; +- uint64_t ipsec_mode : 1; +- uint64_t ipsec_proto : 1; +- uint64_t aes_key_len : 2; +-}; +- +-struct roc_onf_ipsec_outb_sa { +- /* w0 */ +- struct roc_ie_onf_sa_ctl ctl; +- +- /* w1 */ +- uint8_t nonce[4]; +- uint16_t udp_src; +- uint16_t udp_dst; +- +- /* w2 */ +- uint32_t ip_src; +- uint32_t ip_dst; +- +- /* 
w3-w6 */ +- uint8_t cipher_key[32]; +- +- /* w7-w12 */ +- uint8_t hmac_key[48]; +-}; +- +-struct roc_onf_ipsec_inb_sa { +- /* w0 */ +- struct roc_ie_onf_sa_ctl ctl; +- +- /* w1 */ +- uint8_t nonce[4]; /* Only for AES-GCM */ +- uint32_t unused; +- +- /* w2 */ +- uint32_t esn_hi; +- uint32_t esn_low; +- +- /* w3-w6 */ +- uint8_t cipher_key[32]; +- +- /* w7-w12 */ +- uint8_t hmac_key[48]; +-}; +- + #define ROC_ONF_IPSEC_INB_MAX_L2_SZ 32UL + #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ 30UL + #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2) diff --git a/dpdk/drivers/common/cnxk/roc_io.h b/dpdk/drivers/common/cnxk/roc_io.h index 13f98ed549..45cbb4e587 100644 --- a/dpdk/drivers/common/cnxk/roc_io.h @@ -29096,7 +33904,7 @@ index 13f98ed549..45cbb4e587 100644 static __plt_always_inline void diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h -index 8b0384c737..18aa97b84a 100644 +index 8b0384c737..9fc22d8a65 100644 --- a/dpdk/drivers/common/cnxk/roc_mbox.h +++ b/dpdk/drivers/common/cnxk/roc_mbox.h @@ -471,7 +471,7 @@ struct lmtst_tbl_setup_req { @@ -29108,6 +33916,39 @@ index 8b0384c737..18aa97b84a 100644 #define CGX_TX_STATS_COUNT 18 uint64_t __io rx_stats[CGX_RX_STATS_COUNT]; uint64_t __io tx_stats[CGX_TX_STATS_COUNT]; +@@ -855,12 +855,12 @@ struct nix_cn10k_aq_enq_req { + struct nix_cn10k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { +- struct nix_cn10k_rq_ctx_s rq; +- struct nix_cn10k_sq_ctx_s sq; +- struct nix_cq_ctx_s cq; +- struct nix_rsse_s rss; +- struct nix_rx_mce_s mce; +- struct nix_band_prof_s prof; ++ __io struct nix_cn10k_rq_ctx_s rq; ++ __io struct nix_cn10k_sq_ctx_s sq; ++ __io struct nix_cq_ctx_s cq; ++ __io struct nix_rsse_s rss; ++ __io struct nix_rx_mce_s mce; ++ __io struct nix_band_prof_s prof; + }; + }; + +@@ -1096,11 +1096,11 @@ struct nix_rq_cpt_field_mask_cfg_req { + #define RQ_CTX_MASK_MAX 6 + union { + uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX]; +- struct nix_cn10k_rq_ctx_s rq_set; ++ __io struct nix_cn10k_rq_ctx_s rq_set; + }; + union { + uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX]; +- struct nix_cn10k_rq_ctx_s rq_mask; ++ __io struct nix_cn10k_rq_ctx_s rq_mask; + }; + struct nix_lf_rx_ipec_cfg1_req { + uint32_t __io spb_cpt_aura; @@ -1169,7 +1169,7 @@ struct nix_bp_cfg_req { * so maximum 256 channels are possible. 
*/ @@ -29117,6 +33958,32 @@ index 8b0384c737..18aa97b84a 100644 #define NIX_LBK_MAX_CHAN 1 struct nix_bp_cfg_rsp { struct mbox_msghdr hdr; +diff --git a/dpdk/drivers/common/cnxk/roc_nix.c b/dpdk/drivers/common/cnxk/roc_nix.c +index 2a320cc291..b5c87e8056 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix.c ++++ b/dpdk/drivers/common/cnxk/roc_nix.c +@@ -426,7 +426,7 @@ skip_dev_init: + sdp_lbk_id_update(pci_dev, nix); + nix->pci_dev = pci_dev; + nix->reta_sz = reta_sz; +- nix->mtu = ROC_NIX_DEFAULT_HW_FRS; ++ nix->mtu = roc_nix_max_pkt_len(roc_nix); + + /* Always start with full FC for LBK */ + if (nix->lbk_link) { +diff --git a/dpdk/drivers/common/cnxk/roc_nix.h b/dpdk/drivers/common/cnxk/roc_nix.h +index 6654a2df78..1125fff020 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix.h ++++ b/dpdk/drivers/common/cnxk/roc_nix.h +@@ -236,8 +236,6 @@ struct roc_nix_eeprom_info { + #define ROC_NIX_RSS_KEY_LEN 48 /* 352 Bits */ + #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1) + +-#define ROC_NIX_DEFAULT_HW_FRS 1514 +- + #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11 + #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2 + diff --git a/dpdk/drivers/common/cnxk/roc_nix_fc.c b/dpdk/drivers/common/cnxk/roc_nix_fc.c index 033e17a4bf..5e8a01c775 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_fc.c @@ -29140,10 +34007,20 @@ index 033e17a4bf..5e8a01c775 100644 if (rc) goto exit; diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.c b/dpdk/drivers/common/cnxk/roc_nix_inl.c -index 782536db4c..92ff44888d 100644 +index 782536db4c..15fec65d93 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_inl.c +++ b/dpdk/drivers/common/cnxk/roc_nix_inl.c -@@ -1039,7 +1039,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable) +@@ -399,8 +399,7 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags) + return -EFAULT; + + PLT_SET_USED(max_frags); +- if (idev == NULL) +- return -ENOTSUP; ++ + roc_cpt = idev->cpt; + if (!roc_cpt) { + plt_err("Cannot support inline inbound, cryptodev not probed"); +@@ -1039,7 +1038,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable) return -EFAULT; if (roc_model_is_cn10kb_a0()) { @@ -29152,6 +34029,83 @@ index 782536db4c..92ff44888d 100644 if (rc) { plt_err("Failed to get rq mask rc=%d", rc); return rc; +diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.h b/dpdk/drivers/common/cnxk/roc_nix_inl.h +index c537262819..2201717318 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_inl.h ++++ b/dpdk/drivers/common/cnxk/roc_nix_inl.h +@@ -4,24 +4,6 @@ + #ifndef _ROC_NIX_INL_H_ + #define _ROC_NIX_INL_H_ + +-/* ONF INB HW area */ +-#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ \ +- PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN) +-/* ONF INB SW reserved area */ +-#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384 +-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ \ +- (ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD) +-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9 +- +-/* ONF OUTB HW area */ +-#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ \ +- PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN) +-/* ONF OUTB SW reserved area */ +-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128 +-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ \ +- (ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD) +-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8 +- + /* ON INB HW area */ + #define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ \ + PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN) +@@ -31,10 +13,10 @@ + (ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD) + #define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 
10 + +-/* ONF OUTB HW area */ ++/* ON OUTB HW area */ + #define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ \ + PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN) +-/* ONF OUTB SW reserved area */ ++/* ON OUTB SW reserved area */ + #define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256 + #define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ \ + (ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD) +@@ -107,34 +89,6 @@ roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa) + return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ); + } + +-static inline struct roc_onf_ipsec_inb_sa * +-roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx) +-{ +- uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2; +- +- return PLT_PTR_ADD(base, off); +-} +- +-static inline struct roc_onf_ipsec_outb_sa * +-roc_nix_inl_onf_ipsec_outb_sa(uintptr_t base, uint64_t idx) +-{ +- uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2; +- +- return PLT_PTR_ADD(base, off); +-} +- +-static inline void * +-roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(void *sa) +-{ +- return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ); +-} +- +-static inline void * +-roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa) +-{ +- return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ); +-} +- + static inline struct roc_ot_ipsec_inb_sa * + roc_nix_inl_ot_ipsec_inb_sa(uintptr_t base, uint64_t idx) + { diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c b/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c index c3d94dd0da..746b3d0a03 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_inl_dev.c @@ -29175,6 +34129,42 @@ index c3d94dd0da..746b3d0a03 100644 if (rc) { plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc); return rc; +diff --git a/dpdk/drivers/common/cnxk/roc_nix_rss.c b/dpdk/drivers/common/cnxk/roc_nix_rss.c +index 7de69aabeb..5182c5a9cb 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_rss.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_rss.c +@@ -182,7 +182,7 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group, + if (rc) + return rc; + +- memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX); ++ memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX); + return 0; + } + +@@ -195,7 +195,7 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group, + if (group >= ROC_NIX_RSS_GRPS) + return NIX_ERR_PARAM; + +- memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX); ++ memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX); + return 0; + } + +diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm.c b/dpdk/drivers/common/cnxk/roc_nix_tm.c +index be8da714cd..810949ca76 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_tm.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_tm.c +@@ -326,6 +326,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc, + uint8_t k = 0; + int rc = 0; + ++ if (roc_nix_is_sdp(roc_nix)) ++ return 0; ++ + sq_s = nix->sqs[sq]; + if (!sq_s) + return -ENOENT; diff --git a/dpdk/drivers/common/cnxk/roc_npa.c b/dpdk/drivers/common/cnxk/roc_npa.c index ee42434c38..ea58030477 100644 --- a/dpdk/drivers/common/cnxk/roc_npa.c @@ -29538,7 +34528,7 @@ index fe57811a84..52f7d96b41 100644 plt_err("Unknown NIX_RX_ACTIONOP found"); return; diff --git a/dpdk/drivers/common/cnxk/roc_npc_parse.c b/dpdk/drivers/common/cnxk/roc_npc_parse.c -index ff00c746d6..e695b755d7 100644 +index ff00c746d6..ce66d2a7d8 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_parse.c +++ b/dpdk/drivers/common/cnxk/roc_npc_parse.c @@ -97,6 +97,7 @@ npc_parse_pre_l2(struct npc_parse_state *pst) @@ -29766,7 +34756,7 @@ index ff00c746d6..e695b755d7 
100644 - while (pattern->type == ROC_NPC_ITEM_TYPE_VLAN) { - if (nr_vlans > NPC_MAX_SUPPORTED_VLANS - 1) - return NPC_ERR_PATTERN_NOTSUP; -- + - vlan_item[nr_vlans] = pattern->spec; - nr_vlans++; - @@ -29780,7 +34770,7 @@ index ff00c746d6..e695b755d7 100644 - pattern++; - pattern = npc_parse_skip_void_and_any_items(pattern); - } - +- - switch (nr_vlans) { - case 1: - lt = NPC_LT_LB_CTAG; @@ -29986,6 +34976,45 @@ index ff00c746d6..e695b755d7 100644 break; case ROC_NPC_ITEM_TYPE_L3_CUSTOM: lt = NPC_LT_LC_CUSTOM0; +@@ -806,6 +1019,7 @@ npc_parse_lf(struct npc_parse_state *pst) + { + const struct roc_npc_item_info *pattern, *last_pattern; + char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; ++ const struct roc_npc_flow_item_eth *eth_item; + struct npc_parse_item_info info; + int lid, lt, lflags; + int nr_vlans = 0; +@@ -822,10 +1036,12 @@ npc_parse_lf(struct npc_parse_state *pst) + lt = NPC_LT_LF_TU_ETHER; + lflags = 0; + ++ eth_item = pst->pattern->spec; ++ + /* No match support for vlan tags */ + info.def_mask = NULL; + info.hw_mask = NULL; +- info.len = pst->pattern->size; ++ info.len = sizeof(eth_item->hdr); + info.spec = NULL; + info.mask = NULL; + info.hw_hdr_len = 0; +@@ -856,12 +1072,15 @@ npc_parse_lf(struct npc_parse_state *pst) + } + + info.hw_mask = &hw_mask; +- info.len = pst->pattern->size; ++ info.len = sizeof(eth_item->hdr); + info.hw_hdr_len = 0; + npc_get_hw_supp_mask(pst, &info, lid, lt); + info.spec = NULL; + info.mask = NULL; + ++ if (eth_item && eth_item->has_vlan) ++ pst->set_vlan_ltype_mask = true; ++ + rc = npc_parse_item_basic(pst->pattern, &info); + if (rc != 0) + return rc; diff --git a/dpdk/drivers/common/cnxk/roc_npc_priv.h b/dpdk/drivers/common/cnxk/roc_npc_priv.h index 1a597280d1..1de33932e7 100644 --- a/dpdk/drivers/common/cnxk/roc_npc_priv.h @@ -30106,10 +35135,21 @@ index c357c19c0b..5b0ddac42d 100644 /* Below fields are accessed by hardware */ union { diff --git a/dpdk/drivers/common/cnxk/version.map b/dpdk/drivers/common/cnxk/version.map -index 17f0ec6b48..ae9eaf360c 100644 +index 17f0ec6b48..dae69e7272 100644 --- a/dpdk/drivers/common/cnxk/version.map +++ b/dpdk/drivers/common/cnxk/version.map -@@ -353,6 +353,7 @@ INTERNAL { +@@ -15,10 +15,6 @@ INTERNAL { + cnxk_logtype_sso; + cnxk_logtype_tim; + cnxk_logtype_tm; +- cnxk_onf_ipsec_inb_sa_fill; +- cnxk_onf_ipsec_outb_sa_fill; +- cnxk_onf_ipsec_inb_sa_valid; +- cnxk_onf_ipsec_outb_sa_valid; + cnxk_ot_ipsec_inb_sa_fill; + cnxk_ot_ipsec_outb_sa_fill; + cnxk_ot_ipsec_inb_sa_valid; +@@ -353,6 +349,7 @@ INTERNAL { roc_npc_mcam_write_entry; roc_npc_mcam_read_counter; roc_npc_profile_name_get; @@ -30117,6 +35157,184 @@ index 17f0ec6b48..ae9eaf360c 100644 roc_npc_validate_portid_action; roc_ot_ipsec_inb_sa_init; roc_ot_ipsec_outb_sa_init; +diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h b/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h +index 8ec6aac915..26bf52827e 100644 +--- a/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h ++++ b/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h +@@ -726,6 +726,74 @@ static inline void __gen_auth_key(struct program *program, + authdata->key, authdata->key_type); + } + ++/** ++ * rta_inline_ipsec_query() - Provide indications on which data items can be inlined ++ * and which shall be referenced in IPsec shared descriptor. ++ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands, ++ * excluding the data items to be inlined (or corresponding ++ * pointer if an item is not inlined). 
Each cnstr_* function that ++ * generates descriptors should have a define mentioning ++ * corresponding length. ++ * @jd_len: Maximum length of the job descriptor(s) that will be used ++ * together with the shared descriptor. ++ * @data_len: Array of lengths of the data items trying to be inlined ++ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0 ++ * otherwise. ++ * @count: Number of data items (size of @data_len array); must be <= 32 ++ * @auth_algtype: Authentication algorithm type. ++ * @auth_index: Index value of data_len for authentication key length. ++ * -1 if authentication key length is not present in data_len. ++ * ++ * Return: 0 if data can be inlined / referenced, negative value if not. If 0, ++ * check @inl_mask for details. ++ */ ++static inline int ++rta_inline_ipsec_query(unsigned int sd_base_len, ++ unsigned int jd_len, ++ unsigned int *data_len, ++ uint32_t *inl_mask, ++ unsigned int count, ++ uint32_t auth_algtype, ++ int32_t auth_index) ++{ ++ uint32_t dkp_protid; ++ ++ switch (auth_algtype & OP_PCL_IPSEC_AUTH_MASK) { ++ case OP_PCL_IPSEC_HMAC_MD5_96: ++ case OP_PCL_IPSEC_HMAC_MD5_128: ++ dkp_protid = OP_PCLID_DKP_MD5; ++ break; ++ case OP_PCL_IPSEC_HMAC_SHA1_96: ++ case OP_PCL_IPSEC_HMAC_SHA1_160: ++ dkp_protid = OP_PCLID_DKP_SHA1; ++ break; ++ case OP_PCL_IPSEC_HMAC_SHA2_256_128: ++ dkp_protid = OP_PCLID_DKP_SHA256; ++ break; ++ case OP_PCL_IPSEC_HMAC_SHA2_384_192: ++ dkp_protid = OP_PCLID_DKP_SHA384; ++ break; ++ case OP_PCL_IPSEC_HMAC_SHA2_512_256: ++ dkp_protid = OP_PCLID_DKP_SHA512; ++ break; ++ default: ++ return rta_inline_query(sd_base_len, ++ jd_len, ++ data_len, ++ inl_mask, count); ++ } ++ ++ /* Updating the maximum supported inline key length */ ++ if (auth_index != -1) { ++ if (split_key_len(dkp_protid) > data_len[auth_index]) ++ data_len[auth_index] = split_key_len(dkp_protid); ++ } ++ return rta_inline_query(sd_base_len, ++ jd_len, ++ data_len, ++ inl_mask, count); ++} ++ + /** + * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared + * descriptor. 
+diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h +index 289ee2a7d5..070cad0147 100644 +--- a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h ++++ b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h +@@ -1023,6 +1023,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p, + SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1); + MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | +@@ -1070,6 +1075,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p, + + MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | +diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h +index b38c15a24f..d41bacf8f9 100644 +--- a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h ++++ b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2020-2022 NXP ++ * Copyright 2020-2023 NXP + */ + + #ifndef __DESC_SDAP_H__ +@@ -628,6 +628,10 @@ static inline int pdcp_sdap_insert_no_snoop_op( + /* Save the ICV generated */ + MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); + /* The CHA will be reused so we need to clear it */ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | +@@ -718,6 +722,10 @@ static inline int pdcp_sdap_insert_no_snoop_op( + /* Save the ICV which is stalling in output FIFO to MATH3 */ + MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); + /* Reset class 1 CHA */ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | +diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c +index 9daac4bc03..860e702333 100644 +--- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c ++++ b/dpdk/drivers/common/dpaax/dpaax_iova_table.c +@@ -1,5 +1,5 @@ + /* SPDX-License-Identifier: BSD-3-Clause +- * Copyright 2018 NXP ++ * Copyright 2018-2023 NXP + */ + + #include +@@ -139,10 +139,12 @@ read_memory_node(unsigned int *count) + } + + DPAAX_DEBUG("Device-tree memory node data:"); +- do { ++ ++ while (j > 0) { ++ --j; + DPAAX_DEBUG(" %08" PRIx64 " %08zu", + nodes[j].addr, nodes[j].len); +- } while (--j); ++ } + + cleanup: + close(fd); +@@ -255,10 +257,7 @@ dpaax_iova_table_populate(void) + void + dpaax_iova_table_depopulate(void) + { +- if (dpaax_iova_table_p == NULL) +- return; +- +- rte_free(dpaax_iova_table_p->entries); ++ rte_free(dpaax_iova_table_p); + dpaax_iova_table_p = NULL; + + DPAAX_DEBUG("IOVA Table cleaned"); diff --git a/dpdk/drivers/common/iavf/iavf_common.c b/dpdk/drivers/common/iavf/iavf_common.c index 855a0ab2f5..dc7662bc1b 100644 --- a/dpdk/drivers/common/iavf/iavf_common.c @@ -30291,6 +35509,19 @@ index 529b62212d..3ce25e644d 100644 +int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode, int v_retval, u8 *msg, u16 msglen); #endif /* _IDPF_PROTOTYPE_H_ */ +diff --git 
a/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h b/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h +index b8cb22e474..14b897b777 100644 +--- a/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h ++++ b/dpdk/drivers/common/idpf/base/virtchnl2_lan_desc.h +@@ -110,7 +110,7 @@ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S) + #define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12 + #define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M \ +- MAKEMASK(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M) ++ MAKEMASK(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S) + #define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15 + #define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \ + BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S) diff --git a/dpdk/drivers/common/mlx5/linux/meson.build b/dpdk/drivers/common/mlx5/linux/meson.build index 7e1575efc8..b13ae29844 100644 --- a/dpdk/drivers/common/mlx5/linux/meson.build @@ -30446,6 +35677,19 @@ index d6e91b5296..02b5d54363 100644 __rte_internal int +diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/dpdk/drivers/common/mlx5/mlx5_common_mr.c +index 0e1d2434ab..1fdd8b4c80 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_common_mr.c ++++ b/dpdk/drivers/common/mlx5/mlx5_common_mr.c +@@ -1381,7 +1381,7 @@ mlx5_mempool_get_chunks(struct rte_mempool *mp, struct mlx5_range **out, + + DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name); + n = mp->nb_mem_chunks; +- *out = calloc(sizeof(**out), n); ++ *out = calloc(n, sizeof(**out)); + if (*out == NULL) + return -1; + rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out); diff --git a/dpdk/drivers/common/mlx5/mlx5_common_pci.c b/dpdk/drivers/common/mlx5/mlx5_common_pci.c index 73178ce0f3..fdf03f2a53 100644 --- a/dpdk/drivers/common/mlx5/mlx5_common_pci.c @@ -30460,7 +35704,7 @@ index 73178ce0f3..fdf03f2a53 100644 switch (pci_dev->id.device_id) { case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c -index 59cebb530f..9fdca2fecc 100644 +index 59cebb530f..9a0fc3501d 100644 --- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c @@ -543,7 +543,7 @@ mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx, @@ -30472,8 +35716,42 @@ index 59cebb530f..9fdca2fecc 100644 vdpa_attr->valid = 0; } else { vdpa_attr->valid = 1; -@@ -1002,6 +1002,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, - general_obj_types) & +@@ -902,18 +902,6 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr, + max_geneve_tlv_option_data_len); + attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos); +- attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO); +- attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); +- attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS); +- attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & +- MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE); + attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr, + wqe_index_ignore_cap); + attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd); +@@ -937,6 +925,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + /* Read the general_obj_types bitmap and extract the relevant bits. 
*/ + general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr, + general_obj_types); ++ attr->qos.flow_meter_aso_sup = ++ !!(general_obj_types_supported & ++ MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO); + attr->vdpa.valid = !!(general_obj_types_supported & + MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); + attr->vdpa.queue_counters_valid = +@@ -998,10 +989,11 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled); + attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time); + attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto); +- attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr, +- general_obj_types) & ++ attr->ct_offload = !!(general_obj_types_supported & MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD); attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop); + attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table); @@ -30481,7 +35759,7 @@ index 59cebb530f..9fdca2fecc 100644 attr->max_flow_counter_15_0 = MLX5_GET(cmd_hca_cap, hcattr, max_flow_counter_15_0); attr->max_flow_counter_31_16 = MLX5_GET(cmd_hca_cap, hcattr, -@@ -1013,7 +1015,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -1013,7 +1005,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, attr->flow_access_aso_opc_mod = MLX5_GET(cmd_hca_cap, hcattr, flow_access_aso_opc_mod); if (attr->crypto) { @@ -30492,8 +35770,34 @@ index 59cebb530f..9fdca2fecc 100644 hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc, MLX5_GET_HCA_CAP_OP_MOD_CRYPTO | MLX5_HCA_CAP_OPMOD_GET_CUR); +@@ -1667,7 +1661,7 @@ mlx5_devx_cmd_create_rqt(void *ctx, + uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; + void *rqt_ctx; + struct mlx5_devx_obj *rqt = NULL; +- int i; ++ unsigned int i; + + in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); + if (!in) { +@@ -1720,7 +1714,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, + uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; + uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); + void *rqt_ctx; +- int i; ++ unsigned int i; + int ret; + + if (!in) { +@@ -1733,7 +1727,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, + MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1); + rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context); + MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type); +- MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size); + MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size); + for (i = 0; i < rqt_attr->rqt_actual_size; i++) + MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]); diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h -index 2b5c43ee6e..dab70b9469 100644 +index 2b5c43ee6e..f2cd353672 100644 --- a/dpdk/drivers/common/mlx5/mlx5_prm.h +++ b/dpdk/drivers/common/mlx5/mlx5_prm.h @@ -1679,7 +1679,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { @@ -30507,7 +35811,16 @@ index 2b5c43ee6e..dab70b9469 100644 u8 log_max_current_mc_list[0x5]; u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; -@@ -2121,10 +2123,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { +@@ -2114,17 +2116,18 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { + u8 reserved_at_d0[0x3]; + u8 log_conn_track_max_alloc[0x5]; + u8 reserved_at_d8[0x3]; +- u8 log_max_conn_track_offload[0x5]; +- u8 reserved_at_e0[0x20]; /* End of DW7. */ ++ u8 log_max_conn_track_offload[0x5]; /* End of DW7. 
*/ ++ u8 reserved_at_e0[0x20]; + u8 reserved_at_100[0x60]; + u8 reserved_at_160[0x3]; u8 hairpin_sq_wqe_bb_size[0x5]; u8 hairpin_sq_wq_in_host_mem[0x1]; u8 hairpin_data_buffer_locked[0x1]; @@ -30541,6 +35854,15 @@ index 2b5c43ee6e..dab70b9469 100644 }; struct mlx5_ifc_create_cq_out_bits { +@@ -3305,7 +3311,7 @@ struct mlx5_ifc_stc_ste_param_vport_bits { + u8 eswitch_owner_vhca_id[0x10]; + u8 vport_number[0x10]; + u8 eswitch_owner_vhca_id_valid[0x1]; +- u8 reserved_at_21[0x59]; ++ u8 reserved_at_21[0x5f]; + }; + + union mlx5_ifc_stc_param_bits { diff --git a/dpdk/drivers/common/mlx5/version.map b/dpdk/drivers/common/mlx5/version.map index 4f72900519..03c8ce5593 100644 --- a/dpdk/drivers/common/mlx5/version.map @@ -30778,9 +36100,34 @@ index 1b3a5deabf..1ce262f715 100644 { return 0; diff --git a/dpdk/drivers/common/qat/meson.build b/dpdk/drivers/common/qat/meson.build -index b84e5b3c6c..95b52b78c3 100644 +index b84e5b3c6c..3680e0a215 100644 --- a/dpdk/drivers/common/qat/meson.build +++ b/dpdk/drivers/common/qat/meson.build +@@ -17,13 +17,13 @@ qat_compress_relpath = '../../' + qat_compress_path + if disable_drivers.contains(qat_crypto_path) + qat_crypto = false + dpdk_drvs_disabled += qat_crypto_path +- set_variable(qat_crypto_path.underscorify() + '_disable_reason', ++ set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason', + 'Explicitly disabled via build config') + endif + if disable_drivers.contains(qat_compress_path) + qat_compress = false + dpdk_drvs_disabled += qat_compress_path +- set_variable(qat_compress_path.underscorify() + '_disable_reason', ++ set_variable('drv_' + qat_compress_path.underscorify() + '_disable_reason', + 'Explicitly disabled via build config') + endif + +@@ -31,7 +31,7 @@ libcrypto = dependency('libcrypto', required: false, method: 'pkg-config') + if qat_crypto and not libcrypto.found() + qat_crypto = false + dpdk_drvs_disabled += qat_crypto_path +- set_variable(qat_crypto_path.underscorify() + '_disable_reason', ++ set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason', + 'missing dependency, libcrypto') + endif + @@ -54,14 +54,6 @@ if libipsecmb.found() and libcrypto_3.found() endif endif @@ -31065,10 +36412,43 @@ index 66f00943a5..f911125e86 100644 #endif /* _QAT_QP_H_ */ diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h -index 92ec18761b..49e29dcc1c 100644 +index 92ec18761b..6f9cbe7311 100644 --- a/dpdk/drivers/common/sfc_efx/base/efx.h +++ b/dpdk/drivers/common/sfc_efx/base/efx.h -@@ -4582,6 +4582,24 @@ efx_mae_action_set_populate_mark( +@@ -7,6 +7,8 @@ + #ifndef _SYS_EFX_H + #define _SYS_EFX_H + ++#include ++ + #include "efx_annote.h" + #include "efsys.h" + #include "efx_types.h" +@@ -17,14 +19,20 @@ + extern "C" { + #endif + +-#define EFX_STATIC_ASSERT(_cond) \ +- ((void)sizeof (char[(_cond) ? 1 : -1])) ++/* ++ * Triggers an error at compilation time if the condition is false. ++ * ++ * The { } exists to workaround a bug in clang (#55821) ++ * where it would not handle _Static_assert in a switch case. 
++ */ ++#define EFX_STATIC_ASSERT(_cond) \ ++ { static_assert((_cond), #_cond); } + + #define EFX_ARRAY_SIZE(_array) \ + (sizeof (_array) / sizeof ((_array)[0])) + + #define EFX_FIELD_OFFSET(_type, _field) \ +- ((size_t)&(((_type *)0)->_field)) ++ offsetof(_type, _field) + + /* The macro expands divider twice */ + #define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) +@@ -4582,6 +4590,24 @@ efx_mae_action_set_populate_mark( __in efx_mae_actions_t *spec, __in uint32_t mark_value); @@ -31093,7 +36473,7 @@ index 92ec18761b..49e29dcc1c 100644 LIBEFX_API extern __checkReturn efx_rc_t efx_mae_action_set_populate_deliver( -@@ -4730,6 +4748,20 @@ efx_mae_action_set_fill_in_counter_id( +@@ -4730,6 +4756,20 @@ efx_mae_action_set_fill_in_counter_id( __in efx_mae_actions_t *spec, __in const efx_counter_t *counter_idp); @@ -32321,6 +37701,71 @@ index 221a0a5235..a5271d7227 100644 if (ccp_pmd_init_done) { RTE_LOG(INFO, PMD, "CCP PMD already initialized\n"); return -EFAULT; +diff --git a/dpdk/drivers/crypto/cnxk/cnxk_ae.h b/dpdk/drivers/crypto/cnxk/cnxk_ae.h +index adf719da73..156bd2e94f 100644 +--- a/dpdk/drivers/crypto/cnxk/cnxk_ae.h ++++ b/dpdk/drivers/crypto/cnxk/cnxk_ae.h +@@ -27,13 +27,22 @@ struct cnxk_ae_sess { + }; + + static __rte_always_inline void +-cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len) ++cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len, size_t max) + { ++ uint8_t msw_len = *len % 8; ++ uint64_t msw_val = 0; + size_t i; + +- /* Strip leading NUL bytes */ +- for (i = 0; i < *len; i++) { +- if ((*data)[i] != 0) ++ if (*len <= 8) ++ return; ++ ++ memcpy(&msw_val, *data, msw_len); ++ if (msw_val != 0) ++ return; ++ ++ for (i = msw_len; i < *len && (*len - i) < max; i += 8) { ++ memcpy(&msw_val, &(*data)[i], 8); ++ if (msw_val != 0) + break; + } + *data += i; +@@ -50,8 +59,8 @@ cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess, + uint8_t *exp = xform->modex.exponent.data; + uint8_t *mod = xform->modex.modulus.data; + +- cnxk_ae_modex_param_normalize(&mod, &mod_len); +- cnxk_ae_modex_param_normalize(&exp, &exp_len); ++ cnxk_ae_modex_param_normalize(&mod, &mod_len, SIZE_MAX); ++ cnxk_ae_modex_param_normalize(&exp, &exp_len, mod_len); + + if (unlikely(exp_len == 0 || mod_len == 0)) + return -EINVAL; +@@ -240,7 +249,7 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, + struct rte_crypto_mod_op_param mod_op; + uint64_t total_key_len; + union cpt_inst_w4 w4; +- uint32_t base_len; ++ size_t base_len; + uint32_t dlen; + uint8_t *dptr; + +@@ -248,8 +257,11 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, + + base_len = mod_op.base.length; + if (unlikely(base_len > mod_len)) { +- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; +- return -ENOTSUP; ++ cnxk_ae_modex_param_normalize(&mod_op.base.data, &base_len, mod_len); ++ if (base_len > mod_len) { ++ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; ++ return -ENOTSUP; ++ } + } + + total_key_len = mod_len + exp_len; diff --git a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c index 6c28f8942e..b4d1925d23 100644 --- a/dpdk/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c @@ -32429,7 +37874,7 @@ index b07fc22858..32e2b2cd64 100644 cpt_inst_w5.s.gather_sz = ((i + 2) / 3); diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c -index c25e40030b..cf1339f266 100644 +index c25e40030b..eab7091251 100644 --- 
a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -1676,7 +1676,7 @@ dpaa2_sec_dump(struct rte_crypto_op *op) @@ -32450,10 +37895,32 @@ index c25e40030b..cf1339f266 100644 printf("PDCP session params:\n" "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" +@@ -4093,7 +4093,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, + cfg.dest_cfg.priority = priority; + + cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; +- cfg.user_ctx = (size_t)(qp); ++ cfg.user_ctx = (size_t)(&qp->rx_vq); + if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { + cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; + cfg.order_preservation_en = 1; diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c -index db52683847..7807c83e54 100644 +index db52683847..8687e3e75e 100644 --- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +@@ -398,10 +398,10 @@ dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) + + cdb->sh_desc[0] = cipherdata.keylen; + cdb->sh_desc[1] = authdata.keylen; +- err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, ++ err = rta_inline_ipsec_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, + DESC_JOB_IO_LEN, + (unsigned int *)cdb->sh_desc, +- &cdb->sh_desc[2], 2); ++ &cdb->sh_desc[2], 2, authdata.algtype, 1); + + if (err < 0) { + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); @@ -671,7 +671,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) @@ -32482,7 +37949,7 @@ index db52683847..7807c83e54 100644 "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" "\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:" diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c -index 71e02cd051..30f919cd40 100644 +index 71e02cd051..2a5599b7d8 100644 --- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c @@ -139,15 +139,12 @@ int @@ -32502,6 +37969,15 @@ index 71e02cd051..30f919cd40 100644 #if IMB_VERSION(1, 1, 0) > IMB_VERSION_NUM if (qp->mb_mgr) +@@ -409,7 +406,7 @@ ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer) + resp_param->result = ipsec_mb_qp_release(dev, qp_id); + break; + default: +- CDEV_LOG_ERR("invalid mp request type\n"); ++ CDEV_LOG_ERR("invalid mp request type"); + } + + out: @@ -437,15 +434,22 @@ ipsec_mb_sym_session_configure( struct ipsec_mb_dev_private *internals = dev->data->dev_private; struct ipsec_mb_internals *pmd_data = @@ -32573,7 +38049,7 @@ index ec147d2110..4100d921ff 100644 ext_deps += lib diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c -index 147a38932d..ac20d01937 100644 +index 147a38932d..46da01254b 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c @@ -199,7 +199,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, @@ -32585,6 +38061,68 @@ index 147a38932d..ac20d01937 100644 if (sess->auth.req_digest_len != 4 && sess->auth.req_digest_len != 8 && sess->auth.req_digest_len != 16) { +@@ -1017,9 +1017,6 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, + job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; + job->u.XCBC._k2 = session->auth.xcbc.k2; + job->u.XCBC._k3 = session->auth.xcbc.k3; +- +- job->enc_keys = 
session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_CCM: +@@ -1034,8 +1031,6 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session, + job->u.CMAC._key_expanded = session->auth.cmac.expkey; + job->u.CMAC._skey1 = session->auth.cmac.skey1; + job->u.CMAC._skey2 = session->auth.cmac.skey2; +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_GMAC: +@@ -1254,7 +1249,7 @@ imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg) + * + * @return + * - 0 on success, the IMB_JOB will be filled +- * - -1 if invalid session or errors allocationg SGL linear buffer, ++ * - -1 if invalid session or errors allocating SGL linear buffer, + * IMB_JOB will not be filled + */ + static inline int +@@ -1331,24 +1326,17 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, + job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; + job->u.XCBC._k2 = session->auth.xcbc.k2; + job->u.XCBC._k3 = session->auth.xcbc.k3; +- +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_CCM: + job->u.CCM.aad = op->sym->aead.aad.data + 18; + job->u.CCM.aad_len_in_bytes = session->aead.aad_len; +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_CMAC: + job->u.CMAC._key_expanded = session->auth.cmac.expkey; + job->u.CMAC._skey1 = session->auth.cmac.skey1; + job->u.CMAC._skey2 = session->auth.cmac.skey2; +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.decode; + break; + + case IMB_AUTH_AES_GMAC: +@@ -1396,8 +1384,6 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp, + job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; + job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; + } +- job->enc_keys = session->cipher.expanded_aes_keys.encode; +- job->dec_keys = session->cipher.expanded_aes_keys.encode; + break; + default: + job->u.HMAC._hashed_auth_key_xor_ipad = diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h index 55fafbbbec..8a7c74f621 100644 --- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h @@ -32707,11 +38245,85 @@ index 9edb0cc00f..d7e8ff7db4 100644 softreq_init(sr, sr->iova); sr->ctx = ctx; sr->op = op; +diff --git a/dpdk/drivers/crypto/openssl/compat.h b/dpdk/drivers/crypto/openssl/compat.h +index 9f9167c4f1..e1814fea8c 100644 +--- a/dpdk/drivers/crypto/openssl/compat.h ++++ b/dpdk/drivers/crypto/openssl/compat.h +@@ -5,6 +5,32 @@ + #ifndef __RTA_COMPAT_H__ + #define __RTA_COMPAT_H__ + ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++static __rte_always_inline void ++free_hmac_ctx(EVP_MAC_CTX *ctx) ++{ ++ EVP_MAC_CTX_free(ctx); ++} ++ ++static __rte_always_inline void ++free_cmac_ctx(EVP_MAC_CTX *ctx) ++{ ++ EVP_MAC_CTX_free(ctx); ++} ++#else ++static __rte_always_inline void ++free_hmac_ctx(HMAC_CTX *ctx) ++{ ++ HMAC_CTX_free(ctx); ++} ++ ++static __rte_always_inline void ++free_cmac_ctx(CMAC_CTX *ctx) ++{ ++ CMAC_CTX_free(ctx); ++} ++#endif ++ + #if (OPENSSL_VERSION_NUMBER < 0x10100000L) + + static __rte_always_inline int diff --git a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h -index ed6841e460..4e224b040b 100644 +index ed6841e460..d67e39cddb 
100644 --- a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h +++ b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h -@@ -189,6 +189,8 @@ struct openssl_asym_session { +@@ -79,6 +79,20 @@ struct openssl_qp { + */ + } __rte_cache_aligned; + ++struct evp_ctx_pair { ++ EVP_CIPHER_CTX *cipher; ++ union { ++ EVP_MD_CTX *auth; ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ EVP_MAC_CTX *hmac; ++ EVP_MAC_CTX *cmac; ++#else ++ HMAC_CTX *hmac; ++ CMAC_CTX *cmac; ++#endif ++ }; ++}; ++ + /** OPENSSL crypto private session structure */ + struct openssl_session { + enum openssl_chain_order chain_order; +@@ -165,6 +179,15 @@ struct openssl_session { + /**< digest length */ + } auth; + ++ uint16_t ctx_copies_len; ++ /* < number of entries in ctx_copies */ ++ struct evp_ctx_pair qp_ctx[]; ++ /**< Flexible array member of per-queue-pair structures, each containing ++ * pointers to copies of the cipher and auth EVP contexts. Cipher ++ * contexts are not safe to use from multiple cores simultaneously, so ++ * maintaining these copies allows avoiding per-buffer copying into a ++ * temporary context. ++ */ + } __rte_cache_aligned; + + /** OPENSSL crypto private asymmetric session structure */ +@@ -189,6 +212,8 @@ struct openssl_asym_session { struct dh { DH *dh_key; uint32_t key_op; @@ -32720,7 +38332,7 @@ index ed6841e460..4e224b040b 100644 #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) OSSL_PARAM_BLD * param_bld; OSSL_PARAM_BLD *param_bld_peer; -@@ -198,6 +200,10 @@ struct openssl_asym_session { +@@ -198,6 +223,10 @@ struct openssl_asym_session { DSA *dsa; #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) OSSL_PARAM_BLD * param_bld; @@ -32731,11 +38343,176 @@ index ed6841e460..4e224b040b 100644 #endif } s; } u; +@@ -205,7 +234,8 @@ struct openssl_asym_session { + /** Set and validate OPENSSL crypto session parameters */ + extern int + openssl_set_session_parameters(struct openssl_session *sess, +- const struct rte_crypto_sym_xform *xform); ++ const struct rte_crypto_sym_xform *xform, ++ uint16_t nb_queue_pairs); + + /** Reset OPENSSL crypto session parameters */ + extern void diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -index 05449b6e98..6ae31cb5cd 100644 +index 05449b6e98..9fc8194366 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -@@ -696,7 +696,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, +@@ -349,7 +349,8 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen, + static int + openssl_set_sess_aead_enc_param(struct openssl_session *sess, + enum rte_crypto_aead_algorithm algo, +- uint8_t tag_len, const uint8_t *key) ++ uint8_t tag_len, const uint8_t *key, ++ EVP_CIPHER_CTX **ctx) + { + int iv_type = 0; + unsigned int do_ccm; +@@ -377,7 +378,7 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, + } + + sess->cipher.mode = OPENSSL_CIPHER_LIB; +- sess->cipher.ctx = EVP_CIPHER_CTX_new(); ++ *ctx = EVP_CIPHER_CTX_new(); + + if (get_aead_algo(algo, sess->cipher.key.length, + &sess->cipher.evp_algo) != 0) +@@ -387,19 +388,19 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, + + sess->chain_order = OPENSSL_CHAIN_COMBINED; + +- if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo, ++ if (EVP_EncryptInit_ex(*ctx, sess->cipher.evp_algo, + NULL, NULL, NULL) <= 0) + return -EINVAL; + +- if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length, ++ if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, sess->iv.length, + NULL) <= 
0) + return -EINVAL; + + if (do_ccm) +- EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG, ++ EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG, + tag_len, NULL); + +- if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0) ++ if (EVP_EncryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0) + return -EINVAL; + + return 0; +@@ -409,7 +410,8 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, + static int + openssl_set_sess_aead_dec_param(struct openssl_session *sess, + enum rte_crypto_aead_algorithm algo, +- uint8_t tag_len, const uint8_t *key) ++ uint8_t tag_len, const uint8_t *key, ++ EVP_CIPHER_CTX **ctx) + { + int iv_type = 0; + unsigned int do_ccm = 0; +@@ -436,7 +438,7 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, + } + + sess->cipher.mode = OPENSSL_CIPHER_LIB; +- sess->cipher.ctx = EVP_CIPHER_CTX_new(); ++ *ctx = EVP_CIPHER_CTX_new(); + + if (get_aead_algo(algo, sess->cipher.key.length, + &sess->cipher.evp_algo) != 0) +@@ -446,24 +448,46 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, + + sess->chain_order = OPENSSL_CHAIN_COMBINED; + +- if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo, ++ if (EVP_DecryptInit_ex(*ctx, sess->cipher.evp_algo, + NULL, NULL, NULL) <= 0) + return -EINVAL; + +- if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, ++ if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, + sess->iv.length, NULL) <= 0) + return -EINVAL; + + if (do_ccm) +- EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG, ++ EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG, + tag_len, NULL); + +- if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0) ++ if (EVP_DecryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0) + return -EINVAL; + + return 0; + } + ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30200000L) ++static int openssl_aesni_ctx_clone(EVP_CIPHER_CTX **dest, ++ struct openssl_session *sess) ++{ ++ /* OpenSSL versions 3.0.0 <= V < 3.2.0 have no dupctx() implementation ++ * for AES-GCM and AES-CCM. In this case, we have to create new empty ++ * contexts and initialise, as we did the original context. ++ */ ++ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) ++ sess->aead_algo = RTE_CRYPTO_AEAD_AES_GCM; ++ ++ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ++ return openssl_set_sess_aead_enc_param(sess, sess->aead_algo, ++ sess->auth.digest_length, sess->cipher.key.data, ++ dest); ++ else ++ return openssl_set_sess_aead_dec_param(sess, sess->aead_algo, ++ sess->auth.digest_length, sess->cipher.key.data, ++ dest); ++} ++#endif ++ + /** Set session cipher parameters */ + static int + openssl_set_session_cipher_parameters(struct openssl_session *sess, +@@ -520,6 +544,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, + sess->cipher.key.length, + sess->cipher.key.data) != 0) + return -EINVAL; ++ ++ ++ /* We use 3DES encryption also for decryption. ++ * IV is not important for 3DES ECB. 
++ */ ++ if (EVP_EncryptInit_ex(sess->cipher.ctx, EVP_des_ede3_ecb(), ++ NULL, sess->cipher.key.data, NULL) != 1) ++ return -EINVAL; ++ + break; + + case RTE_CRYPTO_CIPHER_DES_CBC: +@@ -585,6 +618,8 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, + return -ENOTSUP; + } + ++ EVP_CIPHER_CTX_set_padding(sess->cipher.ctx, 0); ++ + return 0; + } + +@@ -622,12 +657,14 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, + return openssl_set_sess_aead_enc_param(sess, + RTE_CRYPTO_AEAD_AES_GCM, + xform->auth.digest_length, +- xform->auth.key.data); ++ xform->auth.key.data, ++ &sess->cipher.ctx); + else + return openssl_set_sess_aead_dec_param(sess, + RTE_CRYPTO_AEAD_AES_GCM, + xform->auth.digest_length, +- xform->auth.key.data); ++ xform->auth.key.data, ++ &sess->cipher.ctx); + break; + + case RTE_CRYPTO_AUTH_MD5: +@@ -696,7 +733,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, algo = digest_name_get(xform->auth.algo); if (!algo) return -EINVAL; @@ -32744,7 +38521,155 @@ index 05449b6e98..6ae31cb5cd 100644 mac = EVP_MAC_fetch(NULL, "HMAC", NULL); sess->auth.hmac.ctx = EVP_MAC_CTX_new(mac); -@@ -1195,8 +1195,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -769,16 +806,19 @@ openssl_set_session_aead_parameters(struct openssl_session *sess, + /* Select cipher direction */ + if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) + return openssl_set_sess_aead_enc_param(sess, xform->aead.algo, +- xform->aead.digest_length, xform->aead.key.data); ++ xform->aead.digest_length, xform->aead.key.data, ++ &sess->cipher.ctx); + else + return openssl_set_sess_aead_dec_param(sess, xform->aead.algo, +- xform->aead.digest_length, xform->aead.key.data); ++ xform->aead.digest_length, xform->aead.key.data, ++ &sess->cipher.ctx); + } + + /** Parse crypto xform chain and set private session parameters */ + int + openssl_set_session_parameters(struct openssl_session *sess, +- const struct rte_crypto_sym_xform *xform) ++ const struct rte_crypto_sym_xform *xform, ++ uint16_t nb_queue_pairs) + { + const struct rte_crypto_sym_xform *cipher_xform = NULL; + const struct rte_crypto_sym_xform *auth_xform = NULL; +@@ -840,6 +880,12 @@ openssl_set_session_parameters(struct openssl_session *sess, + } + } + ++ /* ++ * With only one queue pair, the array of copies is not needed. ++ * Otherwise, one entry per queue pair is required. ++ */ ++ sess->ctx_copies_len = nb_queue_pairs > 1 ? nb_queue_pairs : 0; ++ + return 0; + } + +@@ -847,33 +893,45 @@ openssl_set_session_parameters(struct openssl_session *sess, + void + openssl_reset_session(struct openssl_session *sess) + { +- EVP_CIPHER_CTX_free(sess->cipher.ctx); ++ /* Free all the qp_ctx entries. 
*/ ++ for (uint16_t i = 0; i < sess->ctx_copies_len; i++) { ++ if (sess->qp_ctx[i].cipher != NULL) { ++ EVP_CIPHER_CTX_free(sess->qp_ctx[i].cipher); ++ sess->qp_ctx[i].cipher = NULL; ++ } + +- if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI) +- EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx); ++ switch (sess->auth.mode) { ++ case OPENSSL_AUTH_AS_AUTH: ++ EVP_MD_CTX_destroy(sess->qp_ctx[i].auth); ++ sess->qp_ctx[i].auth = NULL; ++ break; ++ case OPENSSL_AUTH_AS_HMAC: ++ free_hmac_ctx(sess->qp_ctx[i].hmac); ++ sess->qp_ctx[i].hmac = NULL; ++ break; ++ case OPENSSL_AUTH_AS_CMAC: ++ free_cmac_ctx(sess->qp_ctx[i].cmac); ++ sess->qp_ctx[i].cmac = NULL; ++ break; ++ } ++ } ++ ++ EVP_CIPHER_CTX_free(sess->cipher.ctx); + + switch (sess->auth.mode) { + case OPENSSL_AUTH_AS_AUTH: + EVP_MD_CTX_destroy(sess->auth.auth.ctx); + break; + case OPENSSL_AUTH_AS_HMAC: +- EVP_PKEY_free(sess->auth.hmac.pkey); +-# if OPENSSL_VERSION_NUMBER >= 0x30000000L +- EVP_MAC_CTX_free(sess->auth.hmac.ctx); +-# else +- HMAC_CTX_free(sess->auth.hmac.ctx); +-# endif ++ free_hmac_ctx(sess->auth.hmac.ctx); + break; + case OPENSSL_AUTH_AS_CMAC: +-# if OPENSSL_VERSION_NUMBER >= 0x30000000L +- EVP_MAC_CTX_free(sess->auth.cmac.ctx); +-# else +- CMAC_CTX_free(sess->auth.cmac.ctx); +-# endif +- break; +- default: ++ free_cmac_ctx(sess->auth.cmac.ctx); + break; + } ++ ++ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI) ++ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx); + } + + /** Provide session for operation */ +@@ -913,7 +971,7 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) + sess = (struct openssl_session *)_sess->driver_priv_data; + + if (unlikely(openssl_set_session_parameters(sess, +- op->sym->xform) != 0)) { ++ op->sym->xform, 1) != 0)) { + rte_mempool_put(qp->sess_mp, _sess); + sess = NULL; + } +@@ -1067,8 +1125,6 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) + goto process_cipher_encrypt_err; + +- EVP_CIPHER_CTX_set_padding(ctx, 0); +- + if (process_openssl_encryption_update(mbuf_src, offset, &dst, + srclen, ctx, inplace)) + goto process_cipher_encrypt_err; +@@ -1117,8 +1173,6 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, + if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) + goto process_cipher_decrypt_err; + +- EVP_CIPHER_CTX_set_padding(ctx, 0); +- + if (process_openssl_decryption_update(mbuf_src, offset, &dst, + srclen, ctx, inplace)) + goto process_cipher_decrypt_err; +@@ -1135,8 +1189,7 @@ process_cipher_decrypt_err: + /** Process cipher des 3 ctr encryption, decryption algorithm */ + static int + process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, +- int offset, uint8_t *iv, uint8_t *key, int srclen, +- EVP_CIPHER_CTX *ctx) ++ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx) + { + uint8_t ebuf[8], ctr[8]; + int unused, n; +@@ -1154,12 +1207,6 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, + src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); + l = rte_pktmbuf_data_len(m) - offset; + +- /* We use 3DES encryption also for decryption. 
+- * IV is not important for 3DES ecb +- */ +- if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0) +- goto process_cipher_des3ctr_err; +- + memcpy(ctr, iv, 8); + + for (n = 0; n < srclen; n++) { +@@ -1195,8 +1242,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, int srclen, uint8_t *aad, int aadlen, uint8_t *iv, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) { @@ -32757,7 +38682,7 @@ index 05449b6e98..6ae31cb5cd 100644 if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) goto process_auth_encryption_gcm_err; -@@ -1210,9 +1213,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1210,9 +1260,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, srclen, ctx, 0)) goto process_auth_encryption_gcm_err; @@ -32769,7 +38694,7 @@ index 05449b6e98..6ae31cb5cd 100644 if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0) goto process_auth_encryption_gcm_err; -@@ -1274,8 +1279,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1274,8 +1326,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, int srclen, uint8_t *aad, int aadlen, uint8_t *iv, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) { @@ -32782,7 +38707,7 @@ index 05449b6e98..6ae31cb5cd 100644 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0) goto process_auth_decryption_gcm_err; -@@ -1292,9 +1300,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1292,9 +1347,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, srclen, ctx, 0)) goto process_auth_decryption_gcm_err; @@ -32794,7 +38719,272 @@ index 05449b6e98..6ae31cb5cd 100644 if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0) return -EFAULT; -@@ -1797,7 +1807,6 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, +@@ -1416,6 +1473,9 @@ process_openssl_auth_mac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset, + if (m == 0) + goto process_auth_err; + ++ if (EVP_MAC_init(ctx, NULL, 0, NULL) <= 0) ++ goto process_auth_err; ++ + src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); + + l = rte_pktmbuf_data_len(m) - offset; +@@ -1442,11 +1502,9 @@ process_auth_final: + if (EVP_MAC_final(ctx, dst, &dstlen, DIGEST_LENGTH_MAX) != 1) + goto process_auth_err; + +- EVP_MAC_CTX_free(ctx); + return 0; + + process_auth_err: +- EVP_MAC_CTX_free(ctx); + OPENSSL_LOG(ERR, "Process openssl auth failed"); + return -EINVAL; + } +@@ -1558,11 +1616,151 @@ process_auth_err: + # endif + /*----------------------------------------------------------------------------*/ + ++static inline EVP_CIPHER_CTX * ++get_local_cipher_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++{ ++ /* If the array is not being used, just return the main context. */ ++ if (sess->ctx_copies_len == 0) ++ return sess->cipher.ctx; ++ ++ EVP_CIPHER_CTX **lctx = &sess->qp_ctx[qp->id].cipher; ++ ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30200000L ++ /* EVP_CIPHER_CTX_dup() added in OSSL 3.2 */ ++ *lctx = EVP_CIPHER_CTX_dup(sess->cipher.ctx); ++ return *lctx; ++#elif OPENSSL_VERSION_NUMBER >= 0x30000000L ++ if (sess->chain_order == OPENSSL_CHAIN_COMBINED) { ++ /* AESNI special-cased to use openssl_aesni_ctx_clone() ++ * to allow for working around lack of ++ * EVP_CIPHER_CTX_copy support for 3.0.0 <= OSSL Version ++ * < 3.2.0. 
++ */ ++ if (openssl_aesni_ctx_clone(lctx, sess) != 0) ++ *lctx = NULL; ++ return *lctx; ++ } ++#endif ++ ++ *lctx = EVP_CIPHER_CTX_new(); ++ EVP_CIPHER_CTX_copy(*lctx, sess->cipher.ctx); ++ } ++ ++ return *lctx; ++} ++ ++static inline EVP_MD_CTX * ++get_local_auth_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++{ ++ /* If the array is not being used, just return the main context. */ ++ if (sess->ctx_copies_len == 0) ++ return sess->auth.auth.ctx; ++ ++ EVP_MD_CTX **lctx = &sess->qp_ctx[qp->id].auth; ++ ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30100000L ++ /* EVP_MD_CTX_dup() added in OSSL 3.1 */ ++ *lctx = EVP_MD_CTX_dup(sess->auth.auth.ctx); ++#else ++ *lctx = EVP_MD_CTX_new(); ++ EVP_MD_CTX_copy(*lctx, sess->auth.auth.ctx); ++#endif ++ } ++ ++ return *lctx; ++} ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++static inline EVP_MAC_CTX * ++#else ++static inline HMAC_CTX * ++#endif ++get_local_hmac_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++{ ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ /* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of ++ * EVP_MAC_CTXs is broken, and doesn't actually reset their ++ * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid ++ * undefined behavior of provided macs on EVP_MAC ++ * reinitialization"). In cases where the fix is not present, ++ * fall back to duplicating the context every buffer as a ++ * workaround, at the cost of performance. ++ */ ++ RTE_SET_USED(qp); ++ return EVP_MAC_CTX_dup(sess->auth.hmac.ctx); ++#else ++ if (sess->ctx_copies_len == 0) ++ return sess->auth.hmac.ctx; ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ EVP_MAC_CTX **lctx = ++#else ++ HMAC_CTX **lctx = ++#endif ++ &sess->qp_ctx[qp->id].hmac; ++ ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ *lctx = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); ++#else ++ *lctx = HMAC_CTX_new(); ++ HMAC_CTX_copy(*lctx, sess->auth.hmac.ctx); ++#endif ++ } ++ ++ return *lctx; ++#endif ++} ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++static inline EVP_MAC_CTX * ++#else ++static inline CMAC_CTX * ++#endif ++get_local_cmac_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++{ ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ /* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of ++ * EVP_MAC_CTXs is broken, and doesn't actually reset their ++ * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid ++ * undefined behavior of provided macs on EVP_MAC ++ * reinitialization"). In cases where the fix is not present, ++ * fall back to duplicating the context every buffer as a ++ * workaround, at the cost of performance. 
++ */ ++ RTE_SET_USED(qp); ++ return EVP_MAC_CTX_dup(sess->auth.cmac.ctx); ++#else ++ if (sess->ctx_copies_len == 0) ++ return sess->auth.cmac.ctx; ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ EVP_MAC_CTX **lctx = ++#else ++ CMAC_CTX **lctx = ++#endif ++ &sess->qp_ctx[qp->id].cmac; ++ ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ *lctx = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); ++#else ++ *lctx = CMAC_CTX_new(); ++ CMAC_CTX_copy(*lctx, sess->auth.cmac.ctx); ++#endif ++ } ++ ++ return *lctx; ++#endif ++} ++ + /** Process auth/cipher combined operation */ + static void +-process_openssl_combined_op +- (struct rte_crypto_op *op, struct openssl_session *sess, +- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) ++process_openssl_combined_op(struct openssl_qp *qp, struct rte_crypto_op *op, ++ struct openssl_session *sess, struct rte_mbuf *mbuf_src, ++ struct rte_mbuf *mbuf_dst) + { + /* cipher */ + uint8_t *dst = NULL, *iv, *tag, *aad; +@@ -1579,6 +1777,8 @@ process_openssl_combined_op + return; + } + ++ EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp); ++ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + sess->iv.offset); + if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { +@@ -1612,12 +1812,12 @@ process_openssl_combined_op + status = process_openssl_auth_encryption_gcm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, sess->cipher.ctx); ++ dst, tag, ctx); + else + status = process_openssl_auth_encryption_ccm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, taglen, sess->cipher.ctx); ++ dst, tag, taglen, ctx); + + } else { + if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC || +@@ -1625,12 +1825,12 @@ process_openssl_combined_op + status = process_openssl_auth_decryption_gcm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, sess->cipher.ctx); ++ dst, tag, ctx); + else + status = process_openssl_auth_decryption_ccm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, taglen, sess->cipher.ctx); ++ dst, tag, taglen, ctx); + } + + if (status != 0) { +@@ -1645,14 +1845,13 @@ process_openssl_combined_op + + /** Process cipher operation */ + static void +-process_openssl_cipher_op +- (struct rte_crypto_op *op, struct openssl_session *sess, +- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) ++process_openssl_cipher_op(struct openssl_qp *qp, struct rte_crypto_op *op, ++ struct openssl_session *sess, struct rte_mbuf *mbuf_src, ++ struct rte_mbuf *mbuf_dst) + { + uint8_t *dst, *iv; + int srclen, status; + uint8_t inplace = (mbuf_src == mbuf_dst) ? 
1 : 0; +- EVP_CIPHER_CTX *ctx_copy; + + /* + * Segmented OOP destination buffer is not supported for encryption/ +@@ -1671,25 +1870,22 @@ process_openssl_cipher_op + + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + sess->iv.offset); +- ctx_copy = EVP_CIPHER_CTX_new(); +- EVP_CIPHER_CTX_copy(ctx_copy, sess->cipher.ctx); ++ ++ EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp); + + if (sess->cipher.mode == OPENSSL_CIPHER_LIB) + if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + status = process_openssl_cipher_encrypt(mbuf_src, dst, + op->sym->cipher.data.offset, iv, +- srclen, ctx_copy, inplace); ++ srclen, ctx, inplace); + else + status = process_openssl_cipher_decrypt(mbuf_src, dst, + op->sym->cipher.data.offset, iv, +- srclen, ctx_copy, inplace); ++ srclen, ctx, inplace); + else + status = process_openssl_cipher_des3ctr(mbuf_src, dst, +- op->sym->cipher.data.offset, iv, +- sess->cipher.key.data, srclen, +- ctx_copy); ++ op->sym->cipher.data.offset, iv, srclen, ctx); + +- EVP_CIPHER_CTX_free(ctx_copy); + if (status != 0) + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + } +@@ -1797,7 +1993,6 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, # if OPENSSL_VERSION_NUMBER >= 0x30000000L EVP_MAC_CTX *ctx_h; EVP_MAC_CTX *ctx_c; @@ -32802,29 +38992,65 @@ index 05449b6e98..6ae31cb5cd 100644 # else HMAC_CTX *ctx_h; CMAC_CTX *ctx_c; -@@ -1818,10 +1827,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, +@@ -1809,48 +2004,40 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, + + switch (sess->auth.mode) { + case OPENSSL_AUTH_AS_AUTH: +- ctx_a = EVP_MD_CTX_create(); +- EVP_MD_CTX_copy_ex(ctx_a, sess->auth.auth.ctx); ++ ctx_a = get_local_auth_ctx(sess, qp); + status = process_openssl_auth(mbuf_src, dst, + op->sym->auth.data.offset, NULL, NULL, srclen, + ctx_a, sess->auth.auth.evp_algo); +- EVP_MD_CTX_destroy(ctx_a); break; case OPENSSL_AUTH_AS_HMAC: ++ ctx_h = get_local_hmac_ctx(sess, qp); # if OPENSSL_VERSION_NUMBER >= 0x30000000L - mac = EVP_MAC_fetch(NULL, "HMAC", NULL); - ctx_h = EVP_MAC_CTX_new(mac); - ctx_h = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); +- ctx_h = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); - EVP_MAC_free(mac); status = process_openssl_auth_mac(mbuf_src, dst, op->sym->auth.data.offset, srclen, ctx_h); -@@ -1836,10 +1842,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, + # else +- ctx_h = HMAC_CTX_new(); +- HMAC_CTX_copy(ctx_h, sess->auth.hmac.ctx); + status = process_openssl_auth_hmac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_h); +- HMAC_CTX_free(ctx_h); + # endif ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ EVP_MAC_CTX_free(ctx_h); ++#endif break; case OPENSSL_AUTH_AS_CMAC: ++ ctx_c = get_local_cmac_ctx(sess, qp); # if OPENSSL_VERSION_NUMBER >= 0x30000000L - mac = EVP_MAC_fetch(NULL, OSSL_MAC_NAME_CMAC, NULL); - ctx_c = EVP_MAC_CTX_new(mac); - ctx_c = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); +- ctx_c = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); - EVP_MAC_free(mac); status = process_openssl_auth_mac(mbuf_src, dst, op->sym->auth.data.offset, srclen, ctx_c); -@@ -1927,7 +1930,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, + # else +- ctx_c = CMAC_CTX_new(); +- CMAC_CTX_copy(ctx_c, sess->auth.cmac.ctx); + status = process_openssl_auth_cmac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_c); +- CMAC_CTX_free(ctx_c); + # endif ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 
0x30003000L) ++ EVP_MAC_CTX_free(ctx_c); ++#endif + break; + default: + status = -1; +@@ -1927,7 +2114,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, if (EVP_PKEY_sign(dsa_ctx, dsa_sign_data, &outlen, op->message.data, op->message.length) <= 0) { @@ -32833,7 +39059,7 @@ index 05449b6e98..6ae31cb5cd 100644 goto err_dsa_sign; } -@@ -1935,7 +1938,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, +@@ -1935,7 +2122,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, DSA_SIG *sign = d2i_DSA_SIG(NULL, &dsa_sign_data_p, outlen); if (!sign) { OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); @@ -32842,7 +39068,7 @@ index 05449b6e98..6ae31cb5cd 100644 goto err_dsa_sign; } else { const BIGNUM *r = NULL, *s = NULL; -@@ -1947,7 +1950,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, +@@ -1947,7 +2134,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, } DSA_SIG_free(sign); @@ -32851,7 +39077,7 @@ index 05449b6e98..6ae31cb5cd 100644 return 0; err_dsa_sign: -@@ -1957,6 +1960,7 @@ err_dsa_sign: +@@ -1957,6 +2144,7 @@ err_dsa_sign: EVP_PKEY_CTX_free(key_ctx); if (dsa_ctx) EVP_PKEY_CTX_free(dsa_ctx); @@ -32859,7 +39085,7 @@ index 05449b6e98..6ae31cb5cd 100644 return -1; } -@@ -2633,7 +2637,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, +@@ -2633,7 +2821,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, if (EVP_PKEY_verify_recover(rsa_ctx, tmp, &outlen, op->rsa.sign.data, op->rsa.sign.length) <= 0) { @@ -32868,7 +39094,7 @@ index 05449b6e98..6ae31cb5cd 100644 goto err_rsa; } -@@ -2645,7 +2649,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, +@@ -2645,7 +2833,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, op->rsa.message.length)) { OPENSSL_LOG(ERR, "RSA sign Verification failed"); } @@ -32877,11 +39103,97 @@ index 05449b6e98..6ae31cb5cd 100644 break; default: +@@ -2861,13 +3049,13 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, + + switch (sess->chain_order) { + case OPENSSL_CHAIN_ONLY_CIPHER: +- process_openssl_cipher_op(op, sess, msrc, mdst); ++ process_openssl_cipher_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_ONLY_AUTH: + process_openssl_auth_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_CIPHER_AUTH: +- process_openssl_cipher_op(op, sess, msrc, mdst); ++ process_openssl_cipher_op(qp, op, sess, msrc, mdst); + /* OOP */ + if (msrc != mdst) + copy_plaintext(msrc, mdst, op); +@@ -2875,10 +3063,10 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, + break; + case OPENSSL_CHAIN_AUTH_CIPHER: + process_openssl_auth_op(qp, op, sess, msrc, mdst); +- process_openssl_cipher_op(op, sess, msrc, mdst); ++ process_openssl_cipher_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_COMBINED: +- process_openssl_combined_op(op, sess, msrc, mdst); ++ process_openssl_combined_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_CIPHER_BPI: + process_openssl_docsis_bpi_op(op, sess, msrc, mdst); diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c -index defed4429e..24d6d48262 100644 +index defed4429e..18a8095ba2 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c -@@ -1087,22 +1087,21 @@ err_rsa: +@@ -777,9 +777,35 @@ qp_setup_cleanup: + + /** Returns the size of the symmetric session structure */ + static unsigned +-openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) 
++openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev) + { +- return sizeof(struct openssl_session); ++ /* ++ * For 0 qps, return the max size of the session - this is necessary if ++ * the user calls into this function to create the session mempool, ++ * without first configuring the number of qps for the cryptodev. ++ */ ++ if (dev->data->nb_queue_pairs == 0) { ++ unsigned int max_nb_qps = ((struct openssl_private *) ++ dev->data->dev_private)->max_nb_qpairs; ++ return sizeof(struct openssl_session) + ++ (sizeof(struct evp_ctx_pair) * max_nb_qps); ++ } ++ ++ /* ++ * With only one queue pair, the thread safety of multiple context ++ * copies is not necessary, so don't allocate extra memory for the ++ * array. ++ */ ++ if (dev->data->nb_queue_pairs == 1) ++ return sizeof(struct openssl_session); ++ ++ /* ++ * Otherwise, the size of the flexible array member should be enough to ++ * fit pointers to per-qp contexts. This is twice the number of queue ++ * pairs, to allow for auth and cipher contexts. ++ */ ++ return sizeof(struct openssl_session) + ++ (sizeof(struct evp_ctx_pair) * dev->data->nb_queue_pairs); + } + + /** Returns the size of the asymmetric session structure */ +@@ -791,7 +817,7 @@ openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused) + + /** Configure the session from a crypto xform chain */ + static int +-openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, ++openssl_pmd_sym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess) + { +@@ -803,7 +829,8 @@ openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, + return -EINVAL; + } + +- ret = openssl_set_session_parameters(sess_private_data, xform); ++ ret = openssl_set_session_parameters(sess_private_data, xform, ++ dev->data->nb_queue_pairs); + if (ret != 0) { + OPENSSL_LOG(ERR, "failed configure session parameters"); + +@@ -1087,22 +1114,21 @@ err_rsa: } case RTE_CRYPTO_ASYM_XFORM_DH: { @@ -32913,7 +39225,7 @@ index defed4429e..24d6d48262 100644 OSSL_PARAM_BLD *param_bld = NULL; param_bld = OSSL_PARAM_BLD_new(); if (!param_bld) { -@@ -1112,9 +1111,9 @@ err_rsa: +@@ -1112,9 +1138,9 @@ err_rsa: if ((!OSSL_PARAM_BLD_push_utf8_string(param_bld, "group", "ffdhe2048", 0)) || (!OSSL_PARAM_BLD_push_BN(param_bld, @@ -32925,7 +39237,7 @@ index defed4429e..24d6d48262 100644 OSSL_PARAM_BLD_free(param_bld); goto err_dh; } -@@ -1129,9 +1128,9 @@ err_rsa: +@@ -1129,9 +1155,9 @@ err_rsa: if ((!OSSL_PARAM_BLD_push_utf8_string(param_bld_peer, "group", "ffdhe2048", 0)) || (!OSSL_PARAM_BLD_push_BN(param_bld_peer, @@ -32937,7 +39249,7 @@ index defed4429e..24d6d48262 100644 OSSL_PARAM_BLD_free(param_bld); OSSL_PARAM_BLD_free(param_bld_peer); goto err_dh; -@@ -1140,6 +1139,20 @@ err_rsa: +@@ -1140,6 +1166,20 @@ err_rsa: asym_session->u.dh.param_bld = param_bld; asym_session->u.dh.param_bld_peer = param_bld_peer; #else @@ -32958,7 +39270,7 @@ index defed4429e..24d6d48262 100644 dh = DH_new(); if (dh == NULL) { OPENSSL_LOG(ERR, -@@ -1158,41 +1171,48 @@ err_rsa: +@@ -1158,41 +1198,48 @@ err_rsa: err_dh: OPENSSL_LOG(ERR, " failed to set dh params\n"); @@ -33019,7 +39331,7 @@ index defed4429e..24d6d48262 100644 goto err_dsa; param_bld = OSSL_PARAM_BLD_new(); -@@ -1201,10 +1221,11 @@ err_dh: +@@ -1201,10 +1248,11 @@ err_dh: goto err_dsa; } @@ -33035,7 +39347,7 @@ index defed4429e..24d6d48262 100644 OSSL_PARAM_BLD_free(param_bld); OPENSSL_LOG(ERR, "failed to allocate resources\n"); goto err_dsa; -@@ -1268,18 +1289,25 @@ 
err_dh: +@@ -1268,18 +1316,25 @@ err_dh: if (ret) { DSA_free(dsa); OPENSSL_LOG(ERR, "Failed to set keys\n"); @@ -33062,7 +39374,7 @@ index defed4429e..24d6d48262 100644 return -1; } default: -@@ -1357,10 +1385,16 @@ static void openssl_reset_asym_session(struct openssl_asym_session *sess) +@@ -1357,10 +1412,16 @@ static void openssl_reset_asym_session(struct openssl_asym_session *sess) if (sess->u.dh.dh_key) DH_free(sess->u.dh.dh_key); #endif @@ -33150,10 +39462,18 @@ index 7f00f6097d..84d58accc7 100644 tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask; } diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c -index b1e5fa9a82..b219a418ba 100644 +index b1e5fa9a82..52218e5a0a 100644 --- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c -@@ -100,6 +100,9 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = { +@@ -9,6 +9,7 @@ + #include "qat_asym.h" + #include "qat_crypto.h" + #include "qat_crypto_pmd_gens.h" ++#include "adf_transport_access_macros_gen4vf.h" + + static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = { + QAT_SYM_CIPHER_CAP(AES_CBC, +@@ -100,6 +101,9 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = { QAT_SYM_CIPHER_CAP(SM4_CTR, CAP_SET(block_size, 16), CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)), @@ -33163,8 +39483,143 @@ index b1e5fa9a82..b219a418ba 100644 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; +@@ -220,6 +224,78 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx, + return 0; + } + ++int ++qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n) ++{ ++ struct qat_qp *qp = qp_data; ++ struct qat_queue *tx_queue = &qp->tx_q; ++ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; ++ ++ if (unlikely(dp_ctx->cached_enqueue != n)) ++ return -1; ++ ++ qp->enqueued += n; ++ qp->stats.enqueued_count += n; ++ ++ tx_queue->tail = dp_ctx->tail; ++ ++ WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr, ++ tx_queue->hw_bundle_number, ++ tx_queue->hw_queue_number, tx_queue->tail); ++ ++ tx_queue->csr_tail = tx_queue->tail; ++ dp_ctx->cached_enqueue = 0; ++ ++ return 0; ++} ++ ++int ++qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n) ++{ ++ struct qat_qp *qp = qp_data; ++ struct qat_queue *rx_queue = &qp->rx_q; ++ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; ++ ++ if (unlikely(dp_ctx->cached_dequeue != n)) ++ return -1; ++ ++ rx_queue->head = dp_ctx->head; ++ rx_queue->nb_processed_responses += n; ++ qp->dequeued += n; ++ qp->stats.dequeued_count += n; ++ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) { ++ uint32_t old_head, new_head; ++ uint32_t max_head; ++ ++ old_head = rx_queue->csr_head; ++ new_head = rx_queue->head; ++ max_head = qp->nb_descriptors * rx_queue->msg_size; ++ ++ /* write out free descriptors */ ++ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head; ++ ++ if (new_head < old_head) { ++ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, ++ max_head - old_head); ++ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE, ++ new_head); ++ } else { ++ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - ++ old_head); ++ } ++ rx_queue->nb_processed_responses = 0; ++ rx_queue->csr_head = new_head; ++ ++ /* write current head to CSR */ ++ WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr, ++ rx_queue->hw_bundle_number, rx_queue->hw_queue_number, ++ new_head); ++ } ++ ++ dp_ctx->cached_dequeue = 0; ++ return 0; ++} 
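Aside, for readers tracing the hunk above: qat_sym_dp_dequeue_done_gen4() reclaims consumed response-ring descriptors by stamping them with the ring's empty-signature byte, splitting the memset in two when the freed span wraps past the end of the ring. Below is a minimal standalone sketch of just that wraparound step, not part of the patch; ring_mark_free and EMPTY_SIG are hypothetical stand-ins for the driver's internals (ADF_RING_EMPTY_SIG_BYTE, and the CSR head write-back is omitted).

#include <stdint.h>
#include <string.h>

#define EMPTY_SIG 0x7F	/* hypothetical stand-in for ADF_RING_EMPTY_SIG_BYTE */

/* Stamp the descriptor bytes consumed between old_head and new_head as
 * empty, handling the case where the freed span wraps past the end of
 * the ring. Both heads are byte offsets into a ring of ring_bytes
 * (nb_descriptors * msg_size, i.e. max_head in the hunk above). */
void
ring_mark_free(uint8_t *base, uint32_t ring_bytes,
	       uint32_t old_head, uint32_t new_head)
{
	if (new_head < old_head) {
		/* wrapped: clear from old_head to the ring end, then from 0 */
		memset(base + old_head, EMPTY_SIG, ring_bytes - old_head);
		memset(base, EMPTY_SIG, new_head);
	} else {
		memset(base + old_head, EMPTY_SIG, new_head - old_head);
	}
}

The two-memset split is the usual alternative to clearing byte-by-byte modulo the ring size; new_head < old_head is a sufficient wrap test here because the head only ever advances forward by whole descriptors.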
++ + static int + qat_sym_crypto_set_session_gen4(void *cdev, void *session) + { +@@ -380,11 +456,51 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx) + { + struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx; + struct qat_sym_session *ctx = _ctx; +- int ret; + +- ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx); +- if (ret < 0) +- return ret; ++ raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4; ++ raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1; ++ raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1; ++ raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4; ++ ++ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || ++ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && ++ !ctx->is_gmac) { ++ /* AES-GCM or AES-CCM */ ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || ++ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || ++ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 ++ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE ++ && ctx->qat_hash_alg == ++ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_aead_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_aead_gen1; ++ } else { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_chain_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_chain_gen1; ++ } ++ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { ++ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1; ++ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1; ++ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { ++ if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE || ++ ctx->qat_cipher_alg == ++ ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_aead_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_aead_gen1; ++ } else { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_cipher_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_cipher_gen1; ++ } ++ } else ++ return -1; + + if (ctx->is_single_pass && ctx->is_ucs) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4; diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h -index 524c291340..7972c7cfeb 100644 +index 524c291340..266ab74a01 100644 --- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h @@ -290,7 +290,8 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, @@ -33190,7 +39645,7 @@ index 524c291340..7972c7cfeb 100644 return 0; } -@@ -366,7 +371,8 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, +@@ -366,10 +371,11 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, struct rte_crypto_sgl *in_sgl, struct rte_crypto_sgl *out_sgl, struct rte_crypto_va_iova_ptr *cipher_iv, struct rte_crypto_va_iova_ptr *auth_iv_or_aad, @@ -33199,7 +39654,11 @@ index 524c291340..7972c7cfeb 100644 + struct qat_sym_op_cookie *cookie) { union rte_crypto_sym_ofs ofs; - uint32_t max_len = 0; +- uint32_t max_len = 0; ++ uint32_t max_len = 0, oop_offset = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + int is_oop = (op->sym->m_dst != NULL) && @@ -390,7 +396,11 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, auth_iv_or_aad->iova = rte_crypto_op_ctophys_offset(op, ctx->auth_iv.offset); @@ -33213,7 +39672,44 @@ index 524c291340..7972c7cfeb 100644 ret = qat_cipher_is_len_in_bits(ctx, op); switch (ret) { -@@ 
-682,7 +692,8 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx, +@@ -428,6 +438,16 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + + max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len); + ++ /* If OOP, we need to keep in mind that offset needs to start where ++ * cipher/auth starts, namely no offset on the smaller one ++ */ ++ if (is_oop) { ++ oop_offset = RTE_MIN(auth_ofs, cipher_ofs); ++ auth_ofs -= oop_offset; ++ cipher_ofs -= oop_offset; ++ max_len -= oop_offset; ++ } ++ + /* digest in buffer check. Needed only for wireless algos */ + if (ret == 1) { + /* Handle digest-encrypted cases, i.e. +@@ -461,9 +481,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + max_len = RTE_MAX(max_len, auth_ofs + auth_len + + ctx->digest_length); + } +- +- /* Passing 0 as cipher & auth offsets are assigned into ofs later */ +- n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len, ++ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, oop_offset, max_len, + in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; +@@ -473,7 +491,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + + if (unlikely((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src))) { +- int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0, ++ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, oop_offset, + max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + + if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { +@@ -682,7 +700,8 @@ enqueue_one_chain_job_gen1(struct qat_sym_session *ctx, while (remaining_off >= cvec->len && i >= 1) { i--; remaining_off -= cvec->len; @@ -33223,6 +39719,36 @@ index 524c291340..7972c7cfeb 100644 } auth_iova_end = cvec->iova + remaining_off; +@@ -801,10 +820,12 @@ enqueue_one_aead_job_gen1(struct qat_sym_session *ctx, + *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = + q - ICP_QAT_HW_CCM_NONCE_OFFSET; + +- rte_memcpy((uint8_t *)aad->va + +- ICP_QAT_HW_CCM_NONCE_OFFSET, +- (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, +- ctx->cipher_iv.length); ++ if (ctx->aad_len > 0) { ++ rte_memcpy((uint8_t *)aad->va + ++ ICP_QAT_HW_CCM_NONCE_OFFSET, ++ (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, ++ ctx->cipher_iv.length); ++ } + break; + default: + break; +@@ -914,6 +935,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); + int + qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); + ++int ++qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n); ++ ++int ++qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n); ++ + int + qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx); + diff --git a/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index 91d5cfa71d..888dea4ad9 100644 --- a/dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -33347,7 +39873,7 @@ index 91d5cfa71d..888dea4ad9 100644 break; diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c -index 08e92191a3..18f99089e8 100644 +index 08e92191a3..f68d96f4f5 100644 --- a/dpdk/drivers/crypto/qat/qat_sym.c +++ b/dpdk/drivers/crypto/qat/qat_sym.c @@ -51,6 +51,11 @@ qat_sym_init_op_cookie(void *op_cookie) @@ -33371,6 +39897,15 @@ index 08e92191a3..18f99089e8 100644 { int i = 0, ret = 0; uint16_t slice_map = 0; +@@ -261,7 +266,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + } + + cryptodev->feature_flags |= 
RTE_CRYPTODEV_FF_SECURITY; +- QAT_LOG(INFO, "Device %s rte_security support ensabled", name); ++ QAT_LOG(INFO, "Device %s rte_security support enabled", name); + } else { + QAT_LOG(INFO, "Device %s rte_security support disabled", name); + } diff --git a/dpdk/drivers/crypto/qat/qat_sym.h b/dpdk/drivers/crypto/qat/qat_sym.h index 9a4251e08b..a45bddf848 100644 --- a/dpdk/drivers/crypto/qat/qat_sym.h @@ -33648,10 +40183,20 @@ index e1f5694f50..d58554787f 100644 #endif diff --git a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c -index d5a5f08ecc..8968bb853b 100644 +index d5a5f08ecc..2c91ceec13 100644 --- a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c +++ b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c -@@ -117,6 +117,7 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle, +@@ -16,9 +16,6 @@ + + #define DPAA2_QDMA_PREFETCH "prefetch" + +-/* Dynamic log type identifier */ +-int dpaa2_qdma_logtype; +- + uint32_t dpaa2_coherent_no_alloc_cache; + uint32_t dpaa2_coherent_alloc_cache; + +@@ -117,6 +114,7 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle, /* source */ sdd->read_cmd.portid = rbp->sportid; sdd->rbpcmd_simple.pfid = rbp->spfid; @@ -33659,7 +40204,7 @@ index d5a5f08ecc..8968bb853b 100644 sdd->rbpcmd_simple.vfid = rbp->svfid; if (rbp->srbp) { -@@ -129,6 +130,7 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle, +@@ -129,6 +127,7 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle, /* destination */ sdd->write_cmd.portid = rbp->dportid; sdd->rbpcmd_simple.pfid = rbp->dpfid; @@ -33667,6 +40212,12 @@ index d5a5f08ecc..8968bb853b 100644 sdd->rbpcmd_simple.vfid = rbp->dvfid; if (rbp->drbp) { +@@ -1697,4 +1696,4 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = { + RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd); + RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma, + "no_prefetch= "); +-RTE_LOG_REGISTER_DEFAULT(dpaa_qdma2_logtype, INFO); ++RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO); diff --git a/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h index dc8acb4aec..5a8da46d12 100644 --- a/dpdk/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h @@ -33682,6 +40233,194 @@ index dc8acb4aec..5a8da46d12 100644 }; /** Determines a QDMA job */ +diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c +index 0e11ca14cc..4db3b0554c 100644 +--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c ++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c +@@ -39,8 +39,6 @@ hisi_dma_queue_base(struct hisi_dma_dev *hw) + { + if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) + return HISI_DMA_HIP08_QUEUE_BASE; +- else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) +- return HISI_DMA_HIP09_QUEUE_BASE; + else + return 0; + } +@@ -216,25 +214,6 @@ hisi_dma_init_hw(struct hisi_dma_dev *hw) + HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); + hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG, + HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); +- } else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) { +- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_CTRL0_REG, +- HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M, false); +- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG, +- HISI_DMA_HIP09_QUEUE_INT_MASK_M, true); +- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG, +- HISI_DMA_HIP09_QUEUE_INT_MASK_M, true); +- hisi_dma_update_queue_mbit(hw, +- HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG, +- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true); +- hisi_dma_update_queue_mbit(hw, +- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG, +- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true); +- 
hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG, +- HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B, true); +- hisi_dma_update_bit(hw, +- HISI_DMA_HIP09_QUEUE_CFG_REG(hw->queue_id), +- HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B, +- true); + } + } + +@@ -256,8 +235,6 @@ hisi_dma_reg_layout(uint8_t revision) + { + if (revision == HISI_DMA_REVISION_HIP08B) + return HISI_DMA_REG_LAYOUT_HIP08; +- else if (revision >= HISI_DMA_REVISION_HIP09A) +- return HISI_DMA_REG_LAYOUT_HIP09; + else + return HISI_DMA_REG_LAYOUT_INVALID; + } +@@ -328,14 +305,11 @@ hisi_dma_info_get(const struct rte_dma_dev *dev, + struct rte_dma_info *dev_info, + uint32_t info_sz) + { +- struct hisi_dma_dev *hw = dev->data->dev_private; ++ RTE_SET_USED(dev); + RTE_SET_USED(info_sz); + + dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | + RTE_DMA_CAPA_OPS_COPY; +- if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) +- dev_info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS; +- + dev_info->max_vchans = 1; + dev_info->max_desc = HISI_DMA_MAX_DESC_NUM; + dev_info->min_desc = HISI_DMA_MIN_DESC_NUM; +@@ -514,18 +488,6 @@ hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f) + { HISI_DMA_REG_LAYOUT_HIP08, + HISI_DMA_HIP08_DUMP_START_REG, + HISI_DMA_HIP08_DUMP_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_A_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_A_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_B_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_B_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_C_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_C_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_D_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_D_END_REG }, + }; + uint32_t i; + +diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h +index 5a17f9f69e..a57b5c759a 100644 +--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h ++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h +@@ -25,22 +25,14 @@ + #define HISI_DMA_DEVICE_ID 0xA122 + #define HISI_DMA_PCI_REVISION_ID_REG 0x08 + #define HISI_DMA_REVISION_HIP08B 0x21 +-#define HISI_DMA_REVISION_HIP09A 0x30 + + #define HISI_DMA_MAX_HW_QUEUES 4 + #define HISI_DMA_MAX_DESC_NUM 8192 + #define HISI_DMA_MIN_DESC_NUM 32 + +-/** +- * The HIP08B(HiSilicon IP08) and HIP09B(HiSilicon IP09) are DMA iEPs, they +- * have the same pci device id but different pci revision. +- * Unfortunately, they have different register layouts, so two layout +- * enumerations are defined. +- */ + enum { + HISI_DMA_REG_LAYOUT_INVALID = 0, +- HISI_DMA_REG_LAYOUT_HIP08, +- HISI_DMA_REG_LAYOUT_HIP09 ++ HISI_DMA_REG_LAYOUT_HIP08 + }; + + /** +@@ -69,9 +61,6 @@ enum { + * length of queue-region. The global offset for a single queue register is + * calculated by: + * offset = queue-base + (queue-id * queue-region) + reg-offset-in-region. +- * +- * The first part of queue region is basically the same for HIP08 and HIP09 +- * register layouts, therefore, HISI_QUEUE_* registers are defined for it. 
+ */ + #define HISI_DMA_QUEUE_SQ_BASE_L_REG 0x0 + #define HISI_DMA_QUEUE_SQ_BASE_H_REG 0x4 +@@ -110,28 +99,6 @@ enum { + #define HISI_DMA_HIP08_DUMP_START_REG 0x2000 + #define HISI_DMA_HIP08_DUMP_END_REG 0x2280 + +-/** +- * HiSilicon IP09 DMA register and field define: +- */ +-#define HISI_DMA_HIP09_QUEUE_BASE 0x2000 +-#define HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M GENMASK(31, 28) +-#define HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B 2 +-#define HISI_DMA_HIP09_QUEUE_INT_MASK_M 0x1 +-#define HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG 0x48 +-#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG 0x4C +-#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M GENMASK(18, 1) +-#define HISI_DMA_HIP09_QUEUE_CFG_REG(queue_id) (0x800 + \ +- (queue_id) * 0x20) +-#define HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B 16 +-#define HISI_DMA_HIP09_DUMP_REGION_A_START_REG 0x0 +-#define HISI_DMA_HIP09_DUMP_REGION_A_END_REG 0x368 +-#define HISI_DMA_HIP09_DUMP_REGION_B_START_REG 0x800 +-#define HISI_DMA_HIP09_DUMP_REGION_B_END_REG 0xA08 +-#define HISI_DMA_HIP09_DUMP_REGION_C_START_REG 0x1800 +-#define HISI_DMA_HIP09_DUMP_REGION_C_END_REG 0x1A4C +-#define HISI_DMA_HIP09_DUMP_REGION_D_START_REG 0x1C00 +-#define HISI_DMA_HIP09_DUMP_REGION_D_END_REG 0x1CC4 +- + /** + * In fact, there are multiple states, but it need to pay attention to + * the following three states for the driver: +diff --git a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py +index 41b4d13b9c..3f0de59e91 100755 +--- a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py ++++ b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py +@@ -98,8 +98,10 @@ def configure_dsa(dsa_id, args): + "priority": 1, + "max_batch_size": 1024, + "size": int(max_work_queues_size / nb_queues)} +- wqcfg.update(parse_wq_opts(args.wq_option)) + wq_dir = SysfsDir(os.path.join(dsa_dir.path, f"wq{dsa_id}.{q}")) ++ if os.path.exists(os.path.join(wq_dir.path, f"driver_name")): ++ wqcfg.update({"driver_name": "user"}) ++ wqcfg.update(parse_wq_opts(args.wq_option)) + wq_dir.write_values(wqcfg) + + # enable device and then queues +diff --git a/dpdk/drivers/dma/idxd/idxd_bus.c b/dpdk/drivers/dma/idxd/idxd_bus.c +index 3b2d4c2b65..ba8076715d 100644 +--- a/dpdk/drivers/dma/idxd/idxd_bus.c ++++ b/dpdk/drivers/dma/idxd/idxd_bus.c +@@ -261,9 +261,15 @@ static int + is_for_this_process_use(struct rte_dsa_device *dev, const char *name) + { + char *runtime_dir = strdup(rte_eal_get_runtime_dir()); +- char *prefix = basename(runtime_dir); +- int prefixlen = strlen(prefix); + int retval = 0; ++ int prefixlen; ++ char *prefix; ++ ++ if (runtime_dir == NULL) ++ return retval; ++ ++ prefix = basename(runtime_dir); ++ prefixlen = strlen(prefix); + + if (strncmp(name, "dpdk_", 5) == 0) + retval = 1; diff --git a/dpdk/drivers/dma/ioat/ioat_dmadev.c b/dpdk/drivers/dma/ioat/ioat_dmadev.c index 5906eb45aa..57c18c081d 100644 --- a/dpdk/drivers/dma/ioat/ioat_dmadev.c @@ -34049,10 +40788,33 @@ index 4c3932da47..0ccdb7baf3 100644 /* Write CPT instruction to lmt line */ vst1q_u64(lmt_addr, cmd01); diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -index db62d32a81..d2f1708297 100644 +index db62d32a81..205cc76572 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -@@ -319,9 +319,9 @@ int +@@ -150,16 +150,17 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev) + + deq_tmo_ns = conf->dequeue_timeout_ns; + +- if (deq_tmo_ns == 0) +- deq_tmo_ns = dev->min_dequeue_timeout_ns; +- if (deq_tmo_ns < dev->min_dequeue_timeout_ns || +- 
deq_tmo_ns > dev->max_dequeue_timeout_ns) { ++ if (deq_tmo_ns && (deq_tmo_ns < dev->min_dequeue_timeout_ns || ++ deq_tmo_ns > dev->max_dequeue_timeout_ns)) { + plt_err("Unsupported dequeue timeout requested"); + return -EINVAL; + } + +- if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) ++ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { ++ if (deq_tmo_ns == 0) ++ deq_tmo_ns = dev->min_dequeue_timeout_ns; + dev->is_timeout_deq = 1; ++ } + + dev->deq_tmo_ns = deq_tmo_ns; + +@@ -319,9 +320,9 @@ int cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns, uint64_t *tmo_ticks) { @@ -34064,7 +40826,17 @@ index db62d32a81..d2f1708297 100644 return 0; } -@@ -551,7 +551,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) +@@ -500,6 +501,9 @@ parse_qos_list(const char *value, void *opaque) + char *end = NULL; + char *f = s; + ++ if (s == NULL) ++ return; ++ + while (*s) { + if (*s == '[') + start = s; +@@ -551,7 +555,7 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs) &dev->force_ena_bp); rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag, &single_ws); @@ -34073,7 +40845,16 @@ index db62d32a81..d2f1708297 100644 &dev->gw_mode); dev->dual_ws = !single_ws; rte_kvargs_free(kvlist); -@@ -613,9 +613,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) +@@ -587,7 +591,7 @@ cnxk_sso_init(struct rte_eventdev *event_dev) + } + + dev->is_timeout_deq = 0; +- dev->min_dequeue_timeout_ns = 0; ++ dev->min_dequeue_timeout_ns = USEC2NSEC(1); + dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF); + dev->max_num_events = -1; + dev->nb_event_queues = 0; +@@ -613,9 +617,8 @@ cnxk_sso_fini(struct rte_eventdev *event_dev) cnxk_tim_fini(); roc_sso_rsrc_fini(&dev->sso); @@ -34294,7 +41075,7 @@ index eda84c6f31..6be31f6f9d 100644 chunk = (struct cnxk_tim_ent *)mirr_bkt->current_chunk; chunk += (tim_ring->nb_chunk_slots - chunk_remainder); diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c -index 60c5cd4804..fa1ccb25ba 100644 +index 60c5cd4804..d5981afa6a 100644 --- a/dpdk/drivers/event/dlb2/dlb2.c +++ b/dpdk/drivers/event/dlb2/dlb2.c @@ -72,6 +72,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = { @@ -34305,6 +41086,23 @@ index 60c5cd4804..fa1ccb25ba 100644 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES | RTE_EVENT_DEV_CAP_BURST_MODE | +@@ -215,7 +216,6 @@ static int + dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + { + struct dlb2_hw_dev *handle = &dlb2->qm_instance; +- struct dlb2_hw_resource_info *dlb2_info = &handle->info; + int num_ldb_ports; + int ret; + +@@ -277,8 +277,6 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + handle->info.hw_rsrc_max.reorder_window_size = + dlb2->hw_rsrc_query_results.num_hist_list_entries; + +- rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info)); +- + return 0; + } + diff --git a/dpdk/drivers/event/dlb2/dlb2_selftest.c b/dpdk/drivers/event/dlb2/dlb2_selftest.c index 1863ffe049..62aa11d981 100644 --- a/dpdk/drivers/event/dlb2/dlb2_selftest.c @@ -34521,6 +41319,19 @@ index 9ce8b39b60..08ace84bbe 100644 if (dev == NULL) { PMD_DRV_LOG(ERR, "eventdev vdev init() failed"); +diff --git a/dpdk/drivers/event/opdl/opdl_ring.c b/dpdk/drivers/event/opdl/opdl_ring.c +index 69392b56bb..da5ea02d19 100644 +--- a/dpdk/drivers/event/opdl/opdl_ring.c ++++ b/dpdk/drivers/event/opdl/opdl_ring.c +@@ -910,7 +910,7 @@ opdl_ring_create(const char *name, uint32_t num_slots, uint32_t slot_size, + 
RTE_CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct opdl_ring, slots) & + RTE_CACHE_LINE_MASK) != 0); +- RTE_BUILD_BUG_ON(!rte_is_power_of_2(OPDL_DISCLAIMS_PER_LCORE)); ++ RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(OPDL_DISCLAIMS_PER_LCORE)); + + /* Parameter checking */ + if (name == NULL) { diff --git a/dpdk/drivers/event/skeleton/skeleton_eventdev.c b/dpdk/drivers/event/skeleton/skeleton_eventdev.c index 8513b9a013..45c13c62c7 100644 --- a/dpdk/drivers/event/skeleton/skeleton_eventdev.c @@ -34549,11 +41360,44 @@ index 8513b9a013..45c13c62c7 100644 } static int +diff --git a/dpdk/drivers/event/sw/iq_chunk.h b/dpdk/drivers/event/sw/iq_chunk.h +index 31d013eab7..7820815c38 100644 +--- a/dpdk/drivers/event/sw/iq_chunk.h ++++ b/dpdk/drivers/event/sw/iq_chunk.h +@@ -9,8 +9,6 @@ + #include + #include + +-#define IQ_ROB_NAMESIZE 12 +- + struct sw_queue_chunk { + struct rte_event events[SW_EVS_PER_Q_CHUNK]; + struct sw_queue_chunk *next; diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c -index 3531821dd4..2a2763ee9b 100644 +index 3531821dd4..09200c9188 100644 --- a/dpdk/drivers/event/sw/sw_evdev.c +++ b/dpdk/drivers/event/sw/sw_evdev.c -@@ -1074,7 +1074,7 @@ sw_probe(struct rte_vdev_device *vdev) +@@ -228,9 +228,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, + const struct rte_event_queue_conf *queue_conf) + { + unsigned int i; +- int dev_id = sw->data->dev_id; + int socket_id = sw->data->socket_id; +- char buf[IQ_ROB_NAMESIZE]; + struct sw_qid *qid = &sw->qids[idx]; + + /* Initialize the FID structures to no pinning (-1), and zero packets */ +@@ -260,8 +258,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, + goto cleanup; + } + +- snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i); +- qid->reorder_buffer = rte_zmalloc_socket(buf, ++ qid->reorder_buffer = rte_zmalloc_socket(NULL, + window_size * sizeof(qid->reorder_buffer[0]), + 0, socket_id); + if (!qid->reorder_buffer) { +@@ -1074,7 +1071,7 @@ sw_probe(struct rte_vdev_device *vdev) min_burst_size, deq_burst_size, refill_once); dev = rte_event_pmd_vdev_init(name, @@ -34673,11 +41517,50 @@ index ba826f0f01..9d6982fdab 100644 if (unlikely(count != n)) { /* No partial alloc allowed. Free up allocated pointers */ cn10k_mempool_enq(mp, obj_table, count); +diff --git a/dpdk/drivers/meson.build b/dpdk/drivers/meson.build +index 5188302057..b4d9252888 100644 +--- a/dpdk/drivers/meson.build ++++ b/dpdk/drivers/meson.build +@@ -159,7 +159,7 @@ foreach subpath:subdirs + # component disable printout in those cases + if reason != '' + dpdk_drvs_disabled += drv_path +- set_variable(drv_path.underscorify() + '_disable_reason', reason) ++ set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason) + endif + continue + endif diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -index c13a0942aa..397a32db58 100644 +index c13a0942aa..6b7b16f348 100644 --- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -@@ -313,7 +313,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -6,6 +6,7 @@ + * All rights reserved. 
+ */ + ++#include + #include + #include + #include +@@ -39,7 +40,7 @@ + #define DFLT_FRAME_SIZE (1 << 11) + #define DFLT_FRAME_COUNT (1 << 9) + +-struct pkt_rx_queue { ++struct __rte_cache_aligned pkt_rx_queue { + int sockfd; + + struct iovec *rd; +@@ -55,7 +56,7 @@ struct pkt_rx_queue { + volatile unsigned long rx_bytes; + }; + +-struct pkt_tx_queue { ++struct __rte_cache_aligned pkt_tx_queue { + int sockfd; + unsigned int frame_data_size; + +@@ -313,7 +314,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static int eth_dev_start(struct rte_eth_dev *dev) { @@ -34692,7 +41575,7 @@ index c13a0942aa..397a32db58 100644 return 0; } -@@ -341,6 +348,8 @@ eth_dev_stop(struct rte_eth_dev *dev) +@@ -341,6 +349,8 @@ eth_dev_stop(struct rte_eth_dev *dev) internals->rx_queue[i].sockfd = -1; internals->tx_queue[i].sockfd = -1; @@ -34701,11 +41584,94 @@ index c13a0942aa..397a32db58 100644 } dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; +diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build +index 979b914bb6..1182ce5325 100644 +--- a/dpdk/drivers/net/af_xdp/meson.build ++++ b/dpdk/drivers/net/af_xdp/meson.build +@@ -7,6 +7,12 @@ if is_windows + subdir_done() + endif + ++if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_32') ++ build = false ++ reason = 'not supported on 32-bit x86' ++ subdir_done() ++endif ++ + sources = files('rte_eth_af_xdp.c') + + libxdp_ver = '>=1.2.2' diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -index b6ec9bf490..738f4158e0 100644 +index b6ec9bf490..343b4c40c9 100644 --- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -@@ -672,7 +672,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +@@ -104,7 +104,7 @@ struct xsk_umem_info { + struct rx_stats { + uint64_t rx_pkts; + uint64_t rx_bytes; +- uint64_t rx_dropped; ++ uint64_t imissed_offset; + }; + + struct pkt_rx_queue { +@@ -112,6 +112,7 @@ struct pkt_rx_queue { + struct xsk_umem_info *umem; + struct xsk_socket *xsk; + struct rte_mempool *mb_pool; ++ uint16_t port; + + struct rx_stats stats; + +@@ -289,6 +290,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + unsigned long rx_bytes = 0; + int i; + struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE]; ++ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; + + nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); + +@@ -316,6 +318,8 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; ++ dev->data->rx_mbuf_alloc_failed += nb_pkts; ++ + return 0; + } + +@@ -338,6 +342,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + bufs[i]->data_off = offset - sizeof(struct rte_mbuf) - + rte_pktmbuf_priv_size(umem->mb_pool) - + umem->mb_pool->header_size; ++ bufs[i]->port = rxq->port; + + rte_pktmbuf_pkt_len(bufs[i]) = len; + rte_pktmbuf_data_len(bufs[i]) = len; +@@ -366,6 +371,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + int i; + uint32_t free_thresh = fq->size >> 1; + struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; ++ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; + + if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh) + (void)reserve_fill_queue(umem, nb_pkts, NULL, fq); +@@ -384,6 +390,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; ++ 
dev->data->rx_mbuf_alloc_failed += nb_pkts; + return 0; + } + +@@ -404,6 +411,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + rte_pktmbuf_data_len(mbufs[i]) = len; + rx_bytes += len; + bufs[i] = mbufs[i]; ++ bufs[i]->port = rxq->port; + } + + xsk_ring_cons__release(rx, nb_pkts); +@@ -672,7 +680,13 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) static int eth_dev_start(struct rte_eth_dev *dev) { @@ -34719,7 +41685,7 @@ index b6ec9bf490..738f4158e0 100644 return 0; } -@@ -681,7 +687,14 @@ eth_dev_start(struct rte_eth_dev *dev) +@@ -681,7 +695,14 @@ eth_dev_start(struct rte_eth_dev *dev) static int eth_dev_stop(struct rte_eth_dev *dev) { @@ -34734,6 +41700,98 @@ index b6ec9bf490..738f4158e0 100644 return 0; } +@@ -832,7 +853,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + + stats->ipackets += stats->q_ipackets[i]; + stats->ibytes += stats->q_ibytes[i]; +- stats->imissed += rxq->stats.rx_dropped; + stats->oerrors += txq->stats.tx_dropped; + fd = process_private->rxq_xsk_fds[i]; + ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS, +@@ -841,7 +861,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); + return -1; + } +- stats->imissed += xdp_stats.rx_dropped; ++ stats->imissed += xdp_stats.rx_dropped - rxq->stats.imissed_offset; + + stats->opackets += stats->q_opackets[i]; + stats->obytes += stats->q_obytes[i]; +@@ -854,13 +874,25 @@ static int + eth_stats_reset(struct rte_eth_dev *dev) + { + struct pmd_internals *internals = dev->data->dev_private; +- int i; ++ struct pmd_process_private *process_private = dev->process_private; ++ struct xdp_statistics xdp_stats; ++ socklen_t optlen; ++ int i, ret, fd; + + for (i = 0; i < internals->queue_cnt; i++) { + memset(&internals->rx_queues[i].stats, 0, + sizeof(struct rx_stats)); + memset(&internals->tx_queues[i].stats, 0, + sizeof(struct tx_stats)); ++ fd = process_private->rxq_xsk_fds[i]; ++ optlen = sizeof(struct xdp_statistics); ++ ret = fd >= 0 ? 
getsockopt(fd, SOL_XDP, XDP_STATISTICS, ++ &xdp_stats, &optlen) : -1; ++ if (ret != 0) { ++ AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); ++ return -1; ++ } ++ internals->rx_queues[i].stats.imissed_offset = xdp_stats.rx_dropped; + } + + return 0; +@@ -925,6 +957,9 @@ remove_xdp_program(struct pmd_internals *internals) + static void + xdp_umem_destroy(struct xsk_umem_info *umem) + { ++ (void)xsk_umem__delete(umem->umem); ++ umem->umem = NULL; ++ + #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + umem->mb_pool = NULL; + #else +@@ -957,11 +992,8 @@ eth_dev_close(struct rte_eth_dev *dev) + break; + xsk_socket__delete(rxq->xsk); + +- if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) +- == 0) { +- (void)xsk_umem__delete(rxq->umem->umem); ++ if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) + xdp_umem_destroy(rxq->umem); +- } + + /* free pkt_tx_queue */ + rte_free(rxq->pair); +@@ -1198,6 +1230,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n"); + goto err; + } ++ umem->mz = mz; + + ret = xsk_umem__create(&umem->umem, mz->addr, + ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, +@@ -1208,7 +1241,6 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + AF_XDP_LOG(ERR, "Failed to create umem\n"); + goto err; + } +- umem->mz = mz; + + return umem; + +@@ -1482,6 +1514,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, + + process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd; + ++ rxq->port = dev->data->port_id; ++ + dev->data->rx_queues[rx_queue_id] = rxq; + return 0; + diff --git a/dpdk/drivers/net/ark/ark_ethdev.c b/dpdk/drivers/net/ark/ark_ethdev.c index c654a229f7..c1681e8ecd 100644 --- a/dpdk/drivers/net/ark/ark_ethdev.c @@ -34806,10 +41864,70 @@ index cbc0416bc2..38bc69dff4 100644 if (unlikely(status != 0)) { struct rte_mbuf **mbuf; diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c -index 5940a592a2..4792754f19 100644 +index 5940a592a2..8f1f90b1a4 100644 --- a/dpdk/drivers/net/ark/ark_ethdev_tx.c +++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c -@@ -229,7 +229,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev, +@@ -39,8 +39,8 @@ struct ark_tx_queue { + uint32_t queue_mask; + + /* 3 indexes to the paired data rings. */ +- int32_t prod_index; /* where to put the next one */ +- int32_t free_index; /* mbuf has been freed */ ++ uint32_t prod_index; /* where to put the next one */ ++ uint32_t free_index; /* mbuf has been freed */ + + /* The queue Id is used to identify the HW Q */ + uint16_t phys_qid; +@@ -49,7 +49,7 @@ struct ark_tx_queue { + + /* next cache line - fields written by device */ + RTE_MARKER cacheline1 __rte_cache_min_aligned; +- volatile int32_t cons_index; /* hw is done, can be freed */ ++ volatile uint32_t cons_index; /* hw is done, can be freed */ + } __rte_cache_aligned; + + /* Forward declarations */ +@@ -108,7 +108,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + uint32_t user_meta[5]; + + int stat; +- int32_t prod_index_limit; ++ uint32_t prod_index_limit; + uint16_t nb; + uint8_t user_len = 0; + const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN; +@@ -123,8 +123,13 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + /* leave 4 elements mpu data */ + prod_index_limit = queue->queue_size + queue->free_index - 4; + ++ /* Populate the buffer bringing prod_index up to or slightly beyond ++ * prod_index_limit. 
Prod_index will increment by 2 or more each ++ * iteration. Note: indexes are uint32_t, cast to (signed) int32_t ++ * to catch the slight overage case; e.g. (200 - 201) ++ */ + for (nb = 0; +- (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0; ++ (nb < nb_pkts) && (int32_t)(prod_index_limit - queue->prod_index) > 0; + ++nb) { + mbuf = tx_pkts[nb]; + +@@ -194,13 +199,13 @@ eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf, + uint32_t *user_meta, uint8_t meta_cnt) + { + struct rte_mbuf *next; +- int32_t free_queue_space; ++ uint32_t free_queue_space; + uint8_t flags = ARK_DDM_SOP; + + free_queue_space = queue->queue_mask - + (queue->prod_index - queue->free_index); + /* We need up to 4 mbufs for first header and 2 for subsequent ones */ +- if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs)))) ++ if (unlikely(free_queue_space < (2U + (2U * mbuf->nb_segs)))) + return -1; + + while (mbuf != NULL) { +@@ -229,7 +234,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev, struct ark_tx_queue *queue; int status; @@ -34818,6 +41936,20 @@ index 5940a592a2..4792754f19 100644 if (!rte_is_power_of_2(nb_desc)) { ARK_PMD_LOG(ERR, +@@ -392,10 +397,11 @@ free_completed_tx(struct ark_tx_queue *queue) + { + struct rte_mbuf *mbuf; + union ark_tx_meta *meta; +- int32_t top_index; ++ uint32_t top_index; + + top_index = queue->cons_index; /* read once */ +- while ((top_index - queue->free_index) > 0) { ++ ++ while ((int32_t)(top_index - queue->free_index) > 0) { + meta = &queue->meta_q[queue->free_index & queue->queue_mask]; + if (likely((meta->flags & ARK_DDM_SOP) != 0)) { + mbuf = queue->bufs[queue->free_index & diff --git a/dpdk/drivers/net/ark/ark_global.h b/dpdk/drivers/net/ark/ark_global.h index 71d0b53e03..2f198edfe4 100644 --- a/dpdk/drivers/net/ark/ark_global.h @@ -34877,8 +42009,156 @@ index b2a08f5635..53d9e38c93 100644 unlock: rte_spinlock_unlock(&avp->lock); return ret; +diff --git a/dpdk/drivers/net/axgbe/axgbe_common.h b/dpdk/drivers/net/axgbe/axgbe_common.h +index a5d11c5832..51532fb34a 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_common.h ++++ b/dpdk/drivers/net/axgbe/axgbe_common.h +@@ -407,8 +407,6 @@ + #define MAC_MDIOSCAR_PA_WIDTH 5 + #define MAC_MDIOSCAR_RA_INDEX 0 + #define MAC_MDIOSCAR_RA_WIDTH 16 +-#define MAC_MDIOSCAR_REG_INDEX 0 +-#define MAC_MDIOSCAR_REG_WIDTH 21 + #define MAC_MDIOSCCDR_BUSY_INDEX 22 + #define MAC_MDIOSCCDR_BUSY_WIDTH 1 + #define MAC_MDIOSCCDR_CMD_INDEX 16 +diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c +index 6a7fddffca..5233633a53 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_dev.c ++++ b/dpdk/drivers/net/axgbe/axgbe_dev.c +@@ -63,15 +63,27 @@ static int mdio_complete(struct axgbe_port *pdata) + return 0; + } + ++static unsigned int axgbe_create_mdio_sca(int port, int reg) ++{ ++ unsigned int mdio_sca, da; ++ ++ da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; ++ ++ mdio_sca = 0; ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); ++ ++ return mdio_sca; ++} ++ + static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr, + int reg, u16 val) + { + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; + +- mdio_sca = 0; +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = axgbe_create_mdio_sca(addr, reg); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; +@@ -97,9 +109,7 @@ static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr, + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; + +- mdio_sca = 0; +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = axgbe_create_mdio_sca(addr, reg); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + + mdio_sccd = 0; +@@ -259,20 +269,28 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed) + return 0; + } + ++static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata) ++{ ++ unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; ++ ++ /* From MAC ver 30H the TFCR is per priority, instead of per queue */ ++ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) ++ return max_q_count; ++ else ++ return (RTE_MIN(pdata->tx_q_count, max_q_count)); ++} ++ + static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) + { +- unsigned int max_q_count, q_count; + unsigned int reg, reg_val; +- unsigned int i; ++ unsigned int i, q_count; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); + + /* Clear MAC flow control */ +- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; +- q_count = RTE_MIN(pdata->tx_q_count, +- max_q_count); ++ q_count = axgbe_get_fc_queue_count(pdata); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); +@@ -287,9 +305,8 @@ static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) + + static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) + { +- unsigned int max_q_count, q_count; + unsigned int reg, reg_val; +- unsigned int i; ++ unsigned int i, q_count; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { +@@ -306,9 +323,7 @@ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) + } + + /* Set MAC flow control */ +- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; +- q_count = RTE_MIN(pdata->tx_q_count, +- max_q_count); ++ q_count = axgbe_get_fc_queue_count(pdata); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); +@@ -637,23 +652,21 @@ static void axgbe_config_dma_cache(struct axgbe_port *pdata) + unsigned int arcache, awcache, arwcache; + + arcache = 0; +- AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); + + awcache = 0; +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3); +- 
AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); + + arwcache = 0; +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1); +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3); +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3); ++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf); ++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache); + } + diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c -index b071e4e460..da48a3ac42 100644 +index b071e4e460..2156b1d17a 100644 --- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c +++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c @@ -12,6 +12,12 @@ @@ -34912,7 +42192,23 @@ index b071e4e460..da48a3ac42 100644 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 -@@ -2122,29 +2133,6 @@ static void axgbe_default_config(struct axgbe_port *pdata) +@@ -194,6 +205,7 @@ static struct axgbe_version_data axgbe_v2a = { + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, ++ .enable_rrc = 1, + }; + + static struct axgbe_version_data axgbe_v2b = { +@@ -206,6 +218,7 @@ static struct axgbe_version_data axgbe_v2b = { + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, ++ .enable_rrc = 1, + }; + + static const struct rte_eth_desc_lim rx_desc_lim = { +@@ -2122,29 +2135,6 @@ static void axgbe_default_config(struct axgbe_port *pdata) pdata->power_down = 0; } @@ -34942,7 +42238,7 @@ index b071e4e460..da48a3ac42 100644 /* * It returns 0 on success. */ -@@ -2158,6 +2146,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +@@ -2158,6 +2148,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) uint32_t len; int ret; @@ -34952,7 +42248,7 @@ index b071e4e460..da48a3ac42 100644 eth_dev->dev_ops = &axgbe_eth_dev_ops; eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status; -@@ -2196,26 +2187,55 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +@@ -2196,26 +2189,58 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) pdata->vdata = &axgbe_v2b; /* @@ -35007,6 +42303,9 @@ index b071e4e460..da48a3ac42 100644 + + /* Yellow Carp devices do not need cdr workaround */ + pdata->vdata->an_cdr_workaround = 0; ++ ++ /* Yellow Carp devices do not need rrc */ ++ pdata->vdata->enable_rrc = 0; + } else { + unknown_cpu = 1; + } @@ -35025,6 +42324,321 @@ index b071e4e460..da48a3ac42 100644 } /* Configure the PCS indirect addressing support */ +@@ -2341,12 +2366,14 @@ static int + axgbe_dev_close(struct rte_eth_dev *eth_dev) + { + struct rte_pci_device *pci_dev; ++ struct axgbe_port *pdata; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + ++ pdata = eth_dev->data->dev_private; + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + axgbe_dev_clear_queues(eth_dev); + +@@ -2356,6 +2383,9 @@ axgbe_dev_close(struct rte_eth_dev *eth_dev) + axgbe_dev_interrupt_handler, + (void *)eth_dev); + ++ /* Disable all interrupts in the hardware */ ++ XP_IOWRITE(pdata, XP_INT_EN, 0x0); ++ + return 0; + } + +diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h +index 7f19321d88..b4bd56e239 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h ++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h +@@ -111,6 +111,7 @@ + /* Auto-negotiation */ + #define AXGBE_AN_MS_TIMEOUT 
500 + #define AXGBE_LINK_TIMEOUT 5 ++#define AXGBE_KR_TRAINING_WAIT_ITER 50 + + #define AXGBE_SGMII_AN_LINK_STATUS BIT(1) + #define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +@@ -463,6 +464,7 @@ struct axgbe_version_data { + unsigned int ecc_support; + unsigned int i2c_support; + unsigned int an_cdr_workaround; ++ unsigned int enable_rrc; + }; + + struct axgbe_mmc_stats { +@@ -653,6 +655,7 @@ struct axgbe_port { + unsigned int parallel_detect; + unsigned int fec_ability; + unsigned long an_start; ++ unsigned long kr_start_time; + enum axgbe_an_mode an_mode; + + /* I2C support */ +diff --git a/dpdk/drivers/net/axgbe/axgbe_mdio.c b/dpdk/drivers/net/axgbe/axgbe_mdio.c +index 913ceada0d..d95a52659e 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_mdio.c ++++ b/dpdk/drivers/net/axgbe/axgbe_mdio.c +@@ -200,13 +200,14 @@ static void axgbe_switch_mode(struct axgbe_port *pdata) + axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); + } + +-static void axgbe_set_mode(struct axgbe_port *pdata, ++static bool axgbe_set_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) + { + if (mode == axgbe_cur_mode(pdata)) +- return; ++ return false; + + axgbe_change_mode(pdata, mode); ++ return true; + } + + static bool axgbe_use_mode(struct axgbe_port *pdata, +@@ -357,6 +358,7 @@ static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, + reg |= AXGBE_KR_TRAINING_ENABLE; + reg |= AXGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); ++ pdata->kr_start_time = rte_get_timer_cycles(); + + PMD_DRV_LOG(DEBUG, "KR training initiated\n"); + if (pdata->phy_if.phy_impl.kr_training_post) +@@ -487,6 +489,7 @@ static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) + + axgbe_an_disable(pdata); + axgbe_switch_mode(pdata); ++ pdata->an_result = AXGBE_AN_READY; + axgbe_an_restart(pdata); + + return AXGBE_AN_INCOMPAT_LINK; +@@ -967,11 +970,34 @@ static void axgbe_check_link_timeout(struct axgbe_port *pdata) + { + unsigned long link_timeout; + unsigned long ticks; ++ unsigned long kr_time; ++ int wait; + + link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT * + 2 * rte_get_timer_hz()); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, link_timeout)) { ++ if ((axgbe_cur_mode(pdata) == AXGBE_MODE_KR) && ++ pdata->phy.autoneg == AUTONEG_ENABLE) { ++ /* AN restart should not happen while KR training is in progress. ++ * The while loop ensures no AN restart during KR training, ++ * waits up to 500ms and AN restart is triggered only if KR ++ * training is failed. 
++ */ ++ wait = AXGBE_KR_TRAINING_WAIT_ITER; ++ while (wait--) { ++ kr_time = pdata->kr_start_time + ++ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); ++ ticks = rte_get_timer_cycles(); ++ if (time_after(ticks, kr_time)) ++ break; ++ /* AN restart is not required, if AN result is COMPLETE */ ++ if (pdata->an_result == AXGBE_AN_COMPLETE) ++ return; ++ rte_delay_us(10500); ++ } ++ } ++ + PMD_DRV_LOG(NOTICE, "AN link timeout\n"); + axgbe_phy_config_aneg(pdata); + } +@@ -982,7 +1008,7 @@ static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata) + return pdata->phy_if.phy_impl.an_outcome(pdata); + } + +-static void axgbe_phy_status_result(struct axgbe_port *pdata) ++static bool axgbe_phy_status_result(struct axgbe_port *pdata) + { + enum axgbe_mode mode; + +@@ -1016,7 +1042,10 @@ static void axgbe_phy_status_result(struct axgbe_port *pdata) + + pdata->phy.duplex = DUPLEX_FULL; + +- axgbe_set_mode(pdata, mode); ++ if (axgbe_set_mode(pdata, mode)) ++ return true; ++ else ++ return false; + } + + static int autoneg_time_out(unsigned long autoneg_start_time) +@@ -1051,7 +1080,7 @@ static void axgbe_phy_status(struct axgbe_port *pdata) + &an_restart); + if (an_restart) { + axgbe_phy_config_aneg(pdata); +- return; ++ goto adjust_link; + } + + if (pdata->phy.link) { +@@ -1083,7 +1112,10 @@ static void axgbe_phy_status(struct axgbe_port *pdata) + return; + } + } +- axgbe_phy_status_result(pdata); ++ ++ if (axgbe_phy_status_result(pdata)) ++ return; ++ + if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) + rte_bit_relaxed_clear32(AXGBE_LINK_INIT, + &pdata->dev_state); +diff --git a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c +index d97fbbfddd..12908d4e6f 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c ++++ b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c +@@ -69,6 +69,7 @@ enum axgbe_sfp_cable { + AXGBE_SFP_CABLE_UNKNOWN = 0, + AXGBE_SFP_CABLE_ACTIVE, + AXGBE_SFP_CABLE_PASSIVE, ++ AXGBE_SFP_CABLE_FIBER, + }; + + enum axgbe_sfp_base { +@@ -116,9 +117,7 @@ enum axgbe_sfp_speed { + + #define AXGBE_SFP_BASE_BR 12 + #define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a +-#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d + #define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 +-#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 + + #define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + +@@ -535,25 +534,22 @@ static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata) + static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, + enum axgbe_sfp_speed sfp_speed) + { +- u8 *sfp_base, min, max; ++ u8 *sfp_base, min; + + sfp_base = sfp_eeprom->base; + + switch (sfp_speed) { + case AXGBE_SFP_SPEED_1000: + min = AXGBE_SFP_BASE_BR_1GBE_MIN; +- max = AXGBE_SFP_BASE_BR_1GBE_MAX; + break; + case AXGBE_SFP_SPEED_10000: + min = AXGBE_SFP_BASE_BR_10GBE_MIN; +- max = AXGBE_SFP_BASE_BR_10GBE_MAX; + break; + default: + return false; + } + +- return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) && +- (sfp_base[AXGBE_SFP_BASE_BR] <= max)); ++ return sfp_base[AXGBE_SFP_BASE_BR] >= min; + } + + static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata) +@@ -578,6 +574,9 @@ static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) + AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) + return false; + ++ /* Reset PHY - wait for self-clearing reset bit to clear */ ++ pdata->phy_if.phy_impl.reset(pdata); ++ + if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], + AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; +@@ -613,16 +612,21 @@ static void 
axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) + + axgbe_phy_sfp_parse_quirks(pdata); + +- /* Assume ACTIVE cable unless told it is PASSIVE */ ++ /* Assume FIBER cable unless told otherwise */ + if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; +- } else { ++ } else if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_ACTIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; ++ } else { ++ phy_data->sfp_cable = AXGBE_SFP_CABLE_FIBER; + } + + /* Determine the type of SFP */ +- if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) ++ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_FIBER && ++ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) ++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; ++ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; +@@ -639,9 +643,6 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; +- else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && +- axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) +- phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; + + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: +@@ -1225,6 +1226,10 @@ static void axgbe_phy_rx_reset(struct axgbe_port *pdata) + + static void axgbe_phy_pll_ctrl(struct axgbe_port *pdata, bool enable) + { ++ /* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */ ++ if (pdata->phy.autoneg != AUTONEG_DISABLE) ++ return; ++ + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0, + XGBE_PMA_PLL_CTRL_MASK, + enable ? 
XGBE_PMA_PLL_CTRL_SET +@@ -1269,8 +1274,9 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata, + axgbe_phy_rx_reset(pdata); + + reenable_pll: +- /* Re-enable the PLL control */ +- axgbe_phy_pll_ctrl(pdata, true); ++ /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */ ++ if (cmd != 0 && cmd != 5) ++ axgbe_phy_pll_ctrl(pdata, true); + + PMD_DRV_LOG(NOTICE, "firmware mailbox command did not complete\n"); + } +@@ -1697,8 +1703,15 @@ static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) + if (reg & MDIO_STAT1_LSTATUS) + return 1; + ++ if (pdata->phy.autoneg == AUTONEG_ENABLE && ++ phy_data->port_mode == AXGBE_PORT_MODE_BACKPLANE) { ++ if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) { ++ *an_restart = 1; ++ } ++ } ++ + /* No link, attempt a receiver reset cycle */ +- if (phy_data->rrc_count++) { ++ if (pdata->vdata->enable_rrc && phy_data->rrc_count++) { + phy_data->rrc_count = 0; + axgbe_phy_rrc(pdata); + } +diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c +index 74e3018eab..55a91fad78 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x.c +@@ -2389,7 +2389,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) + static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) + { + sc->ilt->lines = rte_calloc("", +- sizeof(struct ilt_line), ILT_MAX_LINES, ++ ILT_MAX_LINES, sizeof(struct ilt_line), + RTE_CACHE_LINE_SIZE); + return sc->ilt->lines == NULL; + } diff --git a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c index 4448cf2de2..1327cbe912 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c @@ -35069,8 +42683,130 @@ index 4448cf2de2..1327cbe912 100644 return 0; } +diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c +index c07b01510a..69132c7c80 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c +@@ -114,7 +114,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc) + + /* Update MCP's statistics if possible */ + if (sc->func_stx) { +- rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, ++ memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, + sizeof(sc->func_stats)); + } + +@@ -817,10 +817,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc) + etherstatspktsover1522octets); + } + +- rte_memcpy(old, new, sizeof(struct nig_stats)); ++ memcpy(old, new, sizeof(struct nig_stats)); + +- rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), +- sizeof(struct mac_stx)); ++ memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)), ++ &pstats->mac_stx[1], sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + +@@ -1492,9 +1492,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc) + REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(sc)) { + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, +- &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); ++ RTE_PTR_ADD(&sc->port.old_nig_stats, ++ offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2); + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, +- &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); ++ RTE_PTR_ADD(&sc->port.old_nig_stats, ++ offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2); + } + + /* function stats */ +diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +index 63953c2979..5411df3a38 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c ++++ 
b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +@@ -52,9 +52,9 @@ bnx2x_check_bull(struct bnx2x_softc *sc) + + /* check the mac address and VLAN and allocate memory if valid */ + if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN)) +- rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); ++ memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); + if (valid_bitmap & (1 << VLAN_VALID)) +- rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, RTE_VLAN_HLEN); ++ memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan)); + + sc->old_bulletin = *bull; + +@@ -569,7 +569,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) + + bnx2x_check_bull(sc); + +- rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); ++ memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, +@@ -583,9 +583,9 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) + while (BNX2X_VF_STATUS_FAILURE == reply->status && + bnx2x_check_bull(sc)) { + /* A new mac was configured by PF for us */ +- rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, ++ memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, + ETH_ALEN); +- rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, ++ memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, + ETH_ALEN); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); +@@ -622,10 +622,10 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + +- rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); ++ memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); + query->rss_key_size = T_ETH_RSS_KEY; + +- rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); ++ memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + + query->rss_result_mask = params->rss_result_mask; +diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h +index c9aa45ed3b..3044c9e079 100644 +--- a/dpdk/drivers/net/bnxt/bnxt.h ++++ b/dpdk/drivers/net/bnxt/bnxt.h +@@ -441,8 +441,8 @@ struct bnxt_ring_mem_info { + + struct bnxt_ctx_pg_info { + uint32_t entries; +- void *ctx_pg_arr[MAX_CTX_PAGES]; +- rte_iova_t ctx_dma_arr[MAX_CTX_PAGES]; ++ void **ctx_pg_arr; ++ rte_iova_t *ctx_dma_arr; + struct bnxt_ring_mem_info ring_mem; + }; + +@@ -542,7 +542,6 @@ struct bnxt_mark_info { + + struct bnxt_rep_info { + struct rte_eth_dev *vfr_eth_dev; +- pthread_mutex_t vfr_lock; + pthread_mutex_t vfr_start_lock; + bool conduit_valid; + }; +@@ -867,6 +866,7 @@ struct bnxt { + struct rte_ether_addr *mcast_addr_list; + rte_iova_t mc_list_dma_addr; + uint32_t nb_mc_addr; ++#define BNXT_DFLT_MAX_MC_ADDR 16 /* for compatibility with older firmware */ + uint32_t max_mcast_addr; /* maximum number of mcast filters supported */ + + struct rte_eth_rss_conf rss_conf; /* RSS configuration. 
*/ diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -index b3de490d36..e3ba48ac0b 100644 +index b3de490d36..cb52ef2f84 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c +++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -1017,7 +1017,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, @@ -35101,7 +42837,108 @@ index b3de490d36..e3ba48ac0b 100644 return 0; } -@@ -5859,6 +5864,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) +@@ -1643,10 +1648,8 @@ bnxt_uninit_locks(struct bnxt *bp) + pthread_mutex_destroy(&bp->def_cp_lock); + pthread_mutex_destroy(&bp->health_check_lock); + pthread_mutex_destroy(&bp->err_recovery_lock); +- if (bp->rep_info) { +- pthread_mutex_destroy(&bp->rep_info->vfr_lock); ++ if (bp->rep_info) + pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); +- } + } + + static void bnxt_drv_uninit(struct bnxt *bp) +@@ -4697,7 +4700,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + { + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + const struct rte_memzone *mz = NULL; +- char mz_name[RTE_MEMZONE_NAMESIZE]; ++ char name[RTE_MEMZONE_NAMESIZE]; + rte_iova_t mz_phys_addr; + uint64_t valid_bits = 0; + uint32_t sz; +@@ -4709,6 +4712,19 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / + BNXT_PAGE_SIZE; + rmem->page_size = BNXT_PAGE_SIZE; ++ ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d", ++ suffix, idx, bp->eth_dev->data->port_id); ++ ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0); ++ if (ctx_pg->ctx_pg_arr == NULL) ++ return -ENOMEM; ++ ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d", ++ suffix, idx, bp->eth_dev->data->port_id); ++ ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0); ++ if (ctx_pg->ctx_dma_arr == NULL) ++ return -ENOMEM; ++ + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; +@@ -4716,13 +4732,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + valid_bits = PTU_PTE_VALID; + + if (rmem->nr_pages > 1) { +- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, ++ snprintf(name, RTE_MEMZONE_NAMESIZE, + "bnxt_ctx_pg_tbl%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); +- mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; +- mz = rte_memzone_lookup(mz_name); ++ name[RTE_MEMZONE_NAMESIZE - 1] = 0; ++ mz = rte_memzone_lookup(name); + if (!mz) { +- mz = rte_memzone_reserve_aligned(mz_name, ++ mz = rte_memzone_reserve_aligned(name, + rmem->nr_pages * 8, + bp->eth_dev->device->numa_node, + RTE_MEMZONE_2MB | +@@ -4741,11 +4757,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + rmem->pg_tbl_mz = mz; + } + +- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); +- mz = rte_memzone_lookup(mz_name); ++ mz = rte_memzone_lookup(name); + if (!mz) { +- mz = rte_memzone_reserve_aligned(mz_name, ++ mz = rte_memzone_reserve_aligned(name, + mem_size, + bp->eth_dev->device->numa_node, + RTE_MEMZONE_1GB | +@@ -4791,6 +4807,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) + return; + + bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; ++ rte_free(bp->ctx->qp_mem.ctx_pg_arr); ++ rte_free(bp->ctx->srq_mem.ctx_pg_arr); ++ rte_free(bp->ctx->cq_mem.ctx_pg_arr); ++ rte_free(bp->ctx->vnic_mem.ctx_pg_arr); ++ rte_free(bp->ctx->stat_mem.ctx_pg_arr); ++ rte_free(bp->ctx->qp_mem.ctx_dma_arr); 
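/*
 * The bnxt hunks above replace fixed MAX_CTX_PAGES-sized page arrays in
 * struct bnxt_ctx_pg_info with arrays allocated from the real page
 * count, removing the compile-time cap and shrinking each context
 * block; the matching rte_free() calls in bnxt_free_ctx_mem() continue
 * below. A minimal sketch of the pattern, assuming only the DPDK
 * rte_zmalloc()/rte_free() API; the struct is a simplified stand-in,
 * not the driver's:
 */
#include <errno.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_malloc.h>

struct ctx_pg {
    uint32_t nr_pages;
    void **pg_arr;        /* one virtual address per backing page */
    rte_iova_t *dma_arr;  /* matching IOVA per page */
};

/* Size both arrays from the actual page count; the caller must
 * rte_free() both on teardown, as the free path above does. */
static int ctx_pg_alloc(struct ctx_pg *pg, uint32_t nr_pages)
{
    pg->nr_pages = nr_pages;
    pg->pg_arr = rte_zmalloc("ctx_pg_arr", nr_pages * sizeof(void *), 0);
    if (pg->pg_arr == NULL)
        return -ENOMEM;
    pg->dma_arr = rte_zmalloc("ctx_dma_arr", nr_pages * sizeof(rte_iova_t), 0);
    if (pg->dma_arr == NULL) {
        rte_free(pg->pg_arr);
        pg->pg_arr = NULL;
        return -ENOMEM;
    }
    return 0;
}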
++ rte_free(bp->ctx->srq_mem.ctx_dma_arr); ++ rte_free(bp->ctx->cq_mem.ctx_dma_arr); ++ rte_free(bp->ctx->vnic_mem.ctx_dma_arr); ++ rte_free(bp->ctx->stat_mem.ctx_dma_arr); ++ + rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); +@@ -4803,6 +4830,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) + rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); + + for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { ++ rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr); ++ rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr); + if (bp->ctx->tqm_mem[i]) + rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); + } +@@ -5859,6 +5888,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; @@ -35109,6 +42946,169 @@ index b3de490d36..e3ba48ac0b 100644 bp = eth_dev->data->dev_private; +@@ -6067,13 +6097,6 @@ static int bnxt_init_rep_info(struct bnxt *bp) + for (i = 0; i < BNXT_MAX_CFA_CODE; i++) + bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; + +- rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); +- if (rc) { +- PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); +- bnxt_free_rep_info(bp); +- return rc; +- } +- + rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); + if (rc) { + PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); +diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c +index 51e1e2d6b3..ad756337da 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c ++++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c +@@ -907,7 +907,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) + bp->max_l2_ctx, bp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters); +- ++ if (!bp->max_mcast_addr) ++ bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR; + if (BNXT_PF(bp)) { + bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics); + if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) { +@@ -2972,6 +2973,8 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) + static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + struct bnxt_link_info *link_info) + { ++ uint16_t support_pam4_speeds = link_info->support_pam4_speeds; ++ uint16_t support_speeds = link_info->support_speeds; + uint16_t eth_link_speed = 0; + + if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG) +@@ -3003,29 +3006,30 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + case RTE_ETH_LINK_SPEED_25G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; + break; + case RTE_ETH_LINK_SPEED_40G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; + break; + case RTE_ETH_LINK_SPEED_50G: +- if (link_info->support_pam4_speeds & +- HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { +- eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; +- link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; +- } else { ++ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; ++ } else if (support_pam4_speeds & ++ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { ++ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; + } + 
break; + case RTE_ETH_LINK_SPEED_100G: +- if (link_info->support_pam4_speeds & +- HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { +- eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; +- link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; +- } else { ++ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; ++ } else if (support_pam4_speeds & ++ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { ++ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; + } + break; + case RTE_ETH_LINK_SPEED_200G: +diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c +index 8a5b777793..d014714b93 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_reps.c ++++ b/dpdk/drivers/net/bnxt/bnxt_reps.c +@@ -32,6 +32,14 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = { + .flow_ops_get = bnxt_flow_ops_get_op + }; + ++static bool bnxt_rep_check_parent(struct bnxt_representor *rep) ++{ ++ if (!rep->parent_dev->data->dev_private) ++ return false; ++ ++ return true; ++} ++ + uint16_t + bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf) + { +@@ -124,8 +132,8 @@ bnxt_rep_tx_burst(void *tx_queue, + qid = vfr_txq->txq->queue_id; + vf_rep_bp = vfr_txq->bp; + parent = vf_rep_bp->parent_dev->data->dev_private; +- pthread_mutex_lock(&parent->rep_info->vfr_lock); + ptxq = parent->tx_queues[qid]; ++ pthread_mutex_lock(&ptxq->txq_lock); + + ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action; + +@@ -134,9 +142,9 @@ bnxt_rep_tx_burst(void *tx_queue, + vf_rep_bp->tx_pkts[qid]++; + } + +- rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); ++ rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); + ptxq->vfr_tx_cfa_action = 0; +- pthread_mutex_unlock(&parent->rep_info->vfr_lock); ++ pthread_mutex_unlock(&ptxq->txq_lock); + + return rc; + } +@@ -266,12 +274,12 @@ int bnxt_representor_uninit(struct rte_eth_dev *eth_dev) + PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id); + eth_dev->data->mac_addrs = NULL; + +- parent_bp = rep->parent_dev->data->dev_private; +- if (!parent_bp) { ++ if (!bnxt_rep_check_parent(rep)) { + PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n", + eth_dev->data->port_id); + return 0; + } ++ parent_bp = rep->parent_dev->data->dev_private; + + parent_bp->num_reps--; + vf_id = rep->vf_id; +@@ -539,11 +547,12 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, + int rc = 0; + + /* MAC Specifics */ +- parent_bp = rep_bp->parent_dev->data->dev_private; +- if (!parent_bp) { +- PMD_DRV_LOG(ERR, "Rep parent NULL!\n"); ++ if (!bnxt_rep_check_parent(rep_bp)) { ++ /* Need not be an error scenario, if parent is closed first */ ++ PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n"); + return rc; + } ++ parent_bp = rep_bp->parent_dev->data->dev_private; + PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n"); + dev_info->max_mac_addrs = parent_bp->max_l2_ctx; + dev_info->max_hash_mac_addrs = 0; +@@ -730,10 +739,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + struct bnxt_tx_queue *parent_txq, *txq; + struct bnxt_vf_rep_tx_queue *vfr_txq; + +- if (queue_idx >= rep_bp->rx_nr_rings) { ++ if (queue_idx >= rep_bp->tx_nr_rings) { + PMD_DRV_LOG(ERR, + "Cannot create Tx rings %d. 
%d rings available\n", +- queue_idx, rep_bp->rx_nr_rings); ++ queue_idx, rep_bp->tx_nr_rings); + return -EINVAL; + } + diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.c b/dpdk/drivers/net/bnxt/bnxt_rxq.c index fabbbd4560..99758dd304 100644 --- a/dpdk/drivers/net/bnxt/bnxt_rxq.c @@ -35138,11 +43138,67 @@ index daaf9ffc1e..0eebddb05d 100644 return 0; } +diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c +index c8745add5e..0f41193038 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txq.c ++++ b/dpdk/drivers/net/bnxt/bnxt_txq.c +@@ -111,6 +111,7 @@ void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx) + txq->mz = NULL; + + rte_free(txq->free); ++ pthread_mutex_destroy(&txq->txq_lock); + rte_free(txq); + dev->data->tx_queues[queue_idx] = NULL; + } +@@ -194,6 +195,11 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + goto err; + } + ++ rc = pthread_mutex_init(&txq->txq_lock, NULL); ++ if (rc != 0) { ++ PMD_DRV_LOG(ERR, "TxQ mutex init failed!"); ++ goto err; ++ } + return 0; + err: + bnxt_tx_queue_release_op(eth_dev, queue_idx); +diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h +index f3a03812ad..6e2d87de09 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txq.h ++++ b/dpdk/drivers/net/bnxt/bnxt_txq.h +@@ -26,6 +26,7 @@ struct bnxt_tx_queue { + int index; + int tx_wake_thresh; + uint32_t vfr_tx_cfa_action; ++ pthread_mutex_t txq_lock; + struct bnxt_tx_ring_info *tx_ring; + + unsigned int cp_nr_rings; diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c -index 67e016775c..21c2217092 100644 +index 67e016775c..60bb3eea0c 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txr.c +++ b/dpdk/drivers/net/bnxt/bnxt_txr.c -@@ -560,6 +560,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -515,6 +515,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) + + uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) ++{ ++ struct bnxt_tx_queue *txq = tx_queue; ++ uint16_t rc; ++ ++ pthread_mutex_lock(&txq->txq_lock); ++ rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts); ++ pthread_mutex_unlock(&txq->txq_lock); ++ ++ return rc; ++} ++ ++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts) + { + int rc; + uint16_t nb_tx_pkts = 0; +@@ -560,6 +573,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) if (rc) return rc; @@ -35155,8 +43211,35 @@ index 67e016775c..21c2217092 100644 bnxt_free_hwrm_tx_ring(bp, tx_queue_id); rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id); if (rc) +diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.h b/dpdk/drivers/net/bnxt/bnxt_txr.h +index e11343c082..2be3ba4cac 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txr.h ++++ b/dpdk/drivers/net/bnxt/bnxt_txr.h +@@ -46,7 +46,9 @@ void bnxt_free_tx_rings(struct bnxt *bp); + int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); + int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); + uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, +- uint16_t nb_pkts); ++ uint16_t nb_pkts); ++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts); + #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) + uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c +index 0030a487f5..897410cc0a 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c ++++ 
b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c +@@ -171,6 +171,7 @@ ulp_ha_mgr_timer_cb(void *arg) + + myclient_cnt = bnxt_ulp_cntxt_num_shared_clients_get(ulp_ctx); + if (myclient_cnt == 0) { ++ bnxt_ulp_cntxt_entry_release(); + BNXT_TF_DBG(ERR, + "PANIC Client Count is zero kill timer\n."); + return; diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c -index 4a266bb2ca..928dfca7af 100644 +index 4a266bb2ca..2a81ab5bf3 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -654,12 +654,9 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id) @@ -35174,6 +43257,58 @@ index 4a266bb2ca..928dfca7af 100644 uint64_t max = a[0]; for (i = 1; i < n; ++i) { +@@ -868,7 +865,6 @@ bond_mode_8023ad_periodic_cb(void *arg) + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct port *port; + struct rte_eth_link link_info; +- struct rte_ether_addr slave_addr; + struct rte_mbuf *lacp_pkt = NULL; + uint16_t slave_id; + uint16_t i; +@@ -895,7 +891,6 @@ bond_mode_8023ad_periodic_cb(void *arg) + key = 0; + } + +- rte_eth_macaddr_get(slave_id, &slave_addr); + port = &bond_mode_8023ad_ports[slave_id]; + + key = rte_cpu_to_be_16(key); +@@ -907,8 +902,8 @@ bond_mode_8023ad_periodic_cb(void *arg) + SM_FLAG_SET(port, NTT); + } + +- if (!rte_is_same_ether_addr(&port->actor.system, &slave_addr)) { +- rte_ether_addr_copy(&slave_addr, &port->actor.system); ++ if (!rte_is_same_ether_addr(&internals->mode4.mac_addr, &port->actor.system)) { ++ rte_ether_addr_copy(&internals->mode4.mac_addr, &port->actor.system); + if (port->aggregator_port_id == slave_id) + SM_FLAG_SET(port, NTT); + } +@@ -1174,21 +1169,20 @@ void + bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) + { + struct bond_dev_private *internals = bond_dev->data->dev_private; +- struct rte_ether_addr slave_addr; + struct port *slave, *agg_slave; + uint16_t slave_id, i, j; + + bond_mode_8023ad_stop(bond_dev); + ++ rte_eth_macaddr_get(internals->port_id, &internals->mode4.mac_addr); + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + slave = &bond_mode_8023ad_ports[slave_id]; +- rte_eth_macaddr_get(slave_id, &slave_addr); + +- if (rte_is_same_ether_addr(&slave_addr, &slave->actor.system)) ++ if (rte_is_same_ether_addr(&internals->mode4.mac_addr, &slave->actor.system)) + continue; + +- rte_ether_addr_copy(&slave_addr, &slave->actor.system); ++ rte_ether_addr_copy(&internals->mode4.mac_addr, &slave->actor.system); + /* Do nothing if this port is not an aggregator. In other case + * Set NTT flag on every port that use this aggregator. 
*/ + if (slave->aggregator_port_id != slave_id) diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h index 7eb392f8c8..025bd0ec54 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -35237,6 +43372,32 @@ index 6553166f5c..c137efd55f 100644 /* validate socket id value */ if (socket_id >= 0 && socket_id < RTE_MAX_NUMA_NODES) { *(int *)extra_args = (int)socket_id; +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c +index 65b77faae7..b38a9f89d0 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c +@@ -180,6 +180,8 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + + count->bytes = 0; + count->hits = 0; ++ count->bytes_set = 0; ++ count->hits_set = 0; + rte_memcpy(&slave_count, count, sizeof(slave_count)); + for (i = 0; i < internals->slave_count; i++) { + ret = rte_flow_query(internals->slaves[i].port_id, +@@ -192,8 +194,12 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + } + count->bytes += slave_count.bytes; + count->hits += slave_count.hits; ++ count->bytes_set |= slave_count.bytes_set; ++ count->hits_set |= slave_count.hits_set; + slave_count.bytes = 0; + slave_count.hits = 0; ++ slave_count.bytes_set = 0; ++ slave_count.hits_set = 0; + } + return 0; + } diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c index b9bcebc6cb..8df632fa6e 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c @@ -35354,10 +43515,88 @@ index 721127dddd..b60c158d55 100644 nix_mbuf_validate_next(mbuf1); nix_mbuf_validate_next(mbuf2); diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h -index 815cd2ff1f..84b5faa137 100644 +index 815cd2ff1f..cd9b1f225e 100644 --- a/dpdk/drivers/net/cnxk/cn10k_tx.h +++ b/dpdk/drivers/net/cnxk/cn10k_tx.h -@@ -1696,10 +1696,12 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, +@@ -898,7 +898,7 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr, + struct nix_send_mem_s *send_mem; + + send_mem = (struct nix_send_mem_s *)(lmt + off); +- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp ++ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp + * should not be recorded, hence changing the alg type to + * NIX_SENDMEMALG_SUB and also changing send mem addr field to + * next 8 bytes as it corrupts the actual Tx tstamp registered +@@ -943,6 +943,7 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) + len -= sg_u & 0xFFFF; + nb_segs = m->nb_segs - 1; + m_next = m->next; ++ m->nb_segs = 1; + slist = &cmd[3 + off + 1]; + + /* Set invert df if buffer is not to be freed by H/W */ +@@ -1387,6 +1388,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + len -= dlen; + sg_u = sg_u | ((uint64_t)dlen); + ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ + nb_segs = m->nb_segs - 1; + m_next = m->next; + +@@ -1401,6 +1405,7 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + #endif + + m->next = NULL; ++ m->nb_segs = 1; + m = m_next; + /* Fill mbuf segments */ + do { +@@ -1433,6 +1438,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + slist++; + } + m->next = NULL; ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ 
RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ + m = m_next; + } while (nb_segs); + +@@ -1469,6 +1477,8 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, + RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); + rte_io_wmb(); + #endif ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); + return; + } + +@@ -1513,6 +1523,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0, + *data128 |= ((__uint128_t)7) << *shift; + *shift += 3; + ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0); + return 1; + } + } +@@ -1539,6 +1554,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0, + vst1q_u64(lmt_addr + 10, cmd2[j + 1]); + vst1q_u64(lmt_addr + 12, cmd1[j + 1]); + vst1q_u64(lmt_addr + 14, cmd3[j + 1]); ++ ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool, ++ (void **)&mbufs[j + 1], 1, 0); + } else if (flags & NIX_TX_NEED_EXT_HDR) { + /* EXT header take 3 each, space for 2 segs.*/ + cn10k_nix_prepare_mseg_vec(mbufs[j], +@@ -1696,10 +1716,12 @@ cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2); vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1); } @@ -35370,7 +43609,17 @@ index 815cd2ff1f..84b5faa137 100644 } } -@@ -1907,13 +1909,13 @@ again: +@@ -1821,7 +1843,8 @@ again: + } + + for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) { +- if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) { ++ if (flags & NIX_TX_OFFLOAD_SECURITY_F && ++ (((int)((16 - c_lnum) << 1) - c_loff) < 4)) { + burst = i; + break; + } +@@ -1907,13 +1930,13 @@ again: vsetq_lane_u64(((struct rte_mbuf *)mbuf0)->data_off, vld1q_u64(mbuf0), 1); len_olflags0 = vld1q_u64(mbuf0 + 3); dataoff_iova1 = @@ -35458,9 +43707,18 @@ index 1a9f920b41..0e23609df5 100644 } diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h -index 404edd6aed..7362025a34 100644 +index 404edd6aed..33db781abe 100644 --- a/dpdk/drivers/net/cnxk/cn9k_tx.h +++ b/dpdk/drivers/net/cnxk/cn9k_tx.h +@@ -355,7 +355,7 @@ cn9k_nix_xmit_prepare_tstamp(struct cn9k_eth_txq *txq, uint64_t *cmd, + + send_mem = (struct nix_send_mem_s *)(cmd + off); + +- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp ++ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp + * should not be recorded, hence changing the alg type to + * NIX_SENDMEMALG_SUB and also changing send mem addr field to + * next 8 bytes as it corrupts the actual Tx tstamp registered @@ -388,6 +388,16 @@ cn9k_nix_xmit_prep_lmt(uint64_t *cmd, void *lmt_addr, const uint32_t flags) roc_lmt_mov(lmt_addr, cmd, cn9k_nix_tx_ext_subs(flags)); } @@ -35478,8 +43736,70 @@ index 404edd6aed..7362025a34 100644 static __rte_always_inline uint64_t cn9k_nix_xmit_submit_lmt(const rte_iova_t io_addr) { +@@ -438,6 +448,10 @@ cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) + if (!(sg_u & (1ULL << 55))) + RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); + rte_io_wmb(); ++#endif ++#ifdef RTE_ENABLE_ASSERT ++ m->next = 
NULL; ++ m->nb_segs = 1; + #endif + m = m_next; + if (!m) +@@ -473,6 +487,9 @@ cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) + sg_u = sg->u; + slist++; + } ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++#endif + m = m_next; + } while (nb_segs); + +@@ -486,6 +503,9 @@ done: + segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + send_hdr->w0.sizem1 = segdw - 1; + ++#ifdef RTE_ENABLE_ASSERT ++ rte_io_wmb(); ++#endif + return segdw; + } + +@@ -689,6 +709,10 @@ cn9k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + rte_io_wmb(); + #endif + ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++ m->nb_segs = 1; ++#endif + m = m_next; + /* Fill mbuf segments */ + do { +@@ -718,6 +742,9 @@ cn9k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + sg_u = sg->u; + slist++; + } ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++#endif + m = m_next; + } while (nb_segs); + +@@ -733,6 +760,9 @@ cn9k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + sh->sizem1 = segdw - 1; + ++#ifdef RTE_ENABLE_ASSERT ++ rte_io_wmb(); ++#endif + return segdw; + } + diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c -index bf1585fe67..f9245258cb 100644 +index bf1585fe67..56b26a9650 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c @@ -884,6 +884,27 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, @@ -35518,7 +43838,21 @@ index bf1585fe67..f9245258cb 100644 roc_nix_lf_free(nix); } -@@ -1431,6 +1453,7 @@ tm_fini: +@@ -1312,6 +1334,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) + goto free_nix_lf; + } + ++ /* Overwrite default RSS setup if requested by user */ ++ rc = cnxk_nix_rss_hash_update(eth_dev, &conf->rx_adv_conf.rss_conf); ++ if (rc) { ++ plt_err("Failed to configure rss rc=%d", rc); ++ goto free_nix_lf; ++ } ++ + /* Init the default TM scheduler hierarchy */ + rc = roc_nix_tm_init(nix); + if (rc) { +@@ -1431,6 +1460,7 @@ tm_fini: roc_nix_tm_fini(nix); free_nix_lf: nix_free_queue_mem(dev); @@ -35526,7 +43860,7 @@ index bf1585fe67..f9245258cb 100644 rc |= roc_nix_lf_free(nix); fail_configure: dev->configured = 0; -@@ -1980,6 +2003,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) +@@ -1980,6 +2010,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) /* Free ROC RQ's, SQ's and CQ's memory */ nix_free_queue_mem(dev); @@ -35566,11 +43900,48 @@ index d5e647c64d..a7ccdfb756 100644 return 0; } +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c +index d28509dbda..0c89e0424f 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c +@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args) + if (errno) + val = 0; + +- *(uint16_t *)extra_args = val; ++ *(uint32_t *)extra_args = val; + + return 0; + } diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -index 8f7287161b..7a7478cda8 100644 +index 8f7287161b..5fd39149cb 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -@@ -463,6 +463,44 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) +@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo) + devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; + devinfo->max_mac_addrs = dev->max_mac_entries; + 
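/*
 * One-line fixes like the cnxk devargs change above (storing through
 * extra_args as uint32_t instead of uint16_t) close a subtle bug class:
 * an rte_kvargs handler must write exactly the type of its caller's
 * destination variable, because the pointer travels as void *. A
 * minimal sketch of the safe pairing; the key name and variable are
 * illustrative:
 */
#include <stdint.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_kvargs.h>

/* Handler and destination must agree: this one writes a uint32_t. */
static int parse_u32(const char *key, const char *value, void *extra_args)
{
    RTE_SET_USED(key);
    *(uint32_t *)extra_args = (uint32_t)strtoul(value, NULL, 0);
    return 0;
}

/* Usage:
 *     uint32_t max_sa = 0;
 *     rte_kvargs_process(kvlist, "ipsec_out_max_sa", parse_u32, &max_sa);
 * Had max_sa been uint16_t while the handler stored 32 bits (or the
 * reverse), adjacent bytes would be clobbered or left stale. */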
devinfo->max_vfs = pci_dev->max_vfs; +- devinfo->max_mtu = devinfo->max_rx_pktlen - +- (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); ++ devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD; + devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; + + devinfo->rx_offload_capa = dev->rx_offload_capa; +@@ -414,6 +413,13 @@ cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) + roc_nix_npc_mac_addr_set(nix, dev->mac_addr); + goto exit; + } ++ ++ if (eth_dev->data->promiscuous) { ++ rc = roc_nix_mac_promisc_mode_enable(nix, true); ++ if (rc) ++ plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc, ++ roc_error_msg_get(rc)); ++ } + } + + /* Update mac address to cnxk ethernet device */ +@@ -463,6 +469,44 @@ cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index) dev->dmac_filter_count--; } @@ -35615,10 +43986,57 @@ index 8f7287161b..7a7478cda8 100644 int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { -@@ -506,6 +544,15 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +@@ -470,8 +514,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct roc_nix *nix = &dev->nix; ++ struct cnxk_eth_rxq_sp *rxq_sp; ++ uint32_t buffsz = 0; + int rc = -EINVAL; +- uint32_t buffsz; + + frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en; + +@@ -487,8 +532,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) goto exit; } +- buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; +- old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; ++ if (!eth_dev->data->nb_rx_queues) ++ goto skip_buffsz_check; ++ ++ /* Perform buff size check */ ++ if (data->min_rx_buf_size) { ++ buffsz = data->min_rx_buf_size; ++ } else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) { ++ rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]); ++ ++ if (rxq_sp->qconf.mp) ++ buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp); ++ } ++ ++ /* Skip validation if RQ's are not yet setup */ ++ if (!buffsz) ++ goto skip_buffsz_check; ++ ++ buffsz -= RTE_PKTMBUF_HEADROOM; + + /* Refuse MTU that requires the support of scattered packets + * when this feature has not been enabled before. 
+@@ -506,21 +567,22 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + goto exit; + } + +- frame_size -= RTE_ETHER_CRC_LEN; +- +- /* Update mtu on Tx */ +- rc = roc_nix_mac_mtu_set(nix, frame_size); +- if (rc) { +- plt_err("Failed to set MTU, rc=%d", rc); +- goto exit; ++skip_buffsz_check: ++ old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; + /* if new MTU was smaller than old one, then flush all SQs before MTU change */ + if (old_frame_size > frame_size) { + if (data->dev_started) { @@ -35626,16 +44044,49 @@ index 8f7287161b..7a7478cda8 100644 + goto exit; + } + cnxk_nix_sq_flush(eth_dev); -+ } -+ - frame_size -= RTE_ETHER_CRC_LEN; + } - /* Update mtu on Tx */ +- /* Sync same frame size on Rx */ ++ frame_size -= RTE_ETHER_CRC_LEN; ++ ++ /* Set frame size on Rx */ + rc = roc_nix_mac_max_rx_len_set(nix, frame_size); + if (rc) { +- /* Rollback to older mtu */ +- roc_nix_mac_mtu_set(nix, +- old_frame_size - RTE_ETHER_CRC_LEN); + plt_err("Failed to max Rx frame length, rc=%d", rc); + goto exit; + } diff --git a/dpdk/drivers/net/cnxk/cnxk_flow.c b/dpdk/drivers/net/cnxk/cnxk_flow.c -index 6d155d924c..422c5d74df 100644 +index 6d155d924c..ee0c4e5550 100644 --- a/dpdk/drivers/net/cnxk/cnxk_flow.c +++ b/dpdk/drivers/net/cnxk/cnxk_flow.c -@@ -115,14 +115,15 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -98,15 +98,19 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev, + } + + static void +-npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, +- const struct roc_npc_action *rss_action, +- uint32_t *flowkey_cfg) ++npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action, ++ uint32_t *flowkey_cfg, uint64_t default_rss_types) + { + const struct roc_npc_action_rss *rss; ++ uint64_t rss_types; + + rss = (const struct roc_npc_action_rss *)rss_action->conf; ++ rss_types = rss->types; ++ /* If no RSS types are specified, use default one */ ++ if (rss_types == 0) ++ rss_types = default_rss_types; + +- *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level); ++ *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level); + } + + static int +@@ -115,14 +119,15 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, struct roc_npc_action in_actions[], uint32_t *flowkey_cfg) { struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); @@ -35652,7 +44103,7 @@ index 6d155d924c..422c5d74df 100644 int i = 0, rc = 0; int rq; -@@ -156,6 +157,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -156,6 +161,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, case RTE_FLOW_ACTION_TYPE_VF: in_actions[i].type = ROC_NPC_ACTION_TYPE_VF; in_actions[i].conf = actions->conf; @@ -35660,7 +44111,7 @@ index 6d155d924c..422c5d74df 100644 break; case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: -@@ -193,13 +195,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -193,13 +199,7 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_QUEUE: @@ -35675,7 +44126,17 @@ index 6d155d924c..422c5d74df 100644 in_actions[i].type = ROC_NPC_ACTION_TYPE_QUEUE; in_actions[i].conf = actions->conf; break; -@@ -245,6 +241,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, +@@ -210,7 +210,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + goto err_exit; + in_actions[i].type = 
ROC_NPC_ACTION_TYPE_RSS; + in_actions[i].conf = actions->conf; +- npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg); ++ npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, ++ eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); + break; + + case RTE_FLOW_ACTION_TYPE_SECURITY: +@@ -245,6 +246,14 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, } i++; } @@ -35715,10 +44176,91 @@ index 45bbeaef0c..8cc3d9f257 100644 } diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -index a6c86113d1..ef4c06db6a 100644 +index a6c86113d1..bcb28f33ee 100644 --- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c +++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -@@ -399,6 +399,7 @@ static void dpaa_interrupt_handler(void *param) +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -165,9 +166,15 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE; + uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; ++ struct fman_if *fif = dev->process_private; + + PMD_INIT_FUNC_TRACE(); + ++ if (fif->is_shared_mac) { ++ DPAA_PMD_ERR("Cannot configure mtu from DPDK in VSP mode."); ++ return -ENOTSUP; ++ } ++ + /* + * Refuse mtu that requires the support of scattered packets + * when this feature has not been enabled before. +@@ -206,7 +213,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) + struct rte_intr_handle *intr_handle; + uint32_t max_rx_pktlen; + int speed, duplex; +- int ret, rx_status; ++ int ret, rx_status, socket_fd; ++ struct ifreq ifr; + + PMD_INIT_FUNC_TRACE(); + +@@ -222,6 +230,26 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) + dpaa_intf->name); + return -EHOSTDOWN; + } ++ ++ socket_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); ++ if (socket_fd == -1) { ++ DPAA_PMD_ERR("Cannot open IF socket"); ++ return -errno; ++ } ++ ++ strncpy(ifr.ifr_name, dpaa_intf->name, IFNAMSIZ - 1); ++ ++ if (ioctl(socket_fd, SIOCGIFMTU, &ifr) < 0) { ++ DPAA_PMD_ERR("Cannot get interface mtu"); ++ close(socket_fd); ++ return -errno; ++ } ++ ++ close(socket_fd); ++ DPAA_PMD_INFO("Using kernel configured mtu size(%u)", ++ ifr.ifr_mtu); ++ ++ eth_conf->rxmode.mtu = ifr.ifr_mtu; + } + + /* Rx offloads which are enabled by default */ +@@ -249,7 +277,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) + max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; + } + +- fman_if_set_maxfrm(dev->process_private, max_rx_pktlen); ++ if (!fif->is_shared_mac) ++ fman_if_set_maxfrm(dev->process_private, max_rx_pktlen); + + if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { + DPAA_PMD_DEBUG("enabling scatter mode"); +@@ -363,7 +392,8 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, +- RTE_PTYPE_TUNNEL_ESP ++ RTE_PTYPE_TUNNEL_ESP, ++ RTE_PTYPE_UNKNOWN + }; + + PMD_INIT_FUNC_TRACE(); +@@ -399,6 +429,7 @@ static void dpaa_interrupt_handler(void *param) static int dpaa_eth_dev_start(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; @@ -35726,7 +44268,7 @@ index a6c86113d1..ef4c06db6a 100644 PMD_INIT_FUNC_TRACE(); -@@ -413,12 +414,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) +@@ -413,12 +444,18 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev) fman_if_enable_rx(dev->process_private); @@ -35745,7 +44287,7 @@ index a6c86113d1..ef4c06db6a 100644 PMD_INIT_FUNC_TRACE(); dev->data->dev_started = 0; -@@ -427,6 +434,11 @@ static int dpaa_eth_dev_stop(struct 
rte_eth_dev *dev) +@@ -427,6 +464,11 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev) fman_if_disable_rx(fif); dev->tx_pkt_burst = dpaa_eth_tx_drop_all; @@ -35825,6 +44367,19 @@ index f60e78e1fd..85910bbd8f 100644 if (dpaa2_enable_ts[mbuf->port]) { *dpaa2_timestamp_dynfield(mbuf) = annotation->word2; +diff --git a/dpdk/drivers/net/e1000/base/e1000_base.c b/dpdk/drivers/net/e1000/base/e1000_base.c +index ab73e1e59e..3ec32e7240 100644 +--- a/dpdk/drivers/net/e1000/base/e1000_base.c ++++ b/dpdk/drivers/net/e1000/base/e1000_base.c +@@ -107,7 +107,7 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw) + return; + + /* If the management interface is not enabled, then power down */ +- if (phy->ops.check_reset_block(hw)) ++ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + } + diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c index 8ee9be12ad..18efa78ac3 100644 --- a/dpdk/drivers/net/e1000/em_ethdev.c @@ -35951,11 +44506,106 @@ index f32dee46df..6027cfbfb1 100644 } } +diff --git a/dpdk/drivers/net/ena/base/ena_com.c b/dpdk/drivers/net/ena/base/ena_com.c +index 5ca36ab6d9..98035f3cd4 100644 +--- a/dpdk/drivers/net/ena/base/ena_com.c ++++ b/dpdk/drivers/net/ena/base/ena_com.c +@@ -34,6 +34,8 @@ + + #define ENA_REGS_ADMIN_INTR_MASK 1 + ++#define ENA_MAX_BACKOFF_DELAY_EXP 16U ++ + #define ENA_MIN_ADMIN_POLL_US 100 + + #define ENA_MAX_ADMIN_POLL_US 5000 +@@ -171,6 +173,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, + static void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) + { ++ comp_ctx->user_cqe = NULL; + comp_ctx->occupied = false; + ATOMIC32_DEC(&queue->outstanding_cmds); + } +@@ -464,6 +467,9 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a + return; + } + ++ if (!comp_ctx->occupied) ++ return; ++ + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; + +@@ -539,8 +545,9 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, + + static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) + { ++ exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp); + delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us); +- delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); ++ delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp)); + ENA_USLEEP(delay_us); + } + diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c -index efcb163027..7345e480f8 100644 +index efcb163027..e640bbae3d 100644 --- a/dpdk/drivers/net/ena/ena_ethdev.c +++ b/dpdk/drivers/net/ena/ena_ethdev.c -@@ -1171,6 +1171,7 @@ static int ena_start(struct rte_eth_dev *dev) +@@ -37,10 +37,10 @@ + #define ENA_MIN_RING_DESC 128 + + /* +- * We should try to keep ENA_CLEANUP_BUF_SIZE lower than ++ * We should try to keep ENA_CLEANUP_BUF_THRESH lower than + * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache. 
+ */ +-#define ENA_CLEANUP_BUF_SIZE 256 ++#define ENA_CLEANUP_BUF_THRESH 256 + + #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) + +@@ -590,18 +590,13 @@ static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring, + packet_type |= RTE_PTYPE_L3_IPV6; + } + +- if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) { ++ if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag || ++ !(packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP))) { + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; + } else { + if (unlikely(ena_rx_ctx->l4_csum_err)) { + ++rx_stats->l4_csum_bad; +- /* +- * For the L4 Rx checksum offload the HW may indicate +- * bad checksum although it's valid. Because of that, +- * we're setting the UNKNOWN flag to let the app +- * re-verify the checksum. +- */ +- ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; ++ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; + } else { + ++rx_stats->l4_csum_good; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; +@@ -739,7 +734,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { +- if (rc == -ENA_COM_UNSUPPORTED) ++ if (rc == ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); +@@ -779,7 +774,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter) + + rc = ena_com_set_host_attributes(&adapter->ena_dev); + if (rc) { +- if (rc == -ENA_COM_UNSUPPORTED) ++ if (rc == ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); +@@ -1171,6 +1166,7 @@ static int ena_start(struct rte_eth_dev *dev) struct ena_adapter *adapter = dev->data->dev_private; uint64_t ticks; int rc = 0; @@ -35963,7 +44613,7 @@ index efcb163027..7345e480f8 100644 /* Cannot allocate memory in secondary process */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { -@@ -1208,6 +1209,11 @@ static int ena_start(struct rte_eth_dev *dev) +@@ -1208,6 +1204,11 @@ static int ena_start(struct rte_eth_dev *dev) ++adapter->dev_stats.dev_start; adapter->state = ENA_ADAPTER_STATE_RUNNING; @@ -35975,7 +44625,7 @@ index efcb163027..7345e480f8 100644 return 0; err_rss_init: -@@ -1223,6 +1229,7 @@ static int ena_stop(struct rte_eth_dev *dev) +@@ -1223,6 +1224,7 @@ static int ena_stop(struct rte_eth_dev *dev) struct ena_com_dev *ena_dev = &adapter->ena_dev; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = pci_dev->intr_handle; @@ -35983,7 +44633,7 @@ index efcb163027..7345e480f8 100644 int rc; /* Cannot free memory in secondary process */ -@@ -1254,6 +1261,11 @@ static int ena_stop(struct rte_eth_dev *dev) +@@ -1254,6 +1256,11 @@ static int ena_stop(struct rte_eth_dev *dev) adapter->state = ENA_ADAPTER_STATE_STOPPED; dev->data->dev_started = 0; @@ -35995,6 +44645,68 @@ index efcb163027..7345e480f8 100644 return 0; } +@@ -3006,33 +3013,12 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) + return 0; + } + +-static __rte_always_inline size_t +-ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean, +- struct rte_mbuf *mbuf, +- size_t mbuf_cnt, +- size_t buf_size) +-{ +- struct rte_mbuf *m_next; +- +- while (mbuf != NULL) { +- m_next = mbuf->next; +- mbufs_to_clean[mbuf_cnt++] = mbuf; +- if (mbuf_cnt == buf_size) { +- rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean, +- (unsigned int)mbuf_cnt); +- mbuf_cnt = 0; +- } +- mbuf = m_next; +- } +- +- return mbuf_cnt; +-} +- + static 
int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) + { +- struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE]; ++ struct rte_mbuf *pkts_to_clean[ENA_CLEANUP_BUF_THRESH]; + struct ena_ring *tx_ring = (struct ena_ring *)txp; + size_t mbuf_cnt = 0; ++ size_t pkt_cnt = 0; + unsigned int total_tx_descs = 0; + unsigned int total_tx_pkts = 0; + uint16_t cleanup_budget; +@@ -3063,8 +3049,13 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) + + mbuf = tx_info->mbuf; + if (fast_free) { +- mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt, +- ENA_CLEANUP_BUF_SIZE); ++ pkts_to_clean[pkt_cnt++] = mbuf; ++ mbuf_cnt += mbuf->nb_segs; ++ if (mbuf_cnt >= ENA_CLEANUP_BUF_THRESH) { ++ rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); ++ mbuf_cnt = 0; ++ pkt_cnt = 0; ++ } + } else { + rte_pktmbuf_free(mbuf); + } +@@ -3088,8 +3079,7 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) + } + + if (mbuf_cnt != 0) +- rte_mempool_put_bulk(mbufs_to_clean[0]->pool, +- (void **)mbufs_to_clean, mbuf_cnt); ++ rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); + + /* Notify completion handler that full cleanup was performed */ + if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) diff --git a/dpdk/drivers/net/ena/ena_rss.c b/dpdk/drivers/net/ena/ena_rss.c index b682d01c20..d0ba9d5c0a 100644 --- a/dpdk/drivers/net/ena/ena_rss.c @@ -36088,6 +44800,109 @@ index 19a99a82c5..a6aaa760ca 100644 /* * The device has started, re-do RQs on the fly. In the process, we +diff --git a/dpdk/drivers/net/failsafe/failsafe_args.c b/dpdk/drivers/net/failsafe/failsafe_args.c +index b203e02d9a..3b867437d7 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_args.c ++++ b/dpdk/drivers/net/failsafe/failsafe_args.c +@@ -248,7 +248,7 @@ fs_parse_device_param(struct rte_eth_dev *dev, const char *param, + goto free_args; + } else { + ERROR("Unrecognized device type: %.*s", (int)b, param); +- return -EINVAL; ++ ret = -EINVAL; + } + free_args: + free(args); +diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c +index 8b83063f0a..a4e06a0cfa 100644 +--- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c ++++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c +@@ -3055,7 +3055,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pdev->intr_handle; +- int diag, i; ++ int diag, i, ret; + struct fm10k_macvlan_filter_info *macvlan; + + PMD_INIT_FUNC_TRACE(); +@@ -3144,21 +3144,24 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + diag = fm10k_stats_reset(dev); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag); +- return diag; ++ ret = diag; ++ goto err_stat; + } + + /* Reset the hw */ + diag = fm10k_reset_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_reset_hw; + } + + /* Setup mailbox service */ + diag = fm10k_setup_mbx_service(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_mbx; + } + + /*PF/VF has different interrupt handling mechanism */ +@@ -3197,7 +3200,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + + if (switch_ready == false) { + PMD_INIT_LOG(ERR, "switch is not ready"); +- return -1; ++ ret = -1; ++ goto err_switch_ready; + } + } + +@@ -3232,7 +3236,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + + if (!hw->mac.default_vid) { + 
PMD_INIT_LOG(ERR, "default VID is not ready"); +- return -1; ++ ret = -1; ++ goto err_vid; + } + } + +@@ -3241,6 +3246,28 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + MAIN_VSI_POOL_NUMBER); + + return 0; ++ ++err_vid: ++err_switch_ready: ++ rte_intr_disable(intr_handle); ++ ++ if (hw->mac.type == fm10k_mac_pf) { ++ fm10k_dev_disable_intr_pf(dev); ++ rte_intr_callback_unregister(intr_handle, ++ fm10k_dev_interrupt_handler_pf, (void *)dev); ++ } else { ++ fm10k_dev_disable_intr_vf(dev); ++ rte_intr_callback_unregister(intr_handle, ++ fm10k_dev_interrupt_handler_vf, (void *)dev); ++ } ++ ++err_mbx: ++err_reset_hw: ++err_stat: ++ rte_free(dev->data->mac_addrs); ++ dev->data->mac_addrs = NULL; ++ ++ return ret; + } + + static int diff --git a/dpdk/drivers/net/gve/gve_ethdev.c b/dpdk/drivers/net/gve/gve_ethdev.c index 97781f0ed3..0796d37760 100644 --- a/dpdk/drivers/net/gve/gve_ethdev.c @@ -36209,7 +45024,7 @@ index 7aa5e7d8e9..adc9f75c81 100644 } diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c -index bdfc85f934..7bdf7740c1 100644 +index bdfc85f934..fb515ed0ee 100644 --- a/dpdk/drivers/net/hns3/hns3_cmd.c +++ b/dpdk/drivers/net/hns3/hns3_cmd.c @@ -507,6 +507,8 @@ hns3_parse_capability(struct hns3_hw *hw, @@ -36221,7 +45036,7 @@ index bdfc85f934..7bdf7740c1 100644 } static uint32_t -@@ -519,6 +521,41 @@ hns3_build_api_caps(void) +@@ -519,6 +521,43 @@ hns3_build_api_caps(void) return rte_cpu_to_le_32(api_caps); } @@ -36243,7 +45058,9 @@ index bdfc85f934..7bdf7740c1 100644 + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || -+ device_id == HNS3_DEV_ID_200G_RDMA) ++ device_id == HNS3_DEV_ID_200G_RDMA || ++ device_id == HNS3_DEV_ID_100G_ROH || ++ device_id == HNS3_DEV_ID_200G_ROH) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); +} + @@ -36263,7 +45080,7 @@ index bdfc85f934..7bdf7740c1 100644 static int hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) { -@@ -536,6 +573,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) +@@ -536,6 +575,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) return ret; hw->fw_version = rte_le_to_cpu_32(resp->firmware); @@ -36273,7 +45090,7 @@ index bdfc85f934..7bdf7740c1 100644 /* * Make sure mask the capability before parse capability because it * may overwrite resp's data. 
-@@ -659,9 +699,6 @@ hns3_cmd_init(struct hns3_hw *hw) +@@ -659,9 +701,6 @@ hns3_cmd_init(struct hns3_hw *hw) hw->cmq.csq.next_to_use = 0; hw->cmq.crq.next_to_clean = 0; hw->cmq.crq.next_to_use = 0; @@ -36304,7 +45121,7 @@ index 994dfc48cc..0a4d59bd9b 100644 /* Configure the indirection table, opcode:0x0D07 */ struct hns3_rss_indirection_table_cmd { diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c -index 7adc6a4972..5d9df03733 100644 +index 7adc6a4972..c51af055d3 100644 --- a/dpdk/drivers/net/hns3/hns3_common.c +++ b/dpdk/drivers/net/hns3/hns3_common.c @@ -10,6 +10,7 @@ @@ -36325,7 +45142,14 @@ index 7adc6a4972..5d9df03733 100644 info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM | -@@ -90,13 +90,16 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) +@@ -84,19 +84,22 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | + RTE_ETH_TX_OFFLOAD_VLAN_INSERT); + +- if (!hw->port_base_vlan_cfg.state) ++ if (!hns->is_vf && !hw->port_base_vlan_cfg.state) + info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; @@ -36374,7 +45198,12 @@ index 7adc6a4972..5d9df03733 100644 val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL); *(uint64_t *)extra_args = val; -@@ -214,6 +223,9 @@ hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) +@@ -210,10 +219,13 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + static int + hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + { +- uint32_t val; ++ uint64_t val; RTE_SET_USED(key); @@ -36518,7 +45347,7 @@ index 5aa001f0cc..8eaeda26e7 100644 #endif /* HNS3_COMMON_H */ diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c -index af045b22f7..2831d3dc62 100644 +index af045b22f7..915e4eb768 100644 --- a/dpdk/drivers/net/hns3/hns3_dcb.c +++ b/dpdk/drivers/net/hns3/hns3_dcb.c @@ -237,9 +237,9 @@ hns3_dcb_qs_weight_cfg(struct hns3_hw *hw, uint16_t qs_id, uint8_t dwrr) @@ -36555,6 +45384,29 @@ index af045b22f7..2831d3dc62 100644 return ret; } +@@ -1506,7 +1499,6 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc) + static int + hns3_dcb_hw_configure(struct hns3_adapter *hns) + { +- struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + enum hns3_fc_status fc_status = hw->current_fc_status; +@@ -1526,12 +1518,8 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) + } + + if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { +- dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; +- if (dcb_rx_conf->nb_tcs == 0) +- hw->dcb_info.pfc_en = 1; /* tc0 only */ +- else +- hw->dcb_info.pfc_en = +- RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); ++ hw->dcb_info.pfc_en = ++ RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t); + + hw->dcb_info.hw_pfc_map = + hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); diff --git a/dpdk/drivers/net/hns3/hns3_dump.c b/dpdk/drivers/net/hns3/hns3_dump.c index ae62bb56c8..bac4427227 100644 --- a/dpdk/drivers/net/hns3/hns3_dump.c @@ -36660,7 +45512,7 @@ index ae62bb56c8..bac4427227 100644 } diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c -index d326f70129..27f9dd2eb2 100644 +index 
d326f70129..0050d46ae7 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev.c @@ -15,6 +15,7 @@ @@ -36925,7 +45777,7 @@ index d326f70129..27f9dd2eb2 100644 static void hns3_interrupt_handler(void *param) { -@@ -293,24 +359,25 @@ hns3_interrupt_handler(void *param) +@@ -293,39 +359,45 @@ hns3_interrupt_handler(void *param) struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; enum hns3_evt_cause event_cause; @@ -36958,7 +45810,11 @@ index d326f70129..27f9dd2eb2 100644 hns3_handle_mac_tnl(hw); hns3_handle_error(hns); } else if (event_cause == HNS3_VECTOR0_EVENT_RST) { -@@ -321,11 +388,16 @@ hns3_interrupt_handler(void *param) + hns3_warn(hw, "received reset interrupt"); + hns3_schedule_reset(hns); + } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { +- hns3_dev_handle_mbx_msg(hw); ++ hns3pf_handle_mbx_msg(hw); } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " "ras_int_stat:0x%x cmdq_int_stat:0x%x", @@ -37087,7 +45943,23 @@ index d326f70129..27f9dd2eb2 100644 ret = hns3_query_mac_stats_reg_num(hw); if (ret) return ret; -@@ -3677,7 +3669,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) +@@ -2750,6 +2742,7 @@ hns3_get_capability(struct hns3_hw *hw) + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; + pf->support_multi_tc_pause = false; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; + return 0; + } + +@@ -2770,6 +2763,7 @@ hns3_get_capability(struct hns3_hw *hw) + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; + pf->support_multi_tc_pause = true; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + + return 0; + } +@@ -3677,7 +3671,7 @@ hns3_get_mac_ethertype_cmd_status(uint16_t cmdq_resp, uint8_t resp_code) if (cmdq_resp) { PMD_INIT_LOG(ERR, @@ -37096,7 +45968,7 @@ index d326f70129..27f9dd2eb2 100644 cmdq_resp); return -EIO; } -@@ -4451,6 +4443,12 @@ hns3_init_hardware(struct hns3_adapter *hns) +@@ -4451,6 +4445,12 @@ hns3_init_hardware(struct hns3_adapter *hns) goto err_mac_init; } @@ -37109,7 +45981,7 @@ index d326f70129..27f9dd2eb2 100644 return 0; err_mac_init: -@@ -4590,6 +4588,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4590,6 +4590,10 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) /* Get hardware io base address from pcie BAR2 IO space */ hw->io_base = pci_dev->mem_resource[2].addr; @@ -37120,7 +45992,7 @@ index d326f70129..27f9dd2eb2 100644 /* Firmware command queue initialize */ ret = hns3_cmd_init_queue(hw); if (ret) { -@@ -4630,10 +4632,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) +@@ -4630,10 +4634,6 @@ hns3_init_pf(struct rte_eth_dev *eth_dev) goto err_intr_callback_register; } @@ -37131,7 +46003,7 @@ index d326f70129..27f9dd2eb2 100644 /* Enable interrupt */ rte_intr_enable(pci_dev->intr_handle); hns3_pf_enable_irq0(hw); -@@ -4690,6 +4688,7 @@ err_enable_intr: +@@ -4690,6 +4690,7 @@ err_enable_intr: hns3_fdir_filter_uninit(hns); err_fdir: hns3_uninit_umv_space(hw); @@ -37139,7 +46011,7 @@ index d326f70129..27f9dd2eb2 100644 err_init_hw: hns3_stats_uninit(hw); err_get_config: -@@ -4725,6 +4724,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) +@@ -4725,6 +4726,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev) hns3_flow_uninit(eth_dev); hns3_fdir_filter_uninit(hns); hns3_uninit_umv_space(hw); @@ -37147,7 +46019,7 @@ index d326f70129..27f9dd2eb2 100644 hns3_stats_uninit(hw); 
hns3_config_mac_tnl_int(hw, false); hns3_pf_disable_irq0(hw); -@@ -5115,8 +5115,7 @@ hns3_dev_start(struct rte_eth_dev *dev) +@@ -5115,8 +5117,7 @@ hns3_dev_start(struct rte_eth_dev *dev) rte_spinlock_unlock(&hw->lock); hns3_rx_scattered_calc(dev); @@ -37157,7 +46029,7 @@ index d326f70129..27f9dd2eb2 100644 /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); -@@ -5194,12 +5193,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) +@@ -5194,12 +5195,7 @@ hns3_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; @@ -37171,7 +46043,7 @@ index d326f70129..27f9dd2eb2 100644 rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -5373,16 +5367,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) +@@ -5373,16 +5369,7 @@ hns3_check_fc_autoneg_valid(struct hns3_hw *hw, uint8_t autoneg) if (!pf->support_fc_autoneg) { if (autoneg != 0) { @@ -37189,35 +46061,21 @@ index d326f70129..27f9dd2eb2 100644 return -EOPNOTSUPP; } -@@ -5591,31 +5576,60 @@ is_pf_reset_done(struct hns3_hw *hw) +@@ -5591,31 +5578,50 @@ is_pf_reset_done(struct hns3_hw *hw) return true; } +static enum hns3_reset_level +hns3_detect_reset_event(struct hns3_hw *hw) +{ -+ struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + enum hns3_reset_level new_req = HNS3_NONE_RESET; -+ enum hns3_reset_level last_req; + uint32_t vector0_intr_state; + -+ last_req = hns3_get_reset_level(hns, &hw->reset.pending); + vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); -+ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { -+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) + new_req = HNS3_IMP_RESET; -+ } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { -+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) + new_req = HNS3_GLOBAL_RESET; -+ } -+ -+ if (new_req == HNS3_NONE_RESET) -+ return HNS3_NONE_RESET; -+ -+ if (last_req == HNS3_NONE_RESET || last_req < new_req) { -+ hns3_schedule_delayed_reset(hns); -+ hns3_warn(hw, "High level reset detected, delay do reset"); -+ } + + return new_req; +} @@ -37242,16 +46100,20 @@ index d326f70129..27f9dd2eb2 100644 - hns3_check_event_cause(hns, NULL); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return false; ++ ++ new_req = hns3_detect_reset_event(hw); ++ if (new_req == HNS3_NONE_RESET) ++ return false; - reset = hns3_get_reset_level(hns, &hw->reset.pending); - if (reset != HNS3_NONE_RESET && hw->reset.level != HNS3_NONE_RESET && - hw->reset.level < reset) { - hns3_warn(hw, "High level reset %d is pending", reset); -+ new_req = hns3_detect_reset_event(hw); + last_req = hns3_get_reset_level(hns, &hw->reset.pending); -+ if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && -+ new_req < last_req) { -+ hns3_warn(hw, "High level reset %d is pending", last_req); ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); return true; } - reset = hns3_get_reset_level(hns, &hw->reset.request); @@ -37265,7 +46127,7 @@ index d326f70129..27f9dd2eb2 100644 return true; } return false; -@@ -5662,17 +5676,6 @@ hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) +@@ -5662,17 +5668,6 @@ 
hns3_func_reset_cmd(struct hns3_hw *hw, int func_id) return hns3_cmd_send(hw, &desc, 1); } @@ -37283,7 +46145,7 @@ index d326f70129..27f9dd2eb2 100644 static void hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) { -@@ -5690,7 +5693,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) +@@ -5690,7 +5685,9 @@ hns3_msix_process(struct hns3_adapter *hns, enum hns3_reset_level reset_level) switch (reset_level) { case HNS3_IMP_RESET: @@ -37294,7 +46156,7 @@ index d326f70129..27f9dd2eb2 100644 hns3_warn(hw, "IMP Reset requested time=%ld.%.6ld", tv.tv_sec, tv.tv_usec); break; -@@ -5815,12 +5820,7 @@ hns3_stop_service(struct hns3_adapter *hns) +@@ -5815,12 +5812,7 @@ hns3_stop_service(struct hns3_adapter *hns) rte_eal_alarm_cancel(hns3_service_handler, eth_dev); hns3_update_linkstatus_and_event(hw, false); } @@ -37308,7 +46170,7 @@ index d326f70129..27f9dd2eb2 100644 rte_spinlock_lock(&hw->lock); if (hns->hw.adapter_state == HNS3_NIC_STARTED || -@@ -5853,8 +5853,7 @@ hns3_start_service(struct hns3_adapter *hns) +@@ -5853,8 +5845,7 @@ hns3_start_service(struct hns3_adapter *hns) hw->reset.level == HNS3_GLOBAL_RESET) hns3_set_rst_done(hw); eth_dev = &rte_eth_devices[hw->data->port_id]; @@ -37318,7 +46180,7 @@ index d326f70129..27f9dd2eb2 100644 if (hw->adapter_state == HNS3_NIC_STARTED) { /* * This API parent function already hold the hns3_hw.lock, the -@@ -6003,56 +6002,27 @@ hns3_reset_service(void *param) +@@ -6003,56 +5994,27 @@ hns3_reset_service(void *param) hns3_msix_process(hns, reset_level); } @@ -37391,7 +46253,7 @@ index d326f70129..27f9dd2eb2 100644 } static int -@@ -6061,28 +6031,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, +@@ -6061,28 +6023,28 @@ hns3_fec_get_capability(struct rte_eth_dev *dev, unsigned int num) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -37435,7 +46297,16 @@ index d326f70129..27f9dd2eb2 100644 static int get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state) { -@@ -6220,61 +6190,27 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) +@@ -6118,7 +6080,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) + { + struct hns3_sfp_info_cmd *resp; + uint32_t tmp_fec_capa; +- uint8_t auto_state; ++ uint8_t auto_state = 0; + struct hns3_cmd_desc desc; + int ret; + +@@ -6220,61 +6182,27 @@ hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode) } static uint32_t @@ -37506,7 +46377,7 @@ index d326f70129..27f9dd2eb2 100644 return -EINVAL; } -@@ -6282,12 +6218,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) +@@ -6282,12 +6210,27 @@ hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode) * Check whether the configured mode is within the FEC capability. * If not, the configured mode will not be supported. 
*/ @@ -37537,7 +46408,7 @@ index d326f70129..27f9dd2eb2 100644 rte_spinlock_lock(&hw->lock); ret = hns3_set_fec_hw(hw, mode); if (ret) { -@@ -6342,7 +6293,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) +@@ -6342,7 +6285,7 @@ hns3_optical_module_existed(struct hns3_hw *hw) ret = hns3_cmd_send(hw, &desc, 1); if (ret) { hns3_err(hw, @@ -37546,7 +46417,7 @@ index d326f70129..27f9dd2eb2 100644 ret); return false; } -@@ -6380,7 +6331,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, +@@ -6380,7 +6323,7 @@ hns3_get_module_eeprom_data(struct hns3_hw *hw, uint32_t offset, ret = hns3_cmd_send(hw, desc, HNS3_SFP_INFO_CMD_NUM); if (ret) { @@ -37555,7 +46426,7 @@ index d326f70129..27f9dd2eb2 100644 ret); return ret; } -@@ -6417,7 +6368,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, +@@ -6417,7 +6360,7 @@ hns3_get_module_eeprom(struct rte_eth_dev *dev, return -ENOTSUP; if (!hns3_optical_module_existed(hw)) { @@ -37564,7 +46435,7 @@ index d326f70129..27f9dd2eb2 100644 return -EIO; } -@@ -6480,7 +6431,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, +@@ -6480,7 +6423,7 @@ hns3_get_module_info(struct rte_eth_dev *dev, modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN; break; default: @@ -37573,11 +46444,52 @@ index d326f70129..27f9dd2eb2 100644 sfp_type.type, sfp_type.ext_type); return -EINVAL; } +@@ -6707,6 +6650,8 @@ static const struct rte_pci_id pci_id_hns3_map[] = { + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, ++ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) }, ++ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) }, + { .vendor_id = 0, }, /* sentinel */ + }; + diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h -index 2457754b3d..9e67e93d3f 100644 +index 2457754b3d..1afe4c4ff7 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.h +++ b/dpdk/drivers/net/hns3/hns3_ethdev.h -@@ -871,13 +871,6 @@ struct hns3_adapter { +@@ -28,7 +28,9 @@ + #define HNS3_DEV_ID_25GE_RDMA 0xA222 + #define HNS3_DEV_ID_50GE_RDMA 0xA224 + #define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 ++#define HNS3_DEV_ID_100G_ROH 0xA227 + #define HNS3_DEV_ID_200G_RDMA 0xA228 ++#define HNS3_DEV_ID_200G_ROH 0xA22C + #define HNS3_DEV_ID_100G_VF 0xA22E + #define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F + +@@ -483,6 +485,9 @@ struct hns3_queue_intr { + #define HNS3_PKTS_DROP_STATS_MODE1 0 + #define HNS3_PKTS_DROP_STATS_MODE2 1 + ++#define HNS3_RX_DMA_ADDR_ALIGN_128 128 ++#define HNS3_RX_DMA_ADDR_ALIGN_64 64 ++ + struct hns3_hw { + struct rte_eth_dev_data *data; + void *io_base; +@@ -550,6 +555,11 @@ struct hns3_hw { + * direction. + */ + uint8_t min_tx_pkt_len; ++ /* ++ * The required alignment of the DMA address of the RX buffer. ++ * See HNS3_RX_DMA_ADDR_ALIGN_XXX for available values. 
++ */ ++ uint16_t rx_dma_addr_align; + + struct hns3_queue_intr intr; + /* +@@ -871,13 +881,6 @@ struct hns3_adapter { struct hns3_ptype_table ptype_tbl __rte_cache_aligned; }; @@ -37591,7 +46503,7 @@ index 2457754b3d..9e67e93d3f 100644 enum hns3_dev_cap { HNS3_DEV_SUPPORT_DCB_B, HNS3_DEV_SUPPORT_COPPER_B, -@@ -891,6 +884,7 @@ enum hns3_dev_cap { +@@ -891,6 +894,7 @@ enum hns3_dev_cap { HNS3_DEV_SUPPORT_RAS_IMP_B, HNS3_DEV_SUPPORT_TM_B, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, @@ -37599,7 +46511,7 @@ index 2457754b3d..9e67e93d3f 100644 }; #define hns3_dev_get_support(hw, _name) \ -@@ -996,15 +990,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) +@@ -996,15 +1000,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) #define hns3_read_dev(a, reg) \ hns3_read_reg((a)->io_base, (reg)) @@ -37615,7 +46527,7 @@ index 2457754b3d..9e67e93d3f 100644 static inline uint64_t hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) { -@@ -1045,22 +1030,9 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); +@@ -1045,22 +1040,9 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, uint32_t link_speed, uint8_t link_duplex); void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); @@ -37640,7 +46552,7 @@ index 2457754b3d..9e67e93d3f 100644 static inline bool is_reset_pending(struct hns3_adapter *hns) -@@ -1073,4 +1045,15 @@ is_reset_pending(struct hns3_adapter *hns) +@@ -1073,4 +1055,15 @@ is_reset_pending(struct hns3_adapter *hns) return ret; } @@ -37657,10 +46569,67 @@ index 2457754b3d..9e67e93d3f 100644 + #endif /* HNS3_ETHDEV_H */ diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -index d220522c43..702a61aad9 100644 +index d220522c43..6d7654206b 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -@@ -250,6 +250,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, +@@ -172,11 +172,13 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, +- RTE_ETHER_ADDR_LEN, false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_ADD); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -191,12 +193,13 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_REMOVE, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, +- false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_REMOVE); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -215,6 +218,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *old_addr; + uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC 
addresses */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + + /* +@@ -227,9 +231,10 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes, + RTE_ETHER_ADDR_LEN); + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes, +- HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_MODIFY); ++ memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) { + /* + * The hns3 VF PMD depends on the hns3 PF kernel ethdev +@@ -250,6 +255,8 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, hns3_err(hw, "Failed to set mac addr(%s) for vf: %d", mac_str, ret); } @@ -37669,7 +46638,118 @@ index d220522c43..702a61aad9 100644 } rte_ether_addr_copy(mac_addr, -@@ -610,6 +612,19 @@ hns3vf_enable_irq0(struct hns3_hw *hw) +@@ -264,12 +271,13 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, +- HNS3_MBX_MAC_VLAN_MC_ADD, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, ++ HNS3_MBX_MAC_VLAN_MC_ADD); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -285,12 +293,13 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, +- HNS3_MBX_MAC_VLAN_MC_REMOVE, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, ++ HNS3_MBX_MAC_VLAN_MC_REMOVE); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -333,11 +342,12 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, + * the packets with vlan tag in promiscuous mode. + */ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); +- req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; +- req->msg[1] = en_bc_pmc ? 1 : 0; +- req->msg[2] = en_uc_pmc ? 1 : 0; +- req->msg[3] = en_mc_pmc ? 1 : 0; +- req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; ++ req->msg.code = HNS3_MBX_SET_PROMISC_MODE; ++ req->msg.en_bc = en_bc_pmc ? 1 : 0; ++ req->msg.en_uc = en_uc_pmc ? 1 : 0; ++ req->msg.en_mc = en_mc_pmc ? 1 : 0; ++ req->msg.en_limit_promisc = ++ hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) +@@ -426,30 +436,26 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, + bool mmap, enum hns3_ring_type queue_type, + uint16_t queue_id) + { +- struct hns3_vf_bind_vector_msg bind_msg; ++ struct hns3_vf_to_pf_msg req = {0}; + const char *op_str; +- uint16_t code; + int ret; + +- memset(&bind_msg, 0, sizeof(bind_msg)); +- code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : ++ req.code = mmap ? 
HNS3_MBX_MAP_RING_TO_VECTOR : + HNS3_MBX_UNMAP_RING_TO_VECTOR; +- bind_msg.vector_id = (uint8_t)vector_id; ++ req.vector_id = (uint8_t)vector_id; ++ req.ring_num = 1; + + if (queue_type == HNS3_RING_TYPE_RX) +- bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; ++ req.ring_param[0].int_gl_index = HNS3_RING_GL_RX; + else +- bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX; +- +- bind_msg.param[0].ring_type = queue_type; +- bind_msg.ring_num = 1; +- bind_msg.param[0].tqp_index = queue_id; ++ req.ring_param[0].int_gl_index = HNS3_RING_GL_TX; ++ req.ring_param[0].ring_type = queue_type; ++ req.ring_param[0].tqp_index = queue_id; + op_str = mmap ? "Map" : "Unmap"; +- ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, +- sizeof(bind_msg), false, NULL, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) +- hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", +- op_str, queue_id, bind_msg.vector_id, ret); ++ hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.", ++ op_str, queue_id, req.vector_id, ret); + + return ret; + } +@@ -532,10 +538,12 @@ cfg_err: + static int + hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) + { ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, +- sizeof(mtu), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0); ++ memcpy(req.data, &mtu, sizeof(mtu)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); + +@@ -610,6 +618,19 @@ hns3vf_enable_irq0(struct hns3_hw *hw) hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1); } @@ -37689,7 +46769,42 @@ index d220522c43..702a61aad9 100644 static enum hns3vf_evt_cause hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) { -@@ -684,69 +699,10 @@ hns3vf_interrupt_handler(void *param) +@@ -629,13 +650,8 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + val = hns3_read_dev(hw, HNS3_VF_RST_ING); + hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); + val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); +- if (clearval) { +- hw->reset.stats.global_cnt++; +- hns3_warn(hw, "Global reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, "Global reset detected, don't clear reset status"); +- } ++ hw->reset.stats.global_cnt++; ++ hns3_warn(hw, "Global reset detected, clear reset status"); + + ret = HNS3VF_VECTOR0_EVENT_RST; + goto out; +@@ -650,9 +666,9 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + + val = 0; + ret = HNS3VF_VECTOR0_EVENT_OTHER; ++ + out: +- if (clearval) +- *clearval = val; ++ *clearval = val; + return ret; + } + +@@ -678,75 +694,16 @@ hns3vf_interrupt_handler(void *param) + hns3_schedule_reset(hns); + break; + case HNS3VF_VECTOR0_EVENT_MBX: +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); + break; + default: break; } @@ -37763,7 +46878,32 @@ index d220522c43..702a61aad9 100644 } void -@@ -821,12 +777,8 @@ hns3vf_get_capability(struct hns3_hw *hw) +@@ -772,12 +729,13 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) + uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; + uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); ++ struct hns3_vf_to_pf_msg req; + + __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN, + __ATOMIC_RELEASE); + +- (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, 
HNS3_MBX_GET_LINK_STATUS, 0); ++ (void)hns3vf_mbx_send(hw, &req, false, NULL, 0); + + while (remain_ms > 0) { + rte_delay_ms(HNS3_POLL_RESPONE_MS); +@@ -788,7 +746,7 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) + * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE + * mailbox from PF driver to get this capability. + */ +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); + if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != + HNS3_PF_PUSH_LSC_CAP_UNKNOWN) + break; +@@ -821,12 +779,8 @@ hns3vf_get_capability(struct hns3_hw *hw) { int ret; @@ -37777,7 +46917,11 @@ index d220522c43..702a61aad9 100644 hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE; hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US; hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM; -@@ -837,7 +789,7 @@ hns3vf_get_capability(struct hns3_hw *hw) +@@ -834,10 +788,11 @@ hns3vf_get_capability(struct hns3_hw *hw) + hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; return 0; } @@ -37786,7 +46930,209 @@ index d220522c43..702a61aad9 100644 if (ret) { PMD_INIT_LOG(ERR, "failed to query dev specifications, ret = %d", -@@ -1477,6 +1429,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) +@@ -851,6 +806,7 @@ hns3vf_get_capability(struct hns3_hw *hw) + hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + + return 0; + } +@@ -876,12 +832,13 @@ hns3vf_check_tqp_info(struct hns3_hw *hw) + static int + hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t resp_msg; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, +- HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0, +- true, &resp_msg, sizeof(resp_msg)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_GET_PORT_BASE_VLAN_STATE); ++ ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg)); + if (ret) { + if (ret == -ETIME) { + /* +@@ -922,10 +879,12 @@ hns3vf_get_queue_info(struct hns3_hw *hw) + { + #define HNS3VF_TQPS_RSS_INFO_LEN 6 + uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, +- resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); + return ret; +@@ -963,10 +922,11 @@ hns3vf_get_basic_info(struct hns3_hw *hw) + { + uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; + struct hns3_basic_info *basic_info; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, +- true, resp_msg, sizeof(resp_msg)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg)); + if (ret) { + hns3_err(hw, "failed to get basic info from PF, ret = %d.", + ret); +@@ -986,10 +946,11 @@ static int + hns3vf_get_host_mac_addr(struct hns3_hw *hw) + { + uint8_t host_mac[RTE_ETHER_ADDR_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, +- true, host_mac, RTE_ETHER_ADDR_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0); ++ ret = 
hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN); + if (ret) { + hns3_err(hw, "Failed to get mac addr from PF: %d", ret); + return ret; +@@ -1038,6 +999,7 @@ static void + hns3vf_request_link_info(struct hns3_hw *hw) + { + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); ++ struct hns3_vf_to_pf_msg req; + bool send_req; + int ret; + +@@ -1049,8 +1011,8 @@ hns3vf_request_link_info(struct hns3_hw *hw) + if (!send_req) + return; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_err(hw, "failed to fetch link status, ret = %d", ret); + return; +@@ -1094,19 +1056,18 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + static int + hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) + { +-#define HNS3VF_VLAN_MBX_MSG_LEN 5 ++ struct hns3_mbx_vlan_filter *vlan_filter; ++ struct hns3_vf_to_pf_msg req = {0}; + struct hns3_hw *hw = &hns->hw; +- uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; +- uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); +- uint8_t is_kill = on ? 0 : 1; + +- msg_data[0] = is_kill; +- memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); +- memcpy(&msg_data[3], &proto, sizeof(proto)); ++ req.code = HNS3_MBX_SET_VLAN; ++ req.subcode = HNS3_MBX_VLAN_FILTER; ++ vlan_filter = (struct hns3_mbx_vlan_filter *)req.data; ++ vlan_filter->is_kill = on ? 0 : 1; ++ vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN); ++ vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id); + +- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, +- msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, +- 0); ++ return hns3vf_mbx_send(hw, &req, true, NULL, 0); + } + + static int +@@ -1135,6 +1096,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + static int + hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + int ret; + +@@ -1142,9 +1104,10 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + return 0; + + msg_data = enable ? 1 : 0; +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, +- HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, +- sizeof(msg_data), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_ENABLE_VLAN_FILTER); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "%s vlan filter failed, ret = %d.", + enable ? "enable" : "disable", ret); +@@ -1155,12 +1118,15 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + static int + hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + int ret; + + msg_data = enable ? 1 : 0; +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, +- &msg_data, sizeof(msg_data), false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_VLAN_RX_OFF_CFG); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) + hns3_err(hw, "vf %s strip failed, ret = %d.", + enable ? "enable" : "disable", ret); +@@ -1304,11 +1270,13 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) + static int + hns3vf_set_alive(struct hns3_hw *hw, bool alive) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + + msg_data = alive ? 
1 : 0; +- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data, +- sizeof(msg_data), false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ return hns3vf_mbx_send(hw, &req, false, NULL, 0); + } + + static void +@@ -1316,11 +1284,12 @@ hns3vf_keep_alive_handler(void *param) + { + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = eth_dev->data->dev_private; ++ struct hns3_vf_to_pf_msg req; + struct hns3_hw *hw = &hns->hw; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, +- false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) + hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", + ret); +@@ -1459,9 +1428,11 @@ err_init_hardware: + static int + hns3vf_clear_vport_list(struct hns3_hw *hw) + { +- return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, +- HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, +- NULL, 0); ++ struct hns3_vf_to_pf_msg req; ++ ++ hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL, ++ HNS3_MBX_VPORT_LIST_CLEAR); ++ return hns3vf_mbx_send(hw, &req, false, NULL, 0); + } + + static int +@@ -1477,6 +1448,10 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev) /* Get hardware io base address from pcie BAR2 IO space */ hw->io_base = pci_dev->mem_resource[2].addr; @@ -37797,7 +47143,7 @@ index d220522c43..702a61aad9 100644 /* Firmware command queue initialize */ ret = hns3_cmd_init_queue(hw); if (ret) { -@@ -1633,12 +1589,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) +@@ -1633,12 +1608,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev) dev->data->dev_started = 0; hw->adapter_state = HNS3_NIC_STOPPING; @@ -37811,7 +47157,7 @@ index d220522c43..702a61aad9 100644 rte_spinlock_lock(&hw->lock); if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -1740,8 +1691,10 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) +@@ -1740,8 +1710,10 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue) hns3_enable_rxd_adv_layout(hw); ret = hns3_init_queues(hns, reset_queue); @@ -37823,7 +47169,7 @@ index d220522c43..702a61aad9 100644 return hns3_restore_filter(hns); } -@@ -1792,8 +1745,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) +@@ -1792,8 +1764,7 @@ hns3vf_dev_start(struct rte_eth_dev *dev) rte_spinlock_unlock(&hw->lock); hns3_rx_scattered_calc(dev); @@ -37833,7 +47179,34 @@ index d220522c43..702a61aad9 100644 /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); -@@ -1859,14 +1811,13 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) +@@ -1838,11 +1809,25 @@ is_vf_reset_done(struct hns3_hw *hw) + return true; + } + ++static enum hns3_reset_level ++hns3vf_detect_reset_event(struct hns3_hw *hw) ++{ ++ enum hns3_reset_level reset = HNS3_NONE_RESET; ++ uint32_t cmdq_stat_reg; ++ ++ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); ++ if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) ++ reset = HNS3_VF_RESET; ++ ++ return reset; ++} ++ + bool + hns3vf_is_reset_pending(struct hns3_adapter *hns) + { ++ enum hns3_reset_level last_req; + struct hns3_hw *hw = &hns->hw; +- enum hns3_reset_level reset; ++ enum hns3_reset_level new_req; + + /* + * According to the protocol of PCIe, FLR to a PF device resets the PF +@@ -1859,20 +1844,24 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) return false; /* @@ -37848,12 +47221,43 @@ index d220522c43..702a61aad9 100644 - hns3vf_check_event_cause(hns, NULL); + if 
(rte_eal_process_type() != RTE_PROC_PRIMARY) + return false; ++ ++ new_req = hns3vf_detect_reset_event(hw); ++ if (new_req == HNS3_NONE_RESET) ++ return false; + +- reset = hns3vf_get_reset_level(hw, &hw->reset.pending); +- if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is pending", reset); ++ last_req = hns3vf_get_reset_level(hw, &hw->reset.pending); ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); + return true; + } ++ + return false; + } -+ hns3vf_check_event_cause(hns, NULL); - reset = hns3vf_get_reset_level(hw, &hw->reset.pending); - if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && - hw->reset.level < reset) { -@@ -1963,11 +1914,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) +@@ -1931,12 +1920,13 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) + static int + hns3vf_prepare_reset(struct hns3_adapter *hns) + { ++ struct hns3_vf_to_pf_msg req; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (hw->reset.level == HNS3_VF_FUNC_RESET) { +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, +- 0, true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + return ret; + } +@@ -1963,11 +1953,7 @@ hns3vf_stop_service(struct hns3_adapter *hns) } hw->mac.link_status = RTE_ETH_LINK_DOWN; @@ -37866,7 +47270,7 @@ index d220522c43..702a61aad9 100644 rte_spinlock_lock(&hw->lock); if (hw->adapter_state == HNS3_NIC_STARTED || -@@ -1999,8 +1946,7 @@ hns3vf_start_service(struct hns3_adapter *hns) +@@ -1999,8 +1985,7 @@ hns3vf_start_service(struct hns3_adapter *hns) struct rte_eth_dev *eth_dev; eth_dev = &rte_eth_devices[hw->data->port_id]; @@ -37876,7 +47280,7 @@ index d220522c43..702a61aad9 100644 rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler, eth_dev); -@@ -2231,8 +2177,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) +@@ -2231,8 +2216,11 @@ hns3vf_reinit_dev(struct hns3_adapter *hns) */ if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO || pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) { @@ -39378,10 +48782,34 @@ index 44a1119415..916bf30dcb 100644 hw->reset.stage = RESET_STAGE_PREWAIT; hns3_schedule_reset(hns); diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c -index 8e0a58aa02..f1743c195e 100644 +index 8e0a58aa02..9cdbc1668a 100644 --- a/dpdk/drivers/net/hns3/hns3_mbx.c +++ b/dpdk/drivers/net/hns3/hns3_mbx.c -@@ -40,23 +40,6 @@ hns3_resp_to_errno(uint16_t resp_code) +@@ -11,8 +11,6 @@ + #include "hns3_intr.h" + #include "hns3_rxtx.h" + +-#define HNS3_CMD_CODE_OFFSET 2 +- + static const struct errno_respcode_map err_code_map[] = { + {0, 0}, + {1, -EPERM}, +@@ -26,6 +24,14 @@ static const struct errno_respcode_map err_code_map[] = { + {95, -EOPNOTSUPP}, + }; + ++void ++hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode) ++{ ++ memset(req, 0, sizeof(struct hns3_vf_to_pf_msg)); ++ req->code = code; ++ req->subcode = subcode; ++} ++ + static int + hns3_resp_to_errno(uint16_t resp_code) + { +@@ -40,23 +46,6 @@ hns3_resp_to_errno(uint16_t resp_code) return -EIO; } @@ -39405,7 +48833,7 @@ index 8e0a58aa02..f1743c195e 100644 static int hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, uint8_t *resp_data, uint16_t resp_len) -@@ -67,7 +50,6 @@ 
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +@@ -67,7 +56,6 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct hns3_mbx_resp_status *mbx_resp; uint32_t wait_time = 0; @@ -39413,8 +48841,12 @@ index 8e0a58aa02..f1743c195e 100644 if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) { hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)", -@@ -93,20 +75,14 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - hns3_dev_handle_mbx_msg(hw); +@@ -90,23 +78,17 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + return -EIO; + } + +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); rte_delay_us(HNS3_WAIT_RESP_US); - if (hw->mbx_resp.matching_scheme == @@ -39436,7 +48868,7 @@ index 8e0a58aa02..f1743c195e 100644 return -ETIME; } rte_io_rmb(); -@@ -132,7 +108,6 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) +@@ -132,7 +114,6 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) * we get the exact scheme which is used. */ hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode; @@ -39444,15 +48876,115 @@ index 8e0a58aa02..f1743c195e 100644 /* Update match_id and ensure the value of match_id is not zero */ hw->mbx_resp.match_id++; -@@ -185,7 +160,6 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - req->match_id = hw->mbx_resp.match_id; +@@ -145,54 +126,34 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) + } + + int +-hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, +- uint8_t *resp_data, uint16_t resp_len) ++hns3vf_mbx_send(struct hns3_hw *hw, ++ struct hns3_vf_to_pf_msg *req, bool need_resp, ++ uint8_t *resp_data, uint16_t resp_len) + { +- struct hns3_mbx_vf_to_pf_cmd *req; ++ struct hns3_mbx_vf_to_pf_cmd *cmd; + struct hns3_cmd_desc desc; +- bool is_ring_vector_msg; +- int offset; + int ret; + +- req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; +- +- /* first two bytes are reserved for code & subcode */ +- if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { +- hns3_err(hw, +- "VF send mbx msg fail, msg len %u exceeds max payload len %d", +- msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); +- return -EINVAL; +- } +- + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); +- req->msg[0] = code; +- is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) || +- (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) || +- (code == HNS3_MBX_GET_RING_VECTOR_MAP); +- if (!is_ring_vector_msg) +- req->msg[1] = subcode; +- if (msg_data) { +- offset = is_ring_vector_msg ? 
1 : HNS3_CMD_CODE_OFFSET; +- memcpy(&req->msg[offset], msg_data, msg_len); +- } ++ cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; ++ cmd->msg = *req; + + /* synchronous send */ + if (need_resp) { +- req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; ++ cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; + rte_spinlock_lock(&hw->mbx_resp.lock); +- hns3_mbx_prepare_resp(hw, code, subcode); +- req->match_id = hw->mbx_resp.match_id; ++ hns3_mbx_prepare_resp(hw, req->code, req->subcode); ++ cmd->match_id = hw->mbx_resp.match_id; ret = hns3_cmd_send(hw, &desc, 1); if (ret) { - hw->mbx_resp.head--; rte_spinlock_unlock(&hw->mbx_resp.lock); hns3_err(hw, "VF failed(=%d) to send mbx message to PF", ret); -@@ -254,41 +228,10 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + return ret; + } + +- ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len); ++ ret = hns3_get_mbx_resp(hw, req->code, req->subcode, ++ resp_data, resp_len); + rte_spinlock_unlock(&hw->mbx_resp.lock); + } else { + /* asynchronous send */ +@@ -219,17 +180,17 @@ static void + hns3vf_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { ++ struct hns3_mbx_link_status *link_info = ++ (struct hns3_mbx_link_status *)req->msg.msg_data; + uint8_t link_status, link_duplex; +- uint16_t *msg_q = req->msg; + uint8_t support_push_lsc; + uint32_t link_speed; + +- memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); +- link_status = rte_le_to_cpu_16(msg_q[1]); +- link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); +- hns3vf_update_link_status(hw, link_status, link_speed, +- link_duplex); +- support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u; ++ link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status); ++ link_speed = rte_le_to_cpu_32(link_info->speed); ++ link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex); ++ hns3vf_update_link_status(hw, link_status, link_speed, link_duplex); ++ support_push_lsc = (link_info->flag) & 1u; + hns3vf_update_push_lsc_cap(hw, support_push_lsc); + } + +@@ -238,7 +199,6 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { + enum hns3_reset_level reset_level; +- uint16_t *msg_q = req->msg; + + /* + * PF has asserted reset hence VF should go in pending +@@ -246,7 +206,7 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + * has been completely reset. After this stack should + * eventually be re-initialized. + */ +- reset_level = rte_le_to_cpu_16(msg_q[1]); ++ reset_level = rte_le_to_cpu_16(req->msg.reset_level); + hns3_atomic_set_bit(reset_level, &hw->reset.pending); + + hns3_warn(hw, "PF inform reset level %d", reset_level); +@@ -254,41 +214,10 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw)); } @@ -39495,7 +49027,7 @@ index 8e0a58aa02..f1743c195e 100644 struct hns3_mbx_resp_status *resp = &hw->mbx_resp; uint32_t msg_data; -@@ -298,12 +241,6 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) +@@ -298,15 +227,10 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) * match_id to its response. So VF could use the match_id * to match the request. 
*/ @@ -39506,13 +49038,22 @@ index 8e0a58aa02..f1743c195e 100644 - hns3_info(hw, "detect mailbox support match id!"); - } if (req->match_id == resp->match_id) { - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], -@@ -319,11 +256,19 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) +- resp->resp_status = hns3_resp_to_errno(req->msg[3]); +- memcpy(resp->additional_info, &req->msg[4], ++ resp->resp_status = ++ hns3_resp_to_errno(req->msg.resp_status); ++ memcpy(resp->additional_info, &req->msg.resp_data, + HNS3_MBX_MAX_RESP_DATA_SIZE); + rte_io_wmb(); + resp->received_match_resp = true; +@@ -319,11 +243,20 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) * support copy request's match_id to its response. So VF follows the * original scheme to process. */ -+ msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; +- resp->resp_status = hns3_resp_to_errno(req->msg[3]); +- memcpy(resp->additional_info, &req->msg[4], ++ msg_data = (uint32_t)req->msg.vf_mbx_msg_code << ++ HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode; + if (resp->req_msg_data != msg_data) { + hns3_warn(hw, + "received response tag (%u) is mismatched with requested tag (%u)", @@ -39520,8 +49061,8 @@ index 8e0a58aa02..f1743c195e 100644 + return; + } + - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], ++ resp->resp_status = hns3_resp_to_errno(req->msg.resp_status); ++ memcpy(resp->additional_info, &req->msg.resp_data, HNS3_MBX_MAX_RESP_DATA_SIZE); - msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2]; - hns3_update_resp_position(hw, msg_data); @@ -39530,11 +49071,162 @@ index 8e0a58aa02..f1743c195e 100644 } static void +@@ -351,11 +284,8 @@ static void + hns3pf_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_vf_to_pf_cmd *req) + { +-#define LINK_STATUS_OFFSET 1 +-#define LINK_FAIL_CODE_OFFSET 2 +- +- if (!req->msg[LINK_STATUS_OFFSET]) +- hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]); ++ if (!req->msg.link_status) ++ hns3_link_fail_parse(hw, req->msg.link_fail_code); + + hns3_update_linkstatus_and_event(hw, true); + } +@@ -364,8 +294,7 @@ static void + hns3_update_port_base_vlan_info(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { +-#define PVID_STATE_OFFSET 1 +- uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ? ++ uint16_t new_pvid_state = req->msg.pvid_state ? 
+ HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; + /* + * Currently, hardware doesn't support more than two layers VLAN offload +@@ -414,7 +343,7 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) + while (next_to_use != tail) { + desc = &crq->desc[next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; +- opcode = req->msg[0] & 0xff; ++ opcode = req->msg.code & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag); + if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B)) +@@ -443,9 +372,57 @@ scan_next: + } + + void +-hns3_dev_handle_mbx_msg(struct hns3_hw *hw) ++hns3pf_handle_mbx_msg(struct hns3_hw *hw) ++{ ++ struct hns3_cmq_ring *crq = &hw->cmq.crq; ++ struct hns3_mbx_vf_to_pf_cmd *req; ++ struct hns3_cmd_desc *desc; ++ uint16_t flag; ++ ++ rte_spinlock_lock(&hw->cmq.crq.lock); ++ ++ while (!hns3_cmd_crq_empty(hw)) { ++ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { ++ rte_spinlock_unlock(&hw->cmq.crq.lock); ++ return; ++ } ++ desc = &crq->desc[crq->next_to_use]; ++ req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data; ++ ++ flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); ++ if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { ++ hns3_warn(hw, ++ "dropped invalid mailbox message, code = %u", ++ req->msg.code); ++ ++ /* dropping/not processing this invalid message */ ++ crq->desc[crq->next_to_use].flag = 0; ++ hns3_mbx_ring_ptr_move_crq(crq); ++ continue; ++ } ++ ++ switch (req->msg.code) { ++ case HNS3_MBX_PUSH_LINK_STATUS: ++ hns3pf_handle_link_change_event(hw, req); ++ break; ++ default: ++ hns3_err(hw, "received unsupported(%u) mbx msg", ++ req->msg.code); ++ break; ++ } ++ crq->desc[crq->next_to_use].flag = 0; ++ hns3_mbx_ring_ptr_move_crq(crq); ++ } ++ ++ /* Write back CMDQ_RQ header pointer, IMP need this pointer */ ++ hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); ++ ++ rte_spinlock_unlock(&hw->cmq.crq.lock); ++} ++ ++void ++hns3vf_handle_mbx_msg(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_cmq_ring *crq = &hw->cmq.crq; + struct hns3_mbx_pf_to_vf_cmd *req; + struct hns3_cmd_desc *desc; +@@ -456,7 +433,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + rte_spinlock_lock(&hw->cmq.crq.lock); + + handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY || +- !rte_thread_is_intr()) && hns->is_vf; ++ !rte_thread_is_intr()); + if (handle_out) { + /* + * Currently, any threads in the primary and secondary processes +@@ -487,7 +464,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + + desc = &crq->desc[crq->next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; +- opcode = req->msg[0] & 0xff; ++ opcode = req->msg.code & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); + if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { +@@ -501,8 +478,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + continue; + } + +- handle_out = hns->is_vf && desc->opcode == 0; +- if (handle_out) { ++ if (desc->opcode == 0) { + /* Message already processed by other thread */ + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); +@@ -519,16 +495,6 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + case HNS3_MBX_ASSERTING_RESET: + hns3_handle_asserting_reset(hw, req); + break; +- case HNS3_MBX_PUSH_LINK_STATUS: +- /* +- * This message is reported by the firmware and is +- * reported in 'struct hns3_mbx_vf_to_pf_cmd' format. +- * Therefore, we should cast the req variable to +- * 'struct hns3_mbx_vf_to_pf_cmd' and then process it. 
+- */ +- hns3pf_handle_link_change_event(hw, +- (struct hns3_mbx_vf_to_pf_cmd *)req); +- break; + case HNS3_MBX_PUSH_VLAN_INFO: + /* + * When the PVID configuration status of VF device is +@@ -543,7 +509,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + * hns3 PF kernel driver, VF driver will receive this + * mailbox message from PF driver. + */ +- hns3_handle_promisc_info(hw, req->msg[1]); ++ hns3_handle_promisc_info(hw, req->msg.promisc_en); + break; + default: + hns3_err(hw, "received unsupported(%u) mbx msg", diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h -index c378783c6c..4a328802b9 100644 +index c378783c6c..2b6cb8f513 100644 --- a/dpdk/drivers/net/hns3/hns3_mbx.h +++ b/dpdk/drivers/net/hns3/hns3_mbx.h -@@ -93,21 +93,11 @@ enum hns3_mbx_link_fail_subcode { +@@ -89,25 +89,14 @@ enum hns3_mbx_link_fail_subcode { + HNS3_MBX_LF_XSFP_ABSENT, + }; + +-#define HNS3_MBX_MAX_MSG_SIZE 16 #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 @@ -39556,6 +49248,123 @@ index c378783c6c..4a328802b9 100644 /* The following fields used in the matching scheme for match_id */ uint16_t match_id; +@@ -117,6 +106,69 @@ struct hns3_mbx_resp_status { + uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE]; + }; + ++struct hns3_ring_chain_param { ++ uint8_t ring_type; ++ uint8_t tqp_index; ++ uint8_t int_gl_index; ++}; ++ ++struct hns3_mbx_vlan_filter { ++ uint8_t is_kill; ++ uint16_t vlan_id; ++ uint16_t proto; ++} __rte_packed; ++ ++struct hns3_mbx_link_status { ++ uint16_t link_status; ++ uint32_t speed; ++ uint16_t duplex; ++ uint8_t flag; ++} __rte_packed; ++ ++#define HNS3_MBX_MSG_MAX_DATA_SIZE 14 ++#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 ++struct hns3_vf_to_pf_msg { ++ uint8_t code; ++ union { ++ struct { ++ uint8_t subcode; ++ uint8_t data[HNS3_MBX_MSG_MAX_DATA_SIZE]; ++ }; ++ struct { ++ uint8_t en_bc; ++ uint8_t en_uc; ++ uint8_t en_mc; ++ uint8_t en_limit_promisc; ++ }; ++ struct { ++ uint8_t vector_id; ++ uint8_t ring_num; ++ struct hns3_ring_chain_param ++ ring_param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; ++ }; ++ struct { ++ uint8_t link_status; ++ uint8_t link_fail_code; ++ }; ++ }; ++}; ++ ++struct hns3_pf_to_vf_msg { ++ uint16_t code; ++ union { ++ struct { ++ uint16_t vf_mbx_msg_code; ++ uint16_t vf_mbx_msg_subcode; ++ uint16_t resp_status; ++ uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE]; ++ }; ++ uint16_t promisc_en; ++ uint16_t reset_level; ++ uint16_t pvid_state; ++ uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE]; ++ }; ++}; ++ + struct errno_respcode_map { + uint16_t resp_code; + int err_no; +@@ -132,7 +184,7 @@ struct hns3_mbx_vf_to_pf_cmd { + uint8_t msg_len; + uint8_t rsv2; + uint16_t match_id; +- uint8_t msg[HNS3_MBX_MAX_MSG_SIZE]; ++ struct hns3_vf_to_pf_msg msg; + }; + + struct hns3_mbx_pf_to_vf_cmd { +@@ -141,20 +193,7 @@ struct hns3_mbx_pf_to_vf_cmd { + uint8_t msg_len; + uint8_t rsv1; + uint16_t match_id; +- uint16_t msg[8]; +-}; +- +-struct hns3_ring_chain_param { +- uint8_t ring_type; +- uint8_t tqp_index; +- uint8_t int_gl_index; +-}; +- +-#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +-struct hns3_vf_bind_vector_msg { +- uint8_t vector_id; +- uint8_t ring_num; +- struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; ++ struct hns3_pf_to_vf_msg msg; + }; + + struct hns3_pf_rst_done_cmd { +@@ -168,8 +207,11 @@ struct hns3_pf_rst_done_cmd { + ((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num) + + struct hns3_hw; +-void hns3_dev_handle_mbx_msg(struct hns3_hw *hw); +-int 
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, +- uint8_t *resp_data, uint16_t resp_len); ++void hns3pf_handle_mbx_msg(struct hns3_hw *hw); ++void hns3vf_handle_mbx_msg(struct hns3_hw *hw); ++void hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, ++ uint8_t code, uint8_t subcode); ++int hns3vf_mbx_send(struct hns3_hw *hw, ++ struct hns3_vf_to_pf_msg *req_msg, bool need_resp, ++ uint8_t *resp_data, uint16_t resp_len); + #endif /* HNS3_MBX_H */ diff --git a/dpdk/drivers/net/hns3/hns3_mp.c b/dpdk/drivers/net/hns3/hns3_mp.c index 7184f9ad58..556f1941c6 100644 --- a/dpdk/drivers/net/hns3/hns3_mp.c @@ -39794,7 +49603,7 @@ index 459bbaf773..6b037f81c1 100644 #define HNS3_GL_USEC_TO_REG(gl_usec) ((gl_usec) >> 1) /* rl_usec convert to hardware count, as writing each 1 represents 4us */ diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c -index ca5a129234..eeeca71a5c 100644 +index ca5a129234..ac88ca04ce 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.c +++ b/dpdk/drivers/net/hns3/hns3_rss.c @@ -18,56 +18,11 @@ const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE] = { @@ -39859,7 +49668,7 @@ index ca5a129234..eeeca71a5c 100644 }; enum hns3_rss_tuple_type { -@@ -79,243 +34,285 @@ static const struct { +@@ -79,243 +34,283 @@ static const struct { uint64_t rss_types; uint16_t tuple_type; uint64_t rss_field; @@ -40005,9 +49814,9 @@ index ca5a129234..eeeca71a5c 100644 BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) }, -+ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER), ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D), + HNS3_RSS_TUPLE_IPV4_SCTP_M }, /* IPV6-FRAG */ @@ -40150,9 +49959,9 @@ index ca5a129234..eeeca71a5c 100644 BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) | BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) }, -+ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER), ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S), + HNS3_RSS_TUPLE_IPV6_SCTP_M }, }; @@ -40218,7 +50027,7 @@ index ca5a129234..eeeca71a5c 100644 ret = hns3_cmd_send(hw, &desc, 1); if (ret) { -@@ -323,8 +320,49 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) +@@ -323,8 +318,49 @@ hns3_rss_set_algo_key(struct hns3_hw *hw, const uint8_t *key) return ret; } } @@ -40270,7 +50079,7 @@ index ca5a129234..eeeca71a5c 100644 return 0; } -@@ -336,6 +374,7 @@ int +@@ -336,6 +372,7 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) { struct hns3_rss_indirection_table_cmd *req; @@ -40278,7 +50087,7 @@ index ca5a129234..eeeca71a5c 100644 struct hns3_cmd_desc desc; uint8_t qid_msb_off; uint8_t qid_msb_val; -@@ -344,14 +383,20 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) +@@ -344,14 +381,20 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) int ret; req = (struct hns3_rss_indirection_table_cmd *)desc.data; @@ -40302,7 +50111,7 @@ index ca5a129234..eeeca71a5c 100644 q_id = indir[i * HNS3_RSS_CFG_TBL_SIZE + j]; req->rss_result_l[j] = q_id & 0xff; -@@ -372,9 +417,53 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) +@@ 
-372,9 +415,53 @@ hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, uint16_t size) } } @@ -40359,7 +50168,7 @@ index ca5a129234..eeeca71a5c 100644 return 0; } -@@ -393,41 +482,72 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) +@@ -393,41 +480,72 @@ hns3_rss_reset_indir_table(struct hns3_hw *hw) } ret = hns3_set_rss_indir_table(hw, lut, hw->rss_ind_tbl_size); @@ -40455,7 +50264,7 @@ index ca5a129234..eeeca71a5c 100644 { uint64_t l3_only_mask = RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY; -@@ -456,34 +576,40 @@ hns3_rss_calc_tuple_filed(struct hns3_hw *hw, uint64_t rss_hf) +@@ -456,34 +574,40 @@ hns3_rss_calc_tuple_filed(struct hns3_hw *hw, uint64_t rss_hf) !has_l3_l4_only) tuple |= hns3_set_tuple_table[i].rss_field; } @@ -40509,7 +50318,7 @@ index ca5a129234..eeeca71a5c 100644 } /* -@@ -500,28 +626,35 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev, +@@ -500,28 +624,35 @@ hns3_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -40549,7 +50358,7 @@ index ca5a129234..eeeca71a5c 100644 rte_spinlock_unlock(&hw->lock); return 0; -@@ -533,6 +666,96 @@ set_tuple_fail: +@@ -533,6 +664,96 @@ set_tuple_fail: return ret; } @@ -40646,7 +50455,7 @@ index ca5a129234..eeeca71a5c 100644 /* * Get rss key and rss_hf types set of RSS hash configuration. * @param dev -@@ -548,19 +771,32 @@ hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, +@@ -548,19 +769,32 @@ hns3_dev_rss_hash_conf_get(struct rte_eth_dev *dev, { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; @@ -40685,7 +50494,7 @@ index ca5a129234..eeeca71a5c 100644 } /* -@@ -600,12 +836,12 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -600,12 +834,12 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, idx = i / RTE_ETH_RETA_GROUP_SIZE; shift = i % RTE_ETH_RETA_GROUP_SIZE; if (reta_conf[idx].reta[shift] >= hw->alloc_rss_size) { @@ -40700,7 +50509,7 @@ index ca5a129234..eeeca71a5c 100644 } if (reta_conf[idx].mask & (1ULL << shift)) -@@ -614,7 +850,13 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, +@@ -614,7 +848,13 @@ hns3_dev_rss_reta_update(struct rte_eth_dev *dev, ret = hns3_set_rss_indir_table(hw, indirection_tbl, hw->rss_ind_tbl_size); @@ -40714,7 +50523,7 @@ index ca5a129234..eeeca71a5c 100644 rte_spinlock_unlock(&hw->lock); return ret; } -@@ -636,10 +878,11 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, +@@ -636,10 +876,11 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, uint16_t reta_size) { struct hns3_adapter *hns = dev->data->dev_private; @@ -40727,7 +50536,7 @@ index ca5a129234..eeeca71a5c 100644 if (reta_size != hw->rss_ind_tbl_size) { hns3_err(hw, "The size of hash lookup table configured (%u)" -@@ -648,14 +891,22 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, +@@ -648,14 +889,22 @@ hns3_dev_rss_reta_query(struct rte_eth_dev *dev, return -EINVAL; } rte_spinlock_lock(&hw->lock); @@ -40753,7 +50562,7 @@ index ca5a129234..eeeca71a5c 100644 return 0; } -@@ -733,6 +984,52 @@ hns3_set_rss_tc_mode(struct hns3_hw *hw) +@@ -733,6 +982,52 @@ hns3_set_rss_tc_mode(struct hns3_hw *hw) return ret; } @@ -40806,7 +50615,7 @@ index ca5a129234..eeeca71a5c 100644 static void hns3_rss_tuple_uninit(struct hns3_hw *hw) { -@@ -759,10 +1056,11 @@ hns3_rss_set_default_args(struct hns3_hw *hw) +@@ -759,10 +1054,11 @@ hns3_rss_set_default_args(struct hns3_hw *hw) uint16_t i; /* Default hash algorithm */ @@ -40821,7 +50630,7 @@ index ca5a129234..eeeca71a5c 100644 /* Initialize 
RSS indirection table */ for (i = 0; i < hw->rss_ind_tbl_size; i++) -@@ -783,20 +1081,8 @@ hns3_config_rss(struct hns3_adapter *hns) +@@ -783,20 +1079,8 @@ hns3_config_rss(struct hns3_adapter *hns) enum rte_eth_rx_mq_mode mq_mode = hw->data->dev_conf.rxmode.mq_mode; @@ -40844,7 +50653,7 @@ index ca5a129234..eeeca71a5c 100644 if (ret) return ret; -@@ -810,15 +1096,22 @@ hns3_config_rss(struct hns3_adapter *hns) +@@ -810,15 +1094,22 @@ hns3_config_rss(struct hns3_adapter *hns) return ret; /* @@ -40870,7 +50679,7 @@ index ca5a129234..eeeca71a5c 100644 } /* -@@ -836,5 +1129,5 @@ hns3_rss_uninit(struct hns3_adapter *hns) +@@ -836,5 +1127,5 @@ hns3_rss_uninit(struct hns3_adapter *hns) return; /* Disable RSS */ @@ -40878,10 +50687,10 @@ index ca5a129234..eeeca71a5c 100644 + hw->rss_info.rss_hf = 0; } diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h -index 8e8b056f4e..9d182a8025 100644 +index 8e8b056f4e..0755760b45 100644 --- a/dpdk/drivers/net/hns3/hns3_rss.h +++ b/dpdk/drivers/net/hns3/hns3_rss.h -@@ -8,27 +8,107 @@ +@@ -8,27 +8,105 @@ #include #include @@ -40943,7 +50752,6 @@ index 8e8b056f4e..9d182a8025 100644 + HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, + HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, -+ HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, + + /* IPV4 ENABLE FIELD */ + HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, @@ -40968,7 +50776,6 @@ index 8e8b056f4e..9d182a8025 100644 + HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, + HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, -+ HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, + + /* IPV6 ENABLE FIELD */ + HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, @@ -40990,12 +50797,12 @@ index 8e8b056f4e..9d182a8025 100644 + +#define HNS3_RSS_TUPLE_IPV4_TCP_M GENMASK(3, 0) +#define HNS3_RSS_TUPLE_IPV4_UDP_M GENMASK(11, 8) -+#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(20, 16) ++#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(19, 16) +#define HNS3_RSS_TUPLE_IPV4_NONF_M GENMASK(25, 24) +#define HNS3_RSS_TUPLE_IPV4_FLAG_M GENMASK(27, 26) +#define HNS3_RSS_TUPLE_IPV6_TCP_M GENMASK(35, 32) +#define HNS3_RSS_TUPLE_IPV6_UDP_M GENMASK(43, 40) -+#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(52, 48) ++#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(51, 48) +#define HNS3_RSS_TUPLE_IPV6_NONF_M GENMASK(57, 56) +#define HNS3_RSS_TUPLE_IPV6_FLAG_M GENMASK(59, 58) @@ -41006,7 +50813,7 @@ index 8e8b056f4e..9d182a8025 100644 #define HNS3_RSS_SET_BITMAP_MSK 0xffff #define HNS3_RSS_HASH_ALGO_TOEPLITZ 0 -@@ -36,15 +116,13 @@ +@@ -36,15 +114,13 @@ #define HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP 2 #define HNS3_RSS_HASH_ALGO_MASK 0xf @@ -41026,7 +50833,7 @@ index 8e8b056f4e..9d182a8025 100644 /* * For IPv6 SCTP packets type, check whether the NIC hardware support * RSS hash using the src/dst port as the input tuple. 
For Kunpeng920 -@@ -108,7 +186,12 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, +@@ -108,7 +184,12 @@ int hns3_set_rss_indir_table(struct hns3_hw *hw, uint16_t *indir, int hns3_rss_reset_indir_table(struct hns3_hw *hw); int hns3_config_rss(struct hns3_adapter *hns); void hns3_rss_uninit(struct hns3_adapter *hns); @@ -41041,7 +50848,7 @@ index 8e8b056f4e..9d182a8025 100644 #endif /* HNS3_RSS_H */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c -index f1163ce8a9..9d473dbc22 100644 +index f1163ce8a9..16cb174f4d 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx.c +++ b/dpdk/drivers/net/hns3/hns3_rxtx.c @@ -50,6 +50,8 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) @@ -41053,7 +50860,87 @@ index f1163ce8a9..9d473dbc22 100644 } for (i = 0; i < rxq->bulk_mbuf_num; i++) -@@ -584,7 +586,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) +@@ -84,9 +86,14 @@ hns3_rx_queue_release(void *queue) + struct hns3_rx_queue *rxq = queue; + if (rxq) { + hns3_rx_queue_release_mbufs(rxq); +- if (rxq->mz) ++ if (rxq->mz) { + rte_memzone_free(rxq->mz); +- rte_free(rxq->sw_ring); ++ rxq->mz = NULL; ++ } ++ if (rxq->sw_ring) { ++ rte_free(rxq->sw_ring); ++ rxq->sw_ring = NULL; ++ } + rte_free(rxq); + } + } +@@ -97,10 +104,18 @@ hns3_tx_queue_release(void *queue) + struct hns3_tx_queue *txq = queue; + if (txq) { + hns3_tx_queue_release_mbufs(txq); +- if (txq->mz) ++ if (txq->mz) { + rte_memzone_free(txq->mz); +- rte_free(txq->sw_ring); +- rte_free(txq->free); ++ txq->mz = NULL; ++ } ++ if (txq->sw_ring) { ++ rte_free(txq->sw_ring); ++ txq->sw_ring = NULL; ++ } ++ if (txq->free) { ++ rte_free(txq->free); ++ txq->free = NULL; ++ } + rte_free(txq); + } + } +@@ -258,12 +273,27 @@ hns3_free_all_queues(struct rte_eth_dev *dev) + hns3_free_tx_queues(dev); + } + ++static int ++hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr) ++{ ++ uint64_t rem; ++ ++ rem = dma_addr & (hw->rx_dma_addr_align - 1); ++ if (rem > 0) { ++ hns3_err(hw, "The IO address of the beginning of the mbuf data " ++ "must be %u-byte aligned", hw->rx_dma_addr_align); ++ return -EINVAL; ++ } ++ return 0; ++} ++ + static int + hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) + { + struct rte_mbuf *mbuf; + uint64_t dma_addr; + uint16_t i; ++ int ret; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); +@@ -284,6 +314,12 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxq->rx_ring[i].addr = dma_addr; + rxq->rx_ring[i].rx.bd_base_info = 0; ++ ++ ret = hns3_check_rx_dma_addr(hw, dma_addr); ++ if (ret != 0) { ++ hns3_rx_queue_release_mbufs(rxq); ++ return ret; ++ } + } + + return 0; +@@ -584,7 +620,7 @@ hns3_tqp_enable(struct hns3_hw *hw, uint16_t queue_id, bool enable) ret = hns3_cmd_send(hw, &desc, 1); if (ret) @@ -41062,7 +50949,45 @@ index f1163ce8a9..9d473dbc22 100644 return ret; } -@@ -1637,7 +1639,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, +@@ -684,13 +720,12 @@ tqp_reset_fail: + static int + hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) + { +- uint8_t msg_data[2]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- memcpy(msg_data, &queue_id, sizeof(uint16_t)); +- +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, +- sizeof(msg_data), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); ++ memcpy(req.data, &queue_id, sizeof(uint16_t)); ++ 
ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", + queue_id, ret); +@@ -767,15 +802,14 @@ static int + hns3vf_reset_all_tqps(struct hns3_hw *hw) + { + #define HNS3VF_RESET_ALL_TQP_DONE 1U ++ struct hns3_vf_to_pf_msg req; + uint8_t reset_status; +- uint8_t msg_data[2]; + int ret; + uint16_t i; + +- memset(msg_data, 0, sizeof(msg_data)); +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, +- sizeof(msg_data), true, &reset_status, +- sizeof(reset_status)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ &reset_status, sizeof(reset_status)); + if (ret) { + hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret); + return ret; +@@ -1637,7 +1671,7 @@ hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q, ret = hns3_fake_tx_queue_config(hw, tx_need_add_nb_q); if (ret) { @@ -41071,7 +50996,7 @@ index f1163ce8a9..9d473dbc22 100644 goto cfg_fake_tx_q_fail; } -@@ -1787,6 +1789,12 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, +@@ -1787,6 +1821,12 @@ hns3_rx_queue_conf_check(struct hns3_hw *hw, const struct rte_eth_rxconf *conf, return -EINVAL; } @@ -41084,7 +51009,34 @@ index f1163ce8a9..9d473dbc22 100644 if (conf->rx_drop_en == 0) hns3_warn(hw, "if no descriptors available, packets are always " "dropped and rx_drop_en (1) is fixed on"); -@@ -2786,6 +2794,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, +@@ -2386,8 +2426,7 @@ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf, + { + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns); + +- mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | +- RTE_MBUF_F_RX_IEEE1588_TMST; ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST; + if (hns3_timestamp_rx_dynflag > 0) { + *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, + rte_mbuf_timestamp_t *) = timestamp; +@@ -2665,6 +2704,7 @@ hns3_recv_scattered_pkts(void *rx_queue, + continue; + } + ++ first_seg->ol_flags = 0; + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) + hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); + +@@ -2694,7 +2734,7 @@ hns3_recv_scattered_pkts(void *rx_queue, + + first_seg->port = rxq->port_id; + first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); +- first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH; ++ first_seg->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; + if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { + first_seg->hash.fdir.hi = + rte_le_to_cpu_16(rxd.rx.fd_id); +@@ -2786,6 +2826,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, { hns3_recv_scattered_pkts, "Scalar Scattered" }, { hns3_recv_pkts_vec, "Vector Neon" }, { hns3_recv_pkts_vec_sve, "Vector Sve" }, @@ -41092,7 +51044,7 @@ index f1163ce8a9..9d473dbc22 100644 }; eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; -@@ -3115,6 +3124,9 @@ hns3_config_gro(struct hns3_hw *hw, bool en) +@@ -3115,6 +3156,9 @@ hns3_config_gro(struct hns3_hw *hw, bool en) struct hns3_cmd_desc desc; int ret; @@ -41102,7 +51054,121 @@ index f1163ce8a9..9d473dbc22 100644 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GRO_GENERIC_CONFIG, false); req = (struct hns3_cfg_gro_status_cmd *)desc.data; -@@ -4272,24 +4284,31 @@ int +@@ -3604,58 +3648,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, + return false; + } + +-static bool +-hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +- uint32_t *l4_proto) +-{ +- struct rte_ipv4_hdr 
*ipv4_hdr; +- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, +- m->outer_l2_len); +- if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) +- ipv4_hdr->hdr_checksum = 0; +- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { +- struct rte_udp_hdr *udp_hdr; +- /* +- * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo +- * header for TSO packets +- */ +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) +- return true; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + m->outer_l3_len); +- udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +- +- return true; +- } +- *l4_proto = ipv4_hdr->next_proto_id; +- return false; +-} +- +-static bool +-hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +- uint32_t *l4_proto) +-{ +- struct rte_ipv6_hdr *ipv6_hdr; +- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, +- m->outer_l2_len); +- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { +- struct rte_udp_hdr *udp_hdr; +- /* +- * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo +- * header for TSO packets +- */ +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) +- return true; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + m->outer_l3_len); +- udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +- +- return true; +- } +- *l4_proto = ipv6_hdr->proto; +- return false; +-} +- + static void + hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + { +@@ -3663,29 +3655,38 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + uint32_t paylen, hdr_len, l4_proto; + struct rte_udp_hdr *udp_hdr; + +- if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))) ++ if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) && ++ ((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) || ++ !(ol_flags & RTE_MBUF_F_TX_TCP_SEG))) + return; + + if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { +- if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto)) +- return; ++ struct rte_ipv4_hdr *ipv4_hdr; ++ ++ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, ++ m->outer_l2_len); ++ l4_proto = ipv4_hdr->next_proto_id; + } else { +- if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto)) +- return; ++ struct rte_ipv6_hdr *ipv6_hdr; ++ ++ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, ++ m->outer_l2_len); ++ l4_proto = ipv6_hdr->proto; + } + ++ if (l4_proto != IPPROTO_UDP) ++ return; ++ + /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ +- if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { +- hdr_len = m->l2_len + m->l3_len + m->l4_len; +- hdr_len += m->outer_l2_len + m->outer_l3_len; +- paylen = m->pkt_len - hdr_len; +- if (paylen <= m->tso_segsz) +- return; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + +- m->outer_l3_len); +- udp_hdr->dgram_cksum = 0; +- } ++ hdr_len = m->l2_len + m->l3_len + m->l4_len; ++ hdr_len += m->outer_l2_len + m->outer_l3_len; ++ paylen = m->pkt_len - hdr_len; ++ if (paylen <= m->tso_segsz) ++ return; ++ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, ++ m->outer_l2_len + ++ m->outer_l3_len); ++ udp_hdr->dgram_cksum = 0; + } + + static int +@@ -4272,24 +4273,31 @@ int hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, struct rte_eth_burst_mode *mode) { @@ -41149,7 +51215,7 @@ index f1163ce8a9..9d473dbc22 100644 } static bool -@@ -4303,11 +4322,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) +@@ -4303,11 +4311,6 @@ 
hns3_tx_check_simple_support(struct rte_eth_dev *dev) static bool hns3_get_tx_prep_needed(struct rte_eth_dev *dev) { @@ -41161,7 +51227,7 @@ index f1163ce8a9..9d473dbc22 100644 #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ -@@ -4321,27 +4335,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) +@@ -4321,27 +4324,30 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; @@ -41199,7 +51265,7 @@ index f1163ce8a9..9d473dbc22 100644 if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) return hns3_xmit_pkts_vec; -@@ -4349,19 +4366,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) +@@ -4349,19 +4355,14 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) return hns3_xmit_pkts_vec_sve; if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed) return hns3_xmit_pkts_simple; @@ -41220,7 +51286,7 @@ index f1163ce8a9..9d473dbc22 100644 return hns3_xmit_pkts; } -@@ -4401,7 +4413,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +@@ -4401,7 +4402,6 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct hns3_adapter *hns = eth_dev->data->dev_private; @@ -41228,7 +51294,7 @@ index f1163ce8a9..9d473dbc22 100644 if (hns->hw.adapter_state == HNS3_NIC_STARTED && __atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) { -@@ -4409,16 +4420,16 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +@@ -4409,16 +4409,16 @@ hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; eth_dev->tx_pkt_burst = hw->set_link_down ? 
rte_eth_pkt_burst_dummy : @@ -41248,7 +51314,7 @@ index f1163ce8a9..9d473dbc22 100644 hns3_eth_dev_fp_ops_config(eth_dev); } -@@ -4469,6 +4480,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4469,6 +4469,13 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -41262,7 +51328,7 @@ index f1163ce8a9..9d473dbc22 100644 ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX); if (ret) { hns3_err(hw, "fail to reset Rx queue %u, ret = %d.", -@@ -4477,6 +4495,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4477,6 +4484,9 @@ hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) return ret; } @@ -41272,7 +51338,7 @@ index f1163ce8a9..9d473dbc22 100644 ret = hns3_init_rxq(hns, rx_queue_id); if (ret) { hns3_err(hw, "fail to init Rx queue %u, ret = %d.", -@@ -4515,6 +4536,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +@@ -4515,6 +4525,13 @@ hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -41286,7 +51352,7 @@ index f1163ce8a9..9d473dbc22 100644 hns3_enable_rxq(rxq, false); hns3_rx_queue_release_mbufs(rxq); -@@ -4537,6 +4565,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4537,6 +4554,13 @@ hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -41300,7 +51366,7 @@ index f1163ce8a9..9d473dbc22 100644 ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX); if (ret) { hns3_err(hw, "fail to reset Tx queue %u, ret = %d.", -@@ -4563,6 +4598,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -4563,6 +4587,13 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return -ENOTSUP; rte_spinlock_lock(&hw->lock); @@ -41314,7 +51380,7 @@ index f1163ce8a9..9d473dbc22 100644 hns3_enable_txq(txq, false); hns3_tx_queue_release_mbufs(txq); /* -@@ -4756,10 +4798,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) +@@ -4756,10 +4787,8 @@ hns3_stop_tx_datapath(struct rte_eth_dev *dev) void hns3_start_tx_datapath(struct rte_eth_dev *dev) { @@ -41327,7 +51393,7 @@ index f1163ce8a9..9d473dbc22 100644 hns3_eth_dev_fp_ops_config(dev); if (rte_eal_process_type() == RTE_PROC_SECONDARY) -@@ -4767,3 +4807,31 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) +@@ -4767,3 +4796,31 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) hns3_mp_req_start_tx(dev); } @@ -41798,7 +51864,7 @@ index e1089b6bd0..d969164014 100644 .node_shaper_update = hns3_tm_node_shaper_update_wrap, }; diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c -index 7726a89d99..0ff334745d 100644 +index 7726a89d99..2049c32c4e 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.c +++ b/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev *dev, @@ -41877,7 +51943,21 @@ index 7726a89d99..0ff334745d 100644 uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; int status; -@@ -5993,14 +6006,16 @@ i40e_vsi_setup(struct i40e_pf *pf, +@@ -3708,8 +3721,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | + dev_info->tx_queue_offload_capa; ++ if (hw->mac.type == I40E_MAC_X722) { ++ 
dev_info->tx_offload_capa |= ++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ } ++ + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +@@ -5993,14 +6010,16 @@ i40e_vsi_setup(struct i40e_pf *pf, } } @@ -41901,7 +51981,7 @@ index 7726a89d99..0ff334745d 100644 } /* Get VSI BW information */ -@@ -6738,7 +6753,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) +@@ -6738,7 +6757,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) if (!ret) rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); @@ -41909,7 +51989,7 @@ index 7726a89d99..0ff334745d 100644 break; default: PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", -@@ -12123,40 +12137,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) +@@ -12123,40 +12141,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) return ret; } @@ -41974,7 +52054,7 @@ index fe943a45ff..9b806d130e 100644 if (is_pf) interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c -index 65a826d51c..67df77890a 100644 +index 65a826d51c..882152bd4a 100644 --- a/dpdk/drivers/net/i40e/i40e_flow.c +++ b/dpdk/drivers/net/i40e/i40e_flow.c @@ -1236,6 +1236,14 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, @@ -41992,11 +52072,37 @@ index 65a826d51c..67df77890a 100644 /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, +@@ -1700,8 +1708,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + + ether_type = rte_be_to_cpu_16(eth_spec->type); + +- if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || +- ether_type == RTE_ETHER_TYPE_IPV4 || ++ if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6 || + ether_type == i40e_get_outer_vlan(dev)) { + rte_flow_error_set(error, EINVAL, diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c -index 788ffb51c2..d96bbbb677 100644 +index 788ffb51c2..6522f2b810 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx.c -@@ -304,10 +304,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, +@@ -295,6 +295,15 @@ i40e_parse_tunneling_params(uint64_t ol_flags, + */ + *cd_tunneling |= (tx_offload.l2_len >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; ++ ++ /** ++ * Calculate the tunneling UDP checksum (only supported with X722). 
++ * Shall be set only if L4TUNT = 01b and EIPT is not zero ++ */ ++ if ((*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK) && ++ (*cd_tunneling & I40E_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) ++ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } + + static inline void +@@ -304,10 +313,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags, union i40e_tx_offload tx_offload) { /* Set MACLEN */ @@ -42008,7 +52114,7 @@ index 788ffb51c2..d96bbbb677 100644 *td_offset |= (tx_offload.l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; -@@ -1171,9 +1168,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -1171,9 +1177,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Fill in tunneling parameters if necessary */ cd_tunneling_params = 0; @@ -42022,7 +52128,7 @@ index 788ffb51c2..d96bbbb677 100644 /* Enable checksum offloading */ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) i40e_txd_enable_checksum(ol_flags, &td_cmd, -@@ -1918,6 +1918,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, +@@ -1918,6 +1927,12 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev, if (use_def_burst_func) ad->rx_bulk_alloc_allowed = false; i40e_set_rx_function(dev); @@ -42035,7 +52141,7 @@ index 788ffb51c2..d96bbbb677 100644 return 0; } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) { PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor" -@@ -2904,6 +2910,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) +@@ -2904,6 +2919,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) rxq->rx_hdr_len = 0; rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -42080,6 +52186,81 @@ index 2dfa04599c..da4a1bc03b 100644 */ uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +index 761edb9d20..60baff7970 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +@@ -276,46 +276,30 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- do { +- const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = 
_mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); +- raw_desc4_5 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); +- raw_desc2_3 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); +- raw_desc0_1 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); +- } while (0); ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c index 60c97d5331..74ff54c653 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c @@ -42164,7 +52345,7 @@ index 1edebab8dc..aa18650ffa 100644 struct iavf_parser_list dist_parser_list; struct iavf_parser_list ipsec_crypto_parser_list; diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c -index 3196210f2c..a12ea39444 100644 +index 3196210f2c..b6c3cd425d 100644 --- a/dpdk/drivers/net/iavf/iavf_ethdev.c +++ b/dpdk/drivers/net/iavf/iavf_ethdev.c @@ -131,6 +131,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, @@ -42176,7 +52357,28 @@ index 3196210f2c..a12ea39444 100644 static int iavf_dev_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops); static int iavf_set_mc_addr_list(struct rte_eth_dev *dev, -@@ -1065,6 +1067,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -607,7 +609,8 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev) + RTE_ETH_VLAN_FILTER_MASK | + RTE_ETH_VLAN_EXTEND_MASK); + if (err) { +- PMD_DRV_LOG(ERR, "Failed to update vlan offload"); ++ PMD_DRV_LOG(INFO, ++ "VLAN offloading is not supported, or offloading was refused by the PF"); + return err; + } + +@@ -683,9 +686,7 @@ iavf_dev_configure(struct rte_eth_dev *dev) + vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; + } + +- ret = iavf_dev_init_vlan(dev); +- if (ret) +- PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret); ++ iavf_dev_init_vlan(dev); + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (iavf_init_rss(ad) != 0) { +@@ -1065,6 +1066,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -42186,7 +52388,7 @@ index 3196210f2c..a12ea39444 100644 if (adapter->closed) return -1; -@@ -1075,8 +1080,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1075,8 +1079,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) if (adapter->stopped == 1) return 0; @@ -42195,7 +52397,7 @@ index 3196210f2c..a12ea39444 
100644 /* Disable the interrupt for Rx */ rte_intr_efd_disable(intr_handle); /* Rx interrupt vector mapping free */ -@@ -1089,6 +1092,8 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1089,6 +1091,8 @@ iavf_dev_stop(struct rte_eth_dev *dev) iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, false); @@ -42204,7 +52406,26 @@ index 3196210f2c..a12ea39444 100644 adapter->stopped = 1; dev->data->dev_started = 0; -@@ -1178,6 +1183,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1136,7 +1140,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +@@ -1145,6 +1148,10 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; + ++ /* X710 does not support outer udp checksum */ ++ if (adapter->hw.mac.type != IAVF_MAC_XL710) ++ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC) + dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; + +@@ -1178,6 +1185,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_max = IAVF_MAX_RING_DESC, .nb_min = IAVF_MIN_RING_DESC, .nb_align = IAVF_ALIGN_RING_DESC, @@ -42213,7 +52434,7 @@ index 3196210f2c..a12ea39444 100644 }; dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE; -@@ -1350,6 +1357,7 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1350,6 +1359,7 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); @@ -42221,7 +52442,7 @@ index 3196210f2c..a12ea39444 100644 int err; if (adapter->closed) -@@ -1368,6 +1376,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1368,6 +1378,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) err = iavf_add_del_vlan(adapter, vlan_id, on); if (err) return -EIO; @@ -42245,7 +52466,7 @@ index 3196210f2c..a12ea39444 100644 return 0; } -@@ -2607,6 +2632,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2607,6 +2634,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) adapter->dev_data = eth_dev->data; adapter->stopped = 1; @@ -42255,7 +52476,7 @@ index 3196210f2c..a12ea39444 100644 if (iavf_init_vf(eth_dev) != 0) { PMD_INIT_LOG(ERR, "Init vf failed"); return -1; -@@ -2634,8 +2662,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2634,8 +2664,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); @@ -42264,7 +52485,7 @@ index 3196210f2c..a12ea39444 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* register callback func to eal lib */ -@@ -2667,18 +2693,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2667,18 +2695,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) ret = iavf_security_ctx_create(adapter); if (ret) { PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); @@ -42286,7 +52507,7 @@ index 3196210f2c..a12ea39444 100644 /* Start device watchdog */ iavf_dev_watchdog_enable(adapter); -@@ -2686,7 +2713,23 @@ 
iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2686,7 +2715,23 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) return 0; @@ -42310,7 +52531,7 @@ index 3196210f2c..a12ea39444 100644 rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; -@@ -2732,6 +2775,18 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2732,6 +2777,18 @@ iavf_dev_close(struct rte_eth_dev *dev) if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) iavf_config_promisc(adapter, false, false); @@ -42362,7 +52583,7 @@ index ae6fb38594..cf4d677101 100644 uint8_t tmp_c = 0; int i, j; diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c -index 868921cac5..26459088af 100644 +index 868921cac5..8da41bb68e 100644 --- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c @@ -828,6 +828,7 @@ iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter, @@ -42373,6 +52594,19 @@ index 868921cac5..26459088af 100644 /* send virtual channel request to add SA to hardware database */ rc = iavf_ipsec_crypto_request(adapter, +@@ -1484,8 +1485,11 @@ iavf_security_ctx_create(struct iavf_adapter *adapter) + if (adapter->security_ctx == NULL) { + adapter->security_ctx = rte_malloc("iavf_security_ctx", + sizeof(struct iavf_security_ctx), 0); +- if (adapter->security_ctx == NULL) ++ if (adapter->security_ctx == NULL) { ++ rte_free(adapter->vf.eth_dev->security_ctx); ++ adapter->vf.eth_dev->security_ctx = NULL; + return -ENOMEM; ++ } + } + + return 0; diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c index cf87a6beda..6a0cf31a4c 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx.c @@ -42542,10 +52776,96 @@ index a6ad88885b..180f7ec108 100644 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK) diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -index 862f6eb0c0..b4ebac9d34 100644 +index 862f6eb0c0..7bf22d5c0d 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -@@ -1074,7 +1074,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, +@@ -192,62 +192,30 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- { +- const __m128i raw_desc7 = +- _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = +- _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = +- _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = +- _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = +- _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = +- _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = +- _mm_load_si128((void *)(rxdp + 1)); +- 
rte_compiler_barrier(); +- const __m128i raw_desc0 = +- _mm_load_si128((void *)(rxdp + 0)); ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + +- raw_desc6_7 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc6), +- raw_desc7, 1); +- raw_desc4_5 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc4), +- raw_desc5, 1); +- raw_desc2_3 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc2), +- raw_desc3, 1); +- raw_desc0_1 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc0), +- raw_desc1, 1); +- } ++ const __m256i raw_desc6_7 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; +@@ -1074,7 +1042,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -43089,9 +53409,20 @@ index f92daf97f2..aeffb07cca 100644 PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME"); diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h -index 5a817982b4..93a3a6007f 100644 +index 5a817982b4..534649802f 100644 --- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +@@ -1702,8 +1702,8 @@ struct ice_aqc_link_topo_addr { + #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) + /* Used to decode the handle field */ + #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) +-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) +-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 ++#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM 0 ++#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ BIT(9) + #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 + /* In case of a Mezzanine type */ + #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ @@ -2002,8 +2002,8 @@ struct ice_aqc_lldp_get_mib { #define ICE_AQ_LLDP_DCBX_S 6 #define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S) @@ -43103,8 +53434,25 @@ index 5a817982b4..93a3a6007f 100644 /* The following bytes are reserved for the Get LLDP MIB command (0x0A00) * and in the LLDP MIB Change Event (0x0A01). They are valid for the * Get LLDP MIB (0x0A00) response only. 
+diff --git a/dpdk/drivers/net/ice/base/ice_bitops.h b/dpdk/drivers/net/ice/base/ice_bitops.h +index c4ae2b9c8e..81f9dadc07 100644 +--- a/dpdk/drivers/net/ice/base/ice_bitops.h ++++ b/dpdk/drivers/net/ice/base/ice_bitops.h +@@ -411,10 +411,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits) + * Note that this function assumes it is operating on a bitmap declared using + * ice_declare_bitmap. + */ +-static inline int ++static inline u16 + ice_bitmap_hweight(ice_bitmap_t *bm, u16 size) + { +- int count = 0; ++ u16 count = 0; + u16 bit = 0; + + while (size > (bit = ice_find_next_bit(bm, size, bit))) { diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c -index 5391bd666b..a327a4b374 100644 +index 5391bd666b..92a520daf2 100644 --- a/dpdk/drivers/net/ice/base/ice_common.c +++ b/dpdk/drivers/net/ice/base/ice_common.c @@ -167,6 +167,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) @@ -43127,11 +53475,253 @@ index 5391bd666b..a327a4b374 100644 u8 clk_freq; ice_debug(hw, ICE_DBG_INIT, "1588 func caps: raw value %x\n", number); +@@ -3803,8 +3803,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + break; + case ICE_FEC_DIS_AUTO: + /* Set No FEC and auto FEC */ +- if (!ice_fw_supports_fec_dis_auto(hw)) +- return ICE_ERR_NOT_SUPPORTED; ++ if (!ice_fw_supports_fec_dis_auto(hw)) { ++ status = ICE_ERR_NOT_SUPPORTED; ++ goto out; ++ } + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS; + /* fall-through */ + case ICE_FEC_AUTO: +@@ -4817,7 +4819,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + + ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); + +- dest_byte &= ~(mask); ++ dest_byte &= mask; + + dest_byte >>= shift_width; + +@@ -4857,7 +4859,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_word &= ~(CPU_TO_LE16(mask)); ++ src_word &= CPU_TO_LE16(mask); + + /* get the data back into host order before shifting */ + dest_word = LE16_TO_CPU(src_word); +@@ -4908,7 +4910,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_dword &= ~(CPU_TO_LE32(mask)); ++ src_dword &= CPU_TO_LE32(mask); + + /* get the data back into host order before shifting */ + dest_dword = LE32_TO_CPU(src_dword); +@@ -4959,7 +4961,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_qword &= ~(CPU_TO_LE64(mask)); ++ src_qword &= CPU_TO_LE64(mask); + + /* get the data back into host order before shifting */ + dest_qword = LE64_TO_CPU(src_qword); +diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c +index b6bc0062a3..e1c5e00c91 100644 +--- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c ++++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c +@@ -1534,16 +1534,14 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK(mask_idx); +- val = (idx << GLQF_HMASK_MSK_INDEX_S) & +- GLQF_HMASK_MSK_INDEX_M; +- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; ++ val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; ++ val |= ((u32)mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK(mask_idx); + 
val = (idx << GLQF_FDMASK_MSK_INDEX_S) & + GLQF_FDMASK_MSK_INDEX_M; +- val |= (mask << GLQF_FDMASK_MASK_S) & +- GLQF_FDMASK_MASK_M; ++ val |= ((u32)mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", +diff --git a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h +index be6d88f0ca..cd12d47d9b 100644 +--- a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h ++++ b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h +@@ -1074,10 +1074,9 @@ struct ice_tx_ctx_desc { + __le64 qw1; + }; + +-#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */ +-#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */ +-#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */ +-#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */ ++#define ICE_TX_GCS_DESC_START 0 /* 8 BITS */ ++#define ICE_TX_GCS_DESC_OFFSET 8 /* 4 BITS */ ++#define ICE_TX_GCS_DESC_TYPE 12 /* 3 BITS */ + + #define ICE_TXD_CTX_QW1_DTYPE_S 0 + #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) +diff --git a/dpdk/drivers/net/ice/base/ice_nvm.c b/dpdk/drivers/net/ice/base/ice_nvm.c +index 6550dda557..6da52f4d58 100644 +--- a/dpdk/drivers/net/ice/base/ice_nvm.c ++++ b/dpdk/drivers/net/ice/base/ice_nvm.c +@@ -72,6 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + enum ice_status status; + u32 inlen = *length; + u32 bytes_read = 0; ++ int retry_cnt = 0; + bool last_cmd; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); +@@ -106,11 +107,24 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + offset, (u16)read_size, + data + bytes_read, last_cmd, + read_shadow_ram, NULL); +- if (status) +- break; +- +- bytes_read += read_size; +- offset += read_size; ++ if (status) { ++ if (hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY || ++ retry_cnt > ICE_SQ_SEND_MAX_EXECUTE) ++ break; ++ ice_debug(hw, ICE_DBG_NVM, ++ "NVM read EBUSY error, retry %d\n", ++ retry_cnt + 1); ++ ice_release_nvm(hw); ++ msleep(ICE_SQ_SEND_DELAY_TIME_MS); ++ status = ice_acquire_nvm(hw, ICE_RES_READ); ++ if (status) ++ break; ++ retry_cnt++; ++ } else { ++ bytes_read += read_size; ++ offset += read_size; ++ retry_cnt = 0; ++ } + } while (!last_cmd); + + *length = bytes_read; +@@ -474,7 +488,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + { + enum ice_status status; + u16 pfa_len, pfa_ptr; +- u16 next_tlv; ++ u32 next_tlv; + + status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); + if (status != ICE_SUCCESS) { +@@ -490,25 +504,30 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + * of TLVs to find the requested one. 
+ */ + next_tlv = pfa_ptr + 1; +- while (next_tlv < pfa_ptr + pfa_len) { ++ while (next_tlv < ((u32)pfa_ptr + pfa_len)) { + u16 tlv_sub_module_type; + u16 tlv_len; + + /* Read TLV type */ +- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); ++ status = ice_read_sr_word(hw, (u16)next_tlv, ++ &tlv_sub_module_type); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); + break; + } + /* Read TLV length */ +- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); ++ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); + break; + } ++ if (tlv_len > pfa_len) { ++ ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n"); ++ return ICE_ERR_INVAL_SIZE; ++ } + if (tlv_sub_module_type == module_type) { + if (tlv_len) { +- *module_tlv = next_tlv; ++ *module_tlv = (u16)next_tlv; + *module_tlv_len = tlv_len; + return ICE_SUCCESS; + } +diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.c b/dpdk/drivers/net/ice/base/ice_ptp_hw.c +index a0b8af1b94..0f02d2fcbe 100644 +--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.c ++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.c +@@ -2839,8 +2839,8 @@ ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, + val &= ~TS_CMD_MASK; + val |= cmd_val; + +- status = ice_write_phy_reg_e822_lp(hw, port, P_REG_RX_TMR_CMD, val, +- lock_sbq); ++ status = ice_write_phy_reg_e822_lp(hw, port, P_REG_RX_TMR_CMD, ++ val | TS_CMD_RX_TYPE, lock_sbq); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, status %d\n", + status); +diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.h b/dpdk/drivers/net/ice/base/ice_ptp_hw.h +index 09c236e7e0..c2a3e53103 100644 +--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.h ++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.h +@@ -295,6 +295,9 @@ enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw); + #define TS_CMD_MASK_E810 0xFF + #define TS_CMD_MASK 0xF + #define SYNC_EXEC_CMD 0x3 ++#define TS_CMD_RX_TYPE_S 0x4 ++#define TS_CMD_RX_TYPE MAKEMASK(0x18, TS_CMD_RX_TYPE_S) ++ + + /* Macros to derive port low and high addresses on both quads */ + #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c -index a526c8f32c..b16b27dcbf 100644 +index a526c8f32c..25bf3fca76 100644 --- a/dpdk/drivers/net/ice/base/ice_sched.c +++ b/dpdk/drivers/net/ice/base/ice_sched.c -@@ -1417,11 +1417,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) +@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, + if (!root) + return ICE_ERR_NO_MEMORY; + +- /* coverity[suspicious_sizeof] */ + root->children = (struct ice_sched_node **) +- ice_calloc(hw, hw->max_children[0], sizeof(*root)); ++ ice_calloc(hw, hw->max_children[0], sizeof(*root->children)); + if (!root->children) { + ice_free(hw, root); + return ICE_ERR_NO_MEMORY; +@@ -180,9 +179,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, + if (!node) + return ICE_ERR_NO_MEMORY; + if (hw->max_children[layer]) { +- /* coverity[suspicious_sizeof] */ + node->children = (struct ice_sched_node **) +- ice_calloc(hw, hw->max_children[layer], sizeof(*node)); ++ ice_calloc(hw, hw->max_children[layer], ++ sizeof(*node->children)); + if (!node->children) { + ice_free(hw, node); + return ICE_ERR_NO_MEMORY; +@@ -1057,11 +1056,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes = 
num_nodes; + enum ice_status status = ICE_SUCCESS; ++ u32 temp; + + *num_nodes_added = 0; + while (*num_nodes_added < num_nodes) { + u16 max_child_nodes, num_added = 0; +- u32 temp; + + status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, + layer, new_num_nodes, +@@ -1417,11 +1416,6 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; @@ -43143,7 +53733,7 @@ index a526c8f32c..b16b27dcbf 100644 switch (clk_src) { case PSM_CLK_SRC_367_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; -@@ -1435,11 +1430,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) +@@ -1435,11 +1429,12 @@ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) case PSM_CLK_SRC_390_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; break; @@ -43177,6 +53767,45 @@ index 3724ef33a8..64ed5e0f9b 100644 struct rl_profile_params { u32 bw; /* in Kbps */ u16 rl_multiplier; +diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c +index a2581f404d..073c139c43 100644 +--- a/dpdk/drivers/net/ice/base/ice_switch.c ++++ b/dpdk/drivers/net/ice/base/ice_switch.c +@@ -4339,7 +4339,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ +- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) ++ if (cur_fltr->vsi_handle == new_fltr->vsi_handle) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->vsi_handle; +@@ -4387,7 +4387,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + + /* A rule already exists with the new VSI being added */ + if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) +- return ICE_SUCCESS; ++ return ICE_ERR_ALREADY_EXISTS; + + /* Update the previously created VSI list set with + * the new VSI ID passed in +@@ -7124,7 +7124,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles, + ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); + + /* return number of free indexes */ +- return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); ++ return ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); + } + + /** +@@ -7822,6 +7822,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + enum ice_status status = ICE_SUCCESS; + struct ice_sw_recipe *rm; + u8 i; ++ u16 cnt; + + if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt) + return ICE_ERR_PARAM; diff --git a/dpdk/drivers/net/ice/ice_dcf.c b/dpdk/drivers/net/ice/ice_dcf.c index 1c3d22ae0f..6f7e103c3b 100644 --- a/dpdk/drivers/net/ice/ice_dcf.c @@ -43510,10 +54139,18 @@ index b9fcfc80ad..af281f069a 100644 + } +} diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c -index 0bc739daf0..8ce7d0ebaa 100644 +index 0bc739daf0..ba19c16384 100644 --- a/dpdk/drivers/net/ice/ice_ethdev.c +++ b/dpdk/drivers/net/ice/ice_ethdev.c -@@ -2399,6 +2399,17 @@ ice_dev_init(struct rte_eth_dev *dev) +@@ -1773,6 +1773,7 @@ ice_pf_setup(struct ice_pf *pf) + } + + pf->main_vsi = vsi; ++ rte_spinlock_init(&pf->link_lock); + + return 0; + } +@@ -2399,6 +2400,17 @@ ice_dev_init(struct rte_eth_dev *dev) /* Initialize TM configuration */ ice_tm_conf_init(dev); @@ -43531,7 +54168,7 @@ index 0bc739daf0..8ce7d0ebaa 100644 if (!ad->is_safe_mode) { ret = ice_flow_init(ad); if (ret) { -@@ -2415,6 +2426,9 @@ ice_dev_init(struct rte_eth_dev *dev) +@@ -2415,6 +2427,9 @@ ice_dev_init(struct rte_eth_dev *dev) pf->supported_rxdid = ice_get_supported_rxdid(hw); @@ -43541,7 +54178,7 @@ index 
0bc739daf0..8ce7d0ebaa 100644 return 0; err_flow_init: -@@ -3298,7 +3312,8 @@ static int ice_init_rss(struct ice_pf *pf) +@@ -3298,7 +3313,8 @@ static int ice_init_rss(struct ice_pf *pf) rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf; nb_q = dev_data->nb_rx_queues; @@ -43551,7 +54188,7 @@ index 0bc739daf0..8ce7d0ebaa 100644 vsi->rss_lut_size = pf->hash_lut_size; if (nb_q == 0) { -@@ -3339,7 +3354,10 @@ static int ice_init_rss(struct ice_pf *pf) +@@ -3339,7 +3355,10 @@ static int ice_init_rss(struct ice_pf *pf) vsi->rss_key_size)); rte_memcpy(key.standard_rss_key, vsi->rss_key, @@ -43563,7 +54200,42 @@ index 0bc739daf0..8ce7d0ebaa 100644 ret = ice_aq_set_rss_key(hw, vsi->idx, &key); if (ret) goto out; -@@ -3590,6 +3608,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) +@@ -3571,17 +3590,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) + return 0; + } + ++static enum ice_status ++ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse, ++ struct ice_link_status *link) ++{ ++ struct ice_hw *hw = ICE_PF_TO_HW(pf); ++ int ret; ++ ++ rte_spinlock_lock(&pf->link_lock); ++ ++ ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL); ++ ++ rte_spinlock_unlock(&pf->link_lock); ++ ++ return ret; ++} ++ + static void + ice_get_init_link_status(struct rte_eth_dev *dev) + { +- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; + struct ice_link_status link_status; + int ret; + +- ret = ice_aq_get_link_info(hw->port_info, enable_lse, +- &link_status, NULL); ++ ret = ice_get_link_info_safe(pf, enable_lse, &link_status); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get link info"); + pf->init_link_up = false; +@@ -3590,6 +3623,8 @@ ice_get_init_link_status(struct rte_eth_dev *dev) if (link_status.link_info & ICE_AQ_LINK_UP) pf->init_link_up = true; @@ -43572,7 +54244,7 @@ index 0bc739daf0..8ce7d0ebaa 100644 } static int -@@ -3660,6 +3680,16 @@ ice_dev_start(struct rte_eth_dev *dev) +@@ -3660,6 +3695,16 @@ ice_dev_start(struct rte_eth_dev *dev) } } @@ -43589,7 +54261,20 @@ index 0bc739daf0..8ce7d0ebaa 100644 /* program Rx queues' context in hardware*/ for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) { ret = ice_rx_queue_start(dev, nb_rxq); -@@ -3856,6 +3886,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -3814,7 +3859,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO; + dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; + } + +@@ -3856,6 +3905,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_max = ICE_MAX_RING_DESC, .nb_min = ICE_MIN_RING_DESC, .nb_align = ICE_ALIGN_RING_DESC, @@ -43598,18 +54283,30 @@ index 0bc739daf0..8ce7d0ebaa 100644 }; dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M | -@@ -3926,8 +3958,8 @@ ice_atomic_write_link_status(struct rte_eth_dev *dev, +@@ -3926,9 +3977,9 @@ ice_atomic_write_link_status(struct rte_eth_dev *dev, static int ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 
/* 1s (10 * 100ms) in total */ +- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); +#define CHECK_INTERVAL 50 /* 50ms */ +#define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct ice_link_status link_status; struct rte_eth_link link, old; -@@ -5800,11 +5832,6 @@ ice_timesync_enable(struct rte_eth_dev *dev) + int status; +@@ -3942,8 +3993,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) + + do { + /* Get link status information from hardware */ +- status = ice_aq_get_link_info(hw->port_info, enable_lse, +- &link_status, NULL); ++ status = ice_get_link_info_safe(pf, enable_lse, &link_status); + if (status != ICE_SUCCESS) { + link.link_speed = RTE_ETH_SPEED_NUM_100M; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; +@@ -5800,11 +5850,6 @@ ice_timesync_enable(struct rte_eth_dev *dev) return -1; } @@ -43621,7 +54318,7 @@ index 0bc739daf0..8ce7d0ebaa 100644 if (hw->func_caps.ts_func_info.src_tmr_owned) { ret = ice_ptp_init_phc(hw); if (ret) { -@@ -5925,16 +5952,17 @@ ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +@@ -5925,16 +5970,17 @@ ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -43644,7 +54341,7 @@ index 0bc739daf0..8ce7d0ebaa 100644 } time = ((uint64_t)hi << 32) | lo; -@@ -5950,6 +5978,7 @@ ice_timesync_disable(struct rte_eth_dev *dev) +@@ -5950,6 +5996,7 @@ ice_timesync_disable(struct rte_eth_dev *dev) struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -43652,7 +54349,7 @@ index 0bc739daf0..8ce7d0ebaa 100644 uint64_t val; uint8_t lport; -@@ -5957,12 +5986,12 @@ ice_timesync_disable(struct rte_eth_dev *dev) +@@ -5957,12 +6004,12 @@ ice_timesync_disable(struct rte_eth_dev *dev) ice_clear_phy_tstamp(hw, lport, 0); @@ -43669,11 +54366,63 @@ index 0bc739daf0..8ce7d0ebaa 100644 ad->ptp_ena = 0; +diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h +index c8311be179..9799cad394 100644 +--- a/dpdk/drivers/net/ice/ice_ethdev.h ++++ b/dpdk/drivers/net/ice/ice_ethdev.h +@@ -351,8 +351,6 @@ struct ice_fdir_filter_conf { + u8 pkt_len; + }; + +-#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16) +- + struct ice_fdir_fltr_pattern { + enum ice_fltr_ptype flow_type; + +@@ -550,6 +548,10 @@ struct ice_pf { + uint64_t supported_rxdid; /* bitmap for supported RXDID */ + uint64_t rss_hf; + struct ice_tm_conf tm_conf; ++ /* lock prevent race condition between lsc interrupt handler ++ * and link status update during dev_start. 
++ */ ++ rte_spinlock_t link_lock; + }; + + #define ICE_MAX_QUEUE_NUM 2048 diff --git a/dpdk/drivers/net/ice/ice_fdir_filter.c b/dpdk/drivers/net/ice/ice_fdir_filter.c -index 7914ba9407..81e88c1dd8 100644 +index 7914ba9407..7e97547d8b 100644 --- a/dpdk/drivers/net/ice/ice_fdir_filter.c +++ b/dpdk/drivers/net/ice/ice_fdir_filter.c -@@ -1876,7 +1876,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, +@@ -377,12 +377,17 @@ ice_fdir_init_filter_list(struct ice_pf *pf) + { + struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id]; + struct ice_fdir_info *fdir_info = &pf->fdir; ++ struct ice_hw *hw = &pf->adapter->hw; + char fdir_hash_name[RTE_HASH_NAMESIZE]; ++ const uint32_t max_fd_filter_entries = ++ hw->func_caps.fd_fltr_guar + hw->func_caps.fd_fltr_best_effort; ++ /* dimension hash table as max filters + 12.5% to ensure a little headroom */ ++ const uint32_t hash_table_entries = max_fd_filter_entries + (max_fd_filter_entries >> 3); + int ret; + + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, +- .entries = ICE_MAX_FDIR_FILTER_NUM, ++ .entries = hash_table_entries, + .key_len = sizeof(struct ice_fdir_fltr_pattern), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, +@@ -400,7 +405,7 @@ ice_fdir_init_filter_list(struct ice_pf *pf) + } + fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map", + sizeof(*fdir_info->hash_map) * +- ICE_MAX_FDIR_FILTER_NUM, ++ hash_table_entries, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, +@@ -1876,7 +1881,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, (uint8_t *)(uintptr_t)raw_mask->pattern; uint8_t *tmp_spec, *tmp_mask; uint16_t tmp_val = 0; @@ -43716,10 +54465,10 @@ index d496c28dec..91bf1d6fcb 100644 if (ad->devargs.pipe_mode_support) { if (attr->priority == 0) diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c -index f35727856e..52646e9408 100644 +index f35727856e..94b104fb36 100644 --- a/dpdk/drivers/net/ice/ice_hash.c +++ b/dpdk/drivers/net/ice/ice_hash.c -@@ -653,8 +653,8 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, +@@ -653,14 +653,14 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, const struct rte_flow_item_raw *raw_spec, *raw_mask; struct ice_parser_profile prof; struct ice_parser_result rslt; @@ -43728,9 +54477,58 @@ index f35727856e..52646e9408 100644 - uint8_t spec_len, pkt_len; uint8_t tmp_val = 0; uint8_t tmp_c = 0; - int i, j; +- int i, j; ++ int i, j, ret = 0; + + if (ad->psr == NULL) +- return -rte_errno; ++ return -ENOTSUP; + + raw_spec = item->spec; + raw_mask = item->mask; +@@ -677,8 +677,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + return -ENOMEM; + + msk_buf = rte_zmalloc(NULL, pkt_len, 0); +- if (!msk_buf) ++ if (!msk_buf) { ++ rte_free(pkt_buf); + return -ENOMEM; ++ } + + /* convert string to int array */ + for (i = 0, j = 0; i < spec_len; i += 2, j++) { +@@ -715,18 +717,22 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + msk_buf[j] = tmp_val * 16 + tmp_c - '0'; + } + +- if (ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt)) +- return -rte_errno; ++ ret = ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt); ++ if (ret) ++ goto free_mem; + +- if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf, +- pkt_len, ICE_BLK_RSS, true, &prof)) +- return -rte_errno; ++ ret = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, ++ pkt_len, ICE_BLK_RSS, true, &prof); ++ if (ret) ++ goto free_mem; + + rte_memcpy(&meta->raw.prof, &prof, sizeof(prof)); + ++free_mem: + rte_free(pkt_buf); + rte_free(msk_buf); +- return 
0; ++ ++ return ret; + } + + static void diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c -index 0ea0045836..9a653cbc4a 100644 +index 0ea0045836..f73065b81c 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.c +++ b/dpdk/drivers/net/ice/ice_rxtx.c @@ -259,7 +259,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) @@ -43808,13 +54606,15 @@ index 0ea0045836..9a653cbc4a 100644 rxq->time_high = rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); if (unlikely(is_tsinit)) { -@@ -2727,7 +2734,8 @@ ice_parse_tunneling_params(uint64_t ol_flags, +@@ -2726,8 +2733,9 @@ ice_parse_tunneling_params(uint64_t ol_flags, + * Calculate the tunneling UDP checksum. * Shall be set only if L4TUNT = 01b and EIPT is not zero */ - if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && +- if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && - (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING)) -+ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && -+ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) ++ if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && ++ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; } @@ -43964,8 +54764,99 @@ index 4947d5c25f..bd2c4abec9 100644 } ad->time_hw = ((uint64_t)hi << 32) | lo; +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +index 31d6af42fd..5d591f9834 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +@@ -254,62 +254,30 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- { +- const __m128i raw_desc7 = +- _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = +- _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = +- _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = +- _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = +- _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = +- _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = +- _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = +- _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc6), +- raw_desc7, 1); +- raw_desc4_5 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc4), +- raw_desc5, 1); +- raw_desc2_3 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc2), +- raw_desc3, 1); +- raw_desc0_1 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc0), +- raw_desc1, 1); +- } ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ 
rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -index eec6ea2134..55840cf170 100644 +index eec6ea2134..4b73465af5 100644 --- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h @@ -72,7 +72,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs, @@ -43977,6 +54868,81 @@ index eec6ea2134..55840cf170 100644 return pkt_idx; } +@@ -251,6 +251,10 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_TSO | \ ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) + + #define ICE_TX_VECTOR_OFFLOAD ( \ +diff --git a/dpdk/drivers/net/ice/ice_tm.c b/dpdk/drivers/net/ice/ice_tm.c +index 34a0bfcff8..c24642c867 100644 +--- a/dpdk/drivers/net/ice/ice_tm.c ++++ b/dpdk/drivers/net/ice/ice_tm.c +@@ -58,8 +58,15 @@ void + ice_tm_conf_uninit(struct rte_eth_dev *dev) + { + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); ++ struct ice_tm_shaper_profile *shaper_profile; + struct ice_tm_node *tm_node; + ++ /* clear profile */ ++ while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) { ++ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node); ++ rte_free(shaper_profile); ++ } ++ + /* clear node configuration */ + while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) { + TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); +@@ -648,6 +655,8 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, + uint16_t buf_size = ice_struct_size(buf, txqs, 1); + + buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf)); ++ if (buf == NULL) ++ return -ENOMEM; + + queue_parent_node = queue_sched_node->parent; + buf->src_teid = queue_parent_node->info.node_teid; +@@ -659,6 +668,7 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, + NULL, buf, buf_size, &txqs_moved, NULL); + if (ret || txqs_moved == 0) { + PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id); ++ rte_free(buf); + return ICE_ERR_PARAM; + } + +@@ -668,12 +678,14 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, + } else { + PMD_DRV_LOG(ERR, "invalid children number %d for queue %u", + queue_parent_node->num_children, queue_id); ++ rte_free(buf); + return ICE_ERR_PARAM; + } + 
dst_node->children[dst_node->num_children++] = queue_sched_node; + queue_sched_node->parent = dst_node; + ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info); + ++ rte_free(buf); + return ret; + } + +diff --git a/dpdk/drivers/net/ice/version.map b/dpdk/drivers/net/ice/version.map +index d70c250e9a..72d77458ba 100644 +--- a/dpdk/drivers/net/ice/version.map ++++ b/dpdk/drivers/net/ice/version.map +@@ -7,5 +7,7 @@ EXPERIMENTAL { + + # added in 19.11 + rte_pmd_ice_dump_package; ++ ++ # added in 22.11 + rte_pmd_ice_dump_switch; + }; diff --git a/dpdk/drivers/net/idpf/idpf_ethdev.c b/dpdk/drivers/net/idpf/idpf_ethdev.c index 8b347631ce..b31cb47e90 100644 --- a/dpdk/drivers/net/idpf/idpf_ethdev.c @@ -44206,6 +55172,244 @@ index ffd219b0df..160865e911 100644 } igc_config_collision_dist(hw); +diff --git a/dpdk/drivers/net/ionic/ionic_ethdev.c b/dpdk/drivers/net/ionic/ionic_ethdev.c +index 340fd0cd59..4ec9598b8e 100644 +--- a/dpdk/drivers/net/ionic/ionic_ethdev.c ++++ b/dpdk/drivers/net/ionic/ionic_ethdev.c +@@ -561,7 +561,7 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; +- int i, num; ++ int i, j, num; + uint16_t tbl_sz = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz); + + IONIC_PRINT_CALL(); +@@ -582,9 +582,10 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + num = reta_size / RTE_ETH_RETA_GROUP_SIZE; + + for (i = 0; i < num; i++) { +- memcpy(reta_conf->reta, +- &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE], +- RTE_ETH_RETA_GROUP_SIZE); ++ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) { ++ reta_conf->reta[j] = ++ lif->rss_ind_tbl[(i * RTE_ETH_RETA_GROUP_SIZE) + j]; ++ } + reta_conf++; + } + +@@ -969,19 +970,21 @@ ionic_dev_close(struct rte_eth_dev *eth_dev) + + ionic_lif_stop(lif); + +- ionic_lif_free_queues(lif); +- + IONIC_PRINT(NOTICE, "Removing device %s", eth_dev->device->name); + if (adapter->intf->unconfigure_intr) + (*adapter->intf->unconfigure_intr)(adapter); + +- rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); +- + ionic_port_reset(adapter); + ionic_reset(adapter); ++ ++ ionic_lif_free_queues(lif); ++ ionic_lif_deinit(lif); ++ ionic_lif_free(lif); /* Does not free LIF object */ ++ + if (adapter->intf->unmap_bars) + (*adapter->intf->unmap_bars)(adapter); + ++ lif->adapter = NULL; + rte_free(adapter); + + return 0; +@@ -1058,21 +1061,18 @@ err: + static int + eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev) + { +- struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); +- struct ionic_adapter *adapter = lif->adapter; +- + IONIC_PRINT_CALL(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + +- adapter->lif = NULL; +- +- ionic_lif_deinit(lif); +- ionic_lif_free(lif); ++ if (eth_dev->state != RTE_ETH_DEV_UNUSED) ++ ionic_dev_close(eth_dev); + +- if (!(lif->state & IONIC_LIF_F_FW_RESET)) +- ionic_lif_reset(lif); ++ eth_dev->dev_ops = NULL; ++ eth_dev->rx_pkt_burst = NULL; ++ eth_dev->tx_pkt_burst = NULL; ++ eth_dev->tx_pkt_prepare = NULL; + + return 0; + } +@@ -1227,17 +1227,18 @@ eth_ionic_dev_remove(struct rte_device *rte_dev) + { + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; ++ int ret = 0; + + /* Adapter lookup is using the eth_dev name */ + snprintf(name, sizeof(name), "%s_lif", rte_dev->name); + + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev) +- ionic_dev_close(eth_dev); ++ ret = rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); + 
else + IONIC_PRINT(DEBUG, "Cannot find device %s", rte_dev->name); + +- return 0; ++ return ret; + } + + RTE_LOG_REGISTER_DEFAULT(ionic_logtype, NOTICE); +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c +index b9e73b4871..170d3b0802 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx.c +@@ -26,38 +26,40 @@ + #include "ionic_logs.h" + + static void +-ionic_empty_array(void **array, uint32_t cnt, uint16_t idx) ++ionic_empty_array(void **array, uint32_t free_idx, uint32_t zero_idx) + { + uint32_t i; + +- for (i = idx; i < cnt; i++) ++ for (i = 0; i < free_idx; i++) + if (array[i]) + rte_pktmbuf_free_seg(array[i]); + +- memset(array, 0, sizeof(void *) * cnt); ++ memset(array, 0, sizeof(void *) * zero_idx); + } + + static void __rte_cold + ionic_tx_empty(struct ionic_tx_qcq *txq) + { + struct ionic_queue *q = &txq->qcq.q; ++ uint32_t info_len = q->num_descs * q->num_segs; + +- ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); ++ ionic_empty_array(q->info, info_len, info_len); + } + + static void __rte_cold + ionic_rx_empty(struct ionic_rx_qcq *rxq) + { + struct ionic_queue *q = &rxq->qcq.q; ++ uint32_t info_len = q->num_descs * q->num_segs; + + /* + * Walk the full info array so that the clean up includes any + * fragments that were left dangling for later reuse + */ +- ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); ++ ionic_empty_array(q->info, info_len, info_len); + +- ionic_empty_array((void **)rxq->mbs, +- IONIC_MBUF_BULK_ALLOC, rxq->mb_idx); ++ ionic_empty_array((void **)rxq->mbs, rxq->mb_idx, ++ IONIC_MBUF_BULK_ALLOC); + rxq->mb_idx = 0; + } + +@@ -752,7 +754,7 @@ ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) + { + struct ionic_rx_qcq *rxq = rx_queue; + struct ionic_qcq *qcq = &rxq->qcq; +- struct ionic_rxq_comp *cq_desc; ++ volatile struct ionic_rxq_comp *cq_desc; + uint16_t mask, head, tail, pos; + bool done_color; + +@@ -791,7 +793,7 @@ ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) + { + struct ionic_tx_qcq *txq = tx_queue; + struct ionic_qcq *qcq = &txq->qcq; +- struct ionic_txq_comp *cq_desc; ++ volatile struct ionic_txq_comp *cq_desc; + uint16_t mask, head, tail, pos, cq_pos; + bool done_color; + +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c +index ab8e56e91c..241b6f8587 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c +@@ -27,7 +27,8 @@ ionic_tx_flush_sg(struct ionic_tx_qcq *txq) + struct ionic_cq *cq = &txq->qcq.cq; + struct ionic_queue *q = &txq->qcq.q; + struct rte_mbuf *txm; +- struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_txq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_txq_comp *cq_desc; + void **info; + uint32_t i; + +@@ -252,7 +253,7 @@ ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + static __rte_always_inline void + ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq, +- struct ionic_rxq_comp *cq_desc, ++ volatile struct ionic_rxq_comp *cq_desc, + struct ionic_rx_service *rx_svc) + { + struct ionic_queue *q = &rxq->qcq.q; +@@ -438,7 +439,8 @@ ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do, + struct ionic_cq *cq = &rxq->qcq.cq; + struct ionic_queue *q = &rxq->qcq.q; + struct ionic_rxq_desc *q_desc_base = q->base; +- struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_rxq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_rxq_comp *cq_desc; + 
uint32_t work_done = 0; + uint64_t then, now, hz, delta; + +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c +index 5f81856256..0992177afc 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c +@@ -27,7 +27,8 @@ ionic_tx_flush(struct ionic_tx_qcq *txq) + struct ionic_cq *cq = &txq->qcq.cq; + struct ionic_queue *q = &txq->qcq.q; + struct rte_mbuf *txm; +- struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_txq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_txq_comp *cq_desc; + void **info; + + cq_desc = &cq_desc_base[cq->tail_idx]; +@@ -225,7 +226,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + static __rte_always_inline void + ionic_rx_clean_one(struct ionic_rx_qcq *rxq, +- struct ionic_rxq_comp *cq_desc, ++ volatile struct ionic_rxq_comp *cq_desc, + struct ionic_rx_service *rx_svc) + { + struct ionic_queue *q = &rxq->qcq.q; +@@ -359,7 +360,8 @@ ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do, + struct ionic_cq *cq = &rxq->qcq.cq; + struct ionic_queue *q = &rxq->qcq.q; + struct ionic_rxq_desc *q_desc_base = q->base; +- struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_rxq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_rxq_comp *cq_desc; + uint32_t work_done = 0; + uint64_t then, now, hz, delta; + diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c index 70a06a3b15..2c15611a23 100644 --- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c @@ -44272,11 +55476,140 @@ index 2ef96a984a..5361867785 100644 return NULL; } +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c +index 74c5db16fa..56267bb00d 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c +@@ -432,8 +432,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; +- case X550_PHY_ID2: +- case X550_PHY_ID3: ++ case X550_PHY_ID: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; +@@ -915,6 +914,10 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; ++ break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +index b7eec45635..5973c60477 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +@@ -1663,6 +1663,7 @@ struct ixgbe_dmac_config { + #define TN1010_PHY_ID 0x00A19410 + #define TNX_FW_REV 0xB + #define X540_PHY_ID 0x01540200 ++#define X550_PHY_ID 0x01540220 + #define X550_PHY_ID2 0x01540223 + #define X550_PHY_ID3 0x01540221 + #define X557_PHY_ID 0x01540240 +@@ -1799,7 +1800,7 @@ enum { + /* VFRE bitmask */ + #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */ + + /* RDHMPN and TDHMPN bitmasks */ + #define IXGBE_RDHMPN_RDICADDR 0x007FF800 +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c +index 
5e3ae1b519..11dbbe2a86 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c +@@ -585,7 +585,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; +- if (hw->mac.type >= ixgbe_mac_X550) { ++ if (hw->mac.type >= ixgbe_mac_X550_vf) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } +@@ -595,7 +595,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; +- if (hw->mac.type == ixgbe_mac_X550) { ++ if (hw->mac.type == ixgbe_mac_X550_vf) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } +@@ -603,7 +603,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ +- if (hw->mac.type >= ixgbe_mac_X550) ++ if (hw->mac.type >= ixgbe_mac_X550_vf) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -index ae9f65b334..65655b9212 100644 +index ae9f65b334..9e1a65a50a 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -@@ -3852,23 +3852,32 @@ static int +@@ -1187,7 +1187,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + diag = ixgbe_validate_eeprom_checksum(hw, &csum); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_exit; + } + + #ifdef RTE_LIBRTE_IXGBE_BYPASS +@@ -1225,7 +1226,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); + if (diag) { + PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_exit; + } + + /* Reset the hw statistics */ +@@ -1245,7 +1247,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_exit; + } + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, +@@ -1260,7 +1263,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_exit; + } + + /* initialize the vfta */ +@@ -1344,6 +1348,11 @@ err_pf_host_init: + eth_dev->data->mac_addrs = NULL; + rte_free(eth_dev->data->hash_mac_addrs); + eth_dev->data->hash_mac_addrs = NULL; ++err_exit: ++#ifdef RTE_LIB_SECURITY ++ rte_free(eth_dev->security_ctx); ++ eth_dev->security_ctx = NULL; ++#endif + return ret; + } + +@@ -3852,23 +3861,32 @@ static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -44320,6 +55653,45 @@ index ae9f65b334..65655b9212 100644 } static int +@@ -4265,6 +4283,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait = 1; + u32 esdp_reg; + ++ if (rte_eal_process_type() 
!= RTE_PROC_PRIMARY) ++ return -1; ++ + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; +@@ -4639,14 +4660,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) + timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + + ixgbe_dev_link_status_print(dev); +- if (rte_eal_alarm_set(timeout * 1000, +- ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) +- PMD_DRV_LOG(ERR, "Error setting alarm"); +- else { +- /* remember original mask */ +- intr->mask_original = intr->mask; +- /* only disable lsc interrupt */ +- intr->mask &= ~IXGBE_EIMS_LSC; ++ ++ /* Don't program delayed handler if LSC interrupt is disabled. ++ * It means one is already programmed. ++ */ ++ if (intr->mask & IXGBE_EIMS_LSC) { ++ if (rte_eal_alarm_set(timeout * 1000, ++ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) ++ PMD_DRV_LOG(ERR, "Error setting alarm"); ++ else { ++ /* remember original mask */ ++ intr->mask_original = intr->mask; ++ /* only disable lsc interrupt */ ++ intr->mask &= ~IXGBE_EIMS_LSC; ++ } + } + } + diff --git a/dpdk/drivers/net/ixgbe/ixgbe_flow.c b/dpdk/drivers/net/ixgbe/ixgbe_flow.c index 110ff34fcc..7cccbfddb3 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_flow.c @@ -44351,7 +55723,7 @@ index 110ff34fcc..7cccbfddb3 100644 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -index c9d6ca9efe..a3a7c68806 100644 +index c9d6ca9efe..e41bfa5ce3 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c @@ -1818,11 +1818,22 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -44443,7 +55815,33 @@ index c9d6ca9efe..a3a7c68806 100644 } } /* If loopback mode was enabled, reconfigure the link accordingly */ -@@ -5830,6 +5827,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5743,6 +5740,25 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) + IXGBE_PSRTYPE_RQPL_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + ++ /* Initialize the rss for x550_vf cards if enabled */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550_vf: ++ case ixgbe_mac_X550EM_x_vf: ++ case ixgbe_mac_X550EM_a_vf: ++ switch (dev->data->dev_conf.rxmode.mq_mode) { ++ case RTE_ETH_MQ_RX_RSS: ++ case RTE_ETH_MQ_RX_DCB_RSS: ++ case RTE_ETH_MQ_RX_VMDQ_RSS: ++ ixgbe_rss_configure(dev); ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ break; ++ } ++ + ixgbe_set_rx_function(dev); + + return 0; +@@ -5830,6 +5846,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); @@ -44452,7 +55850,7 @@ index c9d6ca9efe..a3a7c68806 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { -@@ -5847,6 +5846,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5847,6 +5865,8 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); @@ -44760,10 +56158,48 @@ index 3d4039014f..7f66a7a7cf 100644 + return num_comp; } diff --git a/dpdk/drivers/net/mana/mana.c b/dpdk/drivers/net/mana/mana.c -index 43221e743e..896b53ed35 100644 +index 43221e743e..eb3b734949 100644 --- a/dpdk/drivers/net/mana/mana.c +++ b/dpdk/drivers/net/mana/mana.c -@@ -487,6 +487,15 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -292,8 +292,8 @@ mana_dev_info_get(struct rte_eth_dev *dev, + dev_info->min_rx_bufsize = 
MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = MAX_FRAME_SIZE; + +- dev_info->max_rx_queues = priv->max_rx_queues; +- dev_info->max_tx_queues = priv->max_tx_queues; ++ dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX); ++ dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX); + + dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR; + dev_info->max_hash_mac_addrs = 0; +@@ -334,16 +334,20 @@ mana_dev_info_get(struct rte_eth_dev *dev, + + /* Buffer limits */ + dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; +- dev_info->rx_desc_lim.nb_max = priv->max_rx_desc; ++ dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX); + dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; +- dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge; +- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; ++ dev_info->rx_desc_lim.nb_seg_max = ++ RTE_MIN(priv->max_recv_sge, UINT16_MAX); ++ dev_info->rx_desc_lim.nb_mtu_seg_max = ++ RTE_MIN(priv->max_recv_sge, UINT16_MAX); + + dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; +- dev_info->tx_desc_lim.nb_max = priv->max_tx_desc; ++ dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX); + dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; +- dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge; +- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; ++ dev_info->tx_desc_lim.nb_seg_max = ++ RTE_MIN(priv->max_send_sge, UINT16_MAX); ++ dev_info->tx_desc_lim.nb_mtu_seg_max = ++ RTE_MIN(priv->max_send_sge, UINT16_MAX); + + /* Speed */ + dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G; +@@ -487,6 +491,15 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, goto fail; } @@ -44779,7 +56215,7 @@ index 43221e743e..896b53ed35 100644 ret = mana_mr_btree_init(&txq->mr_btree, MANA_MR_BTREE_PER_QUEUE_N, socket_id); if (ret) { -@@ -506,6 +515,7 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -506,6 +519,7 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return 0; fail: @@ -44787,7 +56223,7 @@ index 43221e743e..896b53ed35 100644 rte_free(txq->desc_ring); rte_free(txq); return ret; -@@ -518,6 +528,7 @@ mana_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) +@@ -518,6 +532,7 @@ mana_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) mana_mr_btree_free(&txq->mr_btree); @@ -44795,7 +56231,7 @@ index 43221e743e..896b53ed35 100644 rte_free(txq->desc_ring); rte_free(txq); } -@@ -557,6 +568,15 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -557,6 +572,15 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->desc_ring_head = 0; rxq->desc_ring_tail = 0; @@ -44811,7 +56247,7 @@ index 43221e743e..896b53ed35 100644 ret = mana_mr_btree_init(&rxq->mr_btree, MANA_MR_BTREE_PER_QUEUE_N, socket_id); if (ret) { -@@ -572,6 +592,7 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, +@@ -572,6 +596,7 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return 0; fail: @@ -44819,7 +56255,7 @@ index 43221e743e..896b53ed35 100644 rte_free(rxq->desc_ring); rte_free(rxq); return ret; -@@ -584,6 +605,7 @@ mana_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) +@@ -584,6 +609,7 @@ mana_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) mana_mr_btree_free(&rxq->mr_btree); @@ -44827,7 +56263,7 @@ index 43221e743e..896b53ed35 100644 rte_free(rxq->desc_ring); rte_free(rxq); } -@@ -616,9 +638,9 @@ mana_dev_stats_get(struct rte_eth_dev *dev, 
struct rte_eth_stats *stats) +@@ -616,9 +642,9 @@ mana_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) if (!txq) continue; @@ -44840,7 +56276,7 @@ index 43221e743e..896b53ed35 100644 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { stats->q_opackets[i] = txq->stats.packets; -@@ -633,9 +655,9 @@ mana_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -633,9 +659,9 @@ mana_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) if (!rxq) continue; @@ -44853,7 +56289,7 @@ index 43221e743e..896b53ed35 100644 /* There is no good way to get stats->imissed, not setting it */ -@@ -1238,7 +1260,7 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, +@@ -1238,7 +1264,7 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, /* Create a parent domain with the port number */ attr.pd = priv->ib_pd; attr.comp_mask = IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT; @@ -44862,7 +56298,19 @@ index 43221e743e..896b53ed35 100644 priv->ib_parent_pd = ibv_alloc_parent_domain(ctx, &attr); if (!priv->ib_parent_pd) { DRV_LOG(ERR, "ibv_alloc_parent_domain failed port %d", port); -@@ -1321,6 +1343,7 @@ failed: +@@ -1268,9 +1294,9 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, + priv->max_mr = dev_attr->orig_attr.max_mr; + priv->max_mr_size = dev_attr->orig_attr.max_mr_size; + +- DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d", ++ DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d mr %" PRIu64, + name, priv->max_rx_queues, priv->max_rx_desc, +- priv->max_send_sge); ++ priv->max_send_sge, priv->max_mr_size); + + rte_eth_copy_pci_info(eth_dev, pci_dev); + +@@ -1321,6 +1347,7 @@ failed: /* * Goes through the IB device list to look for the IB port matching the * mac_addr. If found, create a rte_eth_dev for it. 
@@ -44870,7 +56318,7 @@ index 43221e743e..896b53ed35 100644 */ static int mana_pci_probe_mac(struct rte_pci_device *pci_dev, -@@ -1330,8 +1353,9 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, +@@ -1330,8 +1357,9 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, int ibv_idx; struct ibv_context *ctx; int num_devices; @@ -44881,7 +56329,7 @@ index 43221e743e..896b53ed35 100644 ibv_list = ibv_get_device_list(&num_devices); for (ibv_idx = 0; ibv_idx < num_devices; ibv_idx++) { -@@ -1361,6 +1385,12 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, +@@ -1361,6 +1389,12 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, ret = ibv_query_device_ex(ctx, NULL, &dev_attr); ibv_close_device(ctx); @@ -44894,7 +56342,7 @@ index 43221e743e..896b53ed35 100644 for (port = 1; port <= dev_attr.orig_attr.phys_port_cnt; port++) { struct rte_ether_addr addr; -@@ -1372,15 +1402,17 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, +@@ -1372,15 +1406,17 @@ mana_pci_probe_mac(struct rte_pci_device *pci_dev, continue; ret = mana_probe_port(ibdev, &dev_attr, port, pci_dev, &addr); @@ -44915,7 +56363,7 @@ index 43221e743e..896b53ed35 100644 } /* -@@ -1394,6 +1426,7 @@ mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, +@@ -1394,6 +1430,7 @@ mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct mana_conf conf = {0}; unsigned int i; int ret; @@ -44923,7 +56371,7 @@ index 43221e743e..896b53ed35 100644 if (args && args->drv_str) { ret = mana_parse_args(args, &conf); -@@ -1411,16 +1444,21 @@ mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, +@@ -1411,16 +1448,21 @@ mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, } /* If there are no driver parameters, probe on all ports */ @@ -44952,7 +56400,7 @@ index 43221e743e..896b53ed35 100644 } static int -@@ -1453,6 +1491,7 @@ mana_pci_remove(struct rte_pci_device *pci_dev) +@@ -1453,6 +1495,7 @@ mana_pci_remove(struct rte_pci_device *pci_dev) if (!mana_shared_data->primary_cnt) { DRV_LOG(DEBUG, "free shared memezone data"); rte_memzone_free(mana_shared_mz); @@ -44961,7 +56409,7 @@ index 43221e743e..896b53ed35 100644 rte_spinlock_unlock(&mana_shared_data_lock); diff --git a/dpdk/drivers/net/mana/mana.h b/dpdk/drivers/net/mana/mana.h -index 4a05238a96..f70a3e0b3d 100644 +index 4a05238a96..57576c62e4 100644 --- a/dpdk/drivers/net/mana/mana.h +++ b/dpdk/drivers/net/mana/mana.h @@ -50,6 +50,21 @@ struct mana_shared_data { @@ -45084,6 +56532,19 @@ index 4a05238a96..f70a3e0b3d 100644 int mana_start_rx_queues(struct rte_eth_dev *dev); int mana_start_tx_queues(struct rte_eth_dev *dev); +@@ -493,9 +517,9 @@ void mana_del_pmd_mr(struct mana_mr_cache *mr); + void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque, + struct rte_mempool_memhdr *memhdr, unsigned int idx); + +-struct mana_mr_cache *mana_mr_btree_lookup(struct mana_mr_btree *bt, +- uint16_t *idx, +- uintptr_t addr, size_t len); ++int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, ++ uintptr_t addr, size_t len, ++ struct mana_mr_cache **cache); + int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry); + int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket); + void mana_mr_btree_free(struct mana_mr_btree *bt); diff --git a/dpdk/drivers/net/mana/meson.build b/dpdk/drivers/net/mana/meson.build index 493f0d26d4..2d72eca5a8 100644 --- a/dpdk/drivers/net/mana/meson.build @@ -45116,10 +56577,18 @@ index 92432c431d..738487f65a 100644 mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); if (!mr) { diff --git 
a/dpdk/drivers/net/mana/mr.c b/dpdk/drivers/net/mana/mr.c -index 22df0917bb..b8e6ea0bbf 100644 +index 22df0917bb..eb6d073a95 100644 --- a/dpdk/drivers/net/mana/mr.c +++ b/dpdk/drivers/net/mana/mr.c -@@ -47,23 +47,23 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, +@@ -40,30 +40,30 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + struct ibv_mr *ibv_mr; + struct mana_range ranges[pool->nb_mem_chunks]; + uint32_t i; +- struct mana_mr_cache *mr; ++ struct mana_mr_cache mr; + int ret; + + rte_mempool_mem_iter(pool, mana_mempool_chunk_cb, ranges); for (i = 0; i < pool->nb_mem_chunks; i++) { if (ranges[i].len > priv->max_mr_size) { @@ -45151,7 +56620,7 @@ index 22df0917bb..b8e6ea0bbf 100644 return ret; } continue; -@@ -72,8 +72,8 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, +@@ -72,35 +72,34 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, ibv_mr = ibv_reg_mr(priv->ib_pd, (void *)ranges[i].start, ranges[i].len, IBV_ACCESS_LOCAL_WRITE); if (ibv_mr) { @@ -45160,9 +56629,19 @@ index 22df0917bb..b8e6ea0bbf 100644 + DP_LOG(DEBUG, "MR lkey %u addr %p len %zu", + ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); - mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); - mr->lkey = ibv_mr->lkey; -@@ -86,7 +86,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, +- mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); +- mr->lkey = ibv_mr->lkey; +- mr->addr = (uintptr_t)ibv_mr->addr; +- mr->len = ibv_mr->length; +- mr->verb_obj = ibv_mr; ++ mr.lkey = ibv_mr->lkey; ++ mr.addr = (uintptr_t)ibv_mr->addr; ++ mr.len = ibv_mr->length; ++ mr.verb_obj = ibv_mr; + + rte_spinlock_lock(&priv->mr_btree_lock); +- ret = mana_mr_btree_insert(&priv->mr_btree, mr); ++ ret = mana_mr_btree_insert(&priv->mr_btree, &mr); rte_spinlock_unlock(&priv->mr_btree_lock); if (ret) { ibv_dereg_mr(ibv_mr); @@ -45171,7 +56650,9 @@ index 22df0917bb..b8e6ea0bbf 100644 return ret; } -@@ -95,12 +95,12 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, +- ret = mana_mr_btree_insert(local_tree, mr); ++ ret = mana_mr_btree_insert(local_tree, &mr); + if (ret) { /* Don't need to clean up MR as it's already * in the global tree */ @@ -45187,7 +56668,7 @@ index 22df0917bb..b8e6ea0bbf 100644 return -errno; } } -@@ -118,7 +118,7 @@ mana_del_pmd_mr(struct mana_mr_cache *mr) +@@ -118,7 +117,7 @@ mana_del_pmd_mr(struct mana_mr_cache *mr) ret = ibv_dereg_mr(ibv_mr); if (ret) @@ -45196,7 +56677,7 @@ index 22df0917bb..b8e6ea0bbf 100644 } /* -@@ -133,17 +133,16 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, +@@ -133,50 +132,56 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, struct mana_mr_cache *mr; uint16_t idx; @@ -45207,8 +56688,14 @@ index 22df0917bb..b8e6ea0bbf 100644 try_again: /* First try to find the MR in local queue tree */ - mr = mana_mr_btree_lookup(local_mr_btree, &idx, - (uintptr_t)mbuf->buf_addr, mbuf->buf_len); +- mr = mana_mr_btree_lookup(local_mr_btree, &idx, +- (uintptr_t)mbuf->buf_addr, mbuf->buf_len); ++ ret = mana_mr_btree_lookup(local_mr_btree, &idx, ++ (uintptr_t)mbuf->buf_addr, mbuf->buf_len, ++ &mr); ++ if (ret) ++ return NULL; ++ if (mr) { - DRV_LOG(DEBUG, - "Local mr lkey %u addr 0x%" PRIx64 " len %" PRIu64, @@ -45218,7 +56705,20 @@ index 22df0917bb..b8e6ea0bbf 100644 return mr; } -@@ -158,25 +157,25 @@ try_again: + /* If not found, try to find the MR in global tree */ + rte_spinlock_lock(&priv->mr_btree_lock); +- 
mr = mana_mr_btree_lookup(&priv->mr_btree, &idx, +- (uintptr_t)mbuf->buf_addr, +- mbuf->buf_len); ++ ret = mana_mr_btree_lookup(&priv->mr_btree, &idx, ++ (uintptr_t)mbuf->buf_addr, ++ mbuf->buf_len, &mr); + rte_spinlock_unlock(&priv->mr_btree_lock); + ++ if (ret) ++ return NULL; ++ + /* If found in the global tree, add it to the local tree */ if (mr) { ret = mana_mr_btree_insert(local_mr_btree, mr); if (ret) { @@ -45251,7 +56751,7 @@ index 22df0917bb..b8e6ea0bbf 100644 return NULL; } -@@ -215,11 +214,11 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n) +@@ -215,11 +220,11 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n) mem = rte_realloc_socket(bt->table, n * sizeof(struct mana_mr_cache), 0, bt->socket); if (!mem) { @@ -45265,9 +56765,45 @@ index 22df0917bb..b8e6ea0bbf 100644 bt->table = mem; bt->size = n; -@@ -266,9 +265,9 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, - if (addr + len <= table[base].addr + table[base].len) - return &table[base]; +@@ -229,22 +234,23 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n) + /* + * Look for a region of memory in MR cache. + */ +-struct mana_mr_cache * +-mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, +- uintptr_t addr, size_t len) ++int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, ++ uintptr_t addr, size_t len, ++ struct mana_mr_cache **cache) + { + struct mana_mr_cache *table; + uint16_t n; + uint16_t base = 0; + int ret; + +- n = bt->len; ++ *cache = NULL; + ++ n = bt->len; + /* Try to double the cache if it's full */ + if (n == bt->size) { + ret = mana_mr_btree_expand(bt, bt->size << 1); + if (ret) +- return NULL; ++ return ret; + } + + table = bt->table; +@@ -263,14 +269,16 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, + + *idx = base; + +- if (addr + len <= table[base].addr + table[base].len) +- return &table[base]; ++ if (addr + len <= table[base].addr + table[base].len) { ++ *cache = &table[base]; ++ return 0; ++ } - DRV_LOG(DEBUG, - "addr 0x%" PRIx64 " len %zu idx %u sum 0x%" PRIx64 " not found", @@ -45276,20 +56812,37 @@ index 22df0917bb..b8e6ea0bbf 100644 + "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found", + addr, len, *idx, addr + len); - return NULL; +- return NULL; ++ return 0; } -@@ -317,8 +316,8 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) + + int +@@ -315,14 +323,21 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) + struct mana_mr_cache *table; + uint16_t idx = 0; uint16_t shift; ++ int ret; ++ ++ ret = mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len, &table); ++ if (ret) ++ return ret; - if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { +- if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { - DRV_LOG(DEBUG, "Addr 0x%" PRIx64 " len %zu exists in btree", - entry->addr, entry->len); ++ if (table) { + DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree", + entry->addr, entry->len); return 0; } -@@ -332,17 +331,17 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) + if (bt->len >= bt->size) { ++ DP_LOG(ERR, "Btree overflow detected len %u size %u", ++ bt->len, bt->size); + bt->overflow = 1; + return -1; + } +@@ -332,17 +347,17 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) idx++; shift = (bt->len - idx) * sizeof(struct mana_mr_cache); if (shift) { @@ -45811,10 +57364,30 @@ index 300bf27cc1..3e255157f9 100644 return pkt_sent; diff --git 
a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c -index 1b1c1a652b..3b83a5a8bb 100644 +index 1b1c1a652b..86b821ac5c 100644 --- a/dpdk/drivers/net/memif/rte_eth_memif.c +++ b/dpdk/drivers/net/memif/rte_eth_memif.c -@@ -1240,6 +1240,7 @@ memif_dev_start(struct rte_eth_dev *dev) +@@ -261,8 +261,6 @@ memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_q + cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); + while (mq->last_tail != cur_tail) { + RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]); +- /* Decrement refcnt and free mbuf. (current segment) */ +- rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); + rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); + mq->last_tail++; + } +@@ -707,10 +705,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq + next_in_chain: + /* store pointer to mbuf to free it later */ + mq->buffers[slot & mask] = mbuf; +- /* Increment refcnt to make sure the buffer is not freed before server +- * receives it. (current segment) +- */ +- rte_mbuf_refcnt_update(mbuf, 1); + /* populate descriptor */ + d0 = &ring->desc[slot & mask]; + d0->length = rte_pktmbuf_data_len(mbuf); +@@ -1240,6 +1234,7 @@ memif_dev_start(struct rte_eth_dev *dev) { struct pmd_internals *pmd = dev->data->dev_private; int ret = 0; @@ -45822,7 +57395,7 @@ index 1b1c1a652b..3b83a5a8bb 100644 switch (pmd->role) { case MEMIF_ROLE_CLIENT: -@@ -1254,13 +1255,28 @@ memif_dev_start(struct rte_eth_dev *dev) +@@ -1254,13 +1249,28 @@ memif_dev_start(struct rte_eth_dev *dev) break; } @@ -45908,6 +57481,40 @@ index a54016f4a2..1389b606cc 100644 return 0; } +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr.h b/dpdk/drivers/net/mlx5/hws/mlx5dr.h +index f8de27c615..d570810e95 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr.h ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr.h +@@ -81,6 +81,7 @@ enum mlx5dr_action_aso_ct_flags { + }; + + enum mlx5dr_match_template_flags { ++ MLX5DR_MATCH_TEMPLATE_FLAG_NONE = 0, + /* Allow relaxed matching by skipping derived dependent match fields. 
*/ + MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH = 1, + }; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c +index b0ae4e7693..4fb9a03d80 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c +@@ -1593,6 +1593,7 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) + case MLX5DR_ACTION_TYP_ASO_METER: + case MLX5DR_ACTION_TYP_ASO_CT: + case MLX5DR_ACTION_TYP_PUSH_VLAN: ++ case MLX5DR_ACTION_TYP_VPORT: + mlx5dr_action_destroy_stcs(action); + break; + case MLX5DR_ACTION_TYP_POP_VLAN: +@@ -1614,6 +1615,9 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) + mlx5dr_action_destroy_stcs(action); + mlx5dr_cmd_destroy_obj(action->reformat.arg_obj); + break; ++ default: ++ DR_LOG(ERR, "Not supported action type: %d", action->type); ++ assert(false); + } + } + diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c index 721376b8da..acad42e12e 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c @@ -45946,11 +57553,56 @@ index 721376b8da..acad42e12e 100644 devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out)); if (!devx_obj->obj) { DR_LOG(ERR, "Failed to create header_modify_pattern"); +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c +index 76ada7bb7f..d1923a8e93 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c +@@ -210,6 +210,7 @@ struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx, + free_caps: + simple_free(ctx->caps); + free_ctx: ++ pthread_spin_destroy(&ctx->ctrl_lock); + simple_free(ctx); + return NULL; + } +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c +index 890a761c48..092a87921d 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c +@@ -129,7 +129,7 @@ mlx5dr_debug_dump_matcher_action_template(FILE *f, struct mlx5dr_matcher *matche + MLX5DR_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE, + (uint64_t)(uintptr_t)at, + (uint64_t)(uintptr_t)matcher, +- at->only_term ? 0 : 1, ++ at->only_term, + is_root ? 
0 : at->num_of_action_stes, + at->num_actions); + if (ret < 0) { diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c -index 6b98eb8c96..6fc5d70f67 100644 +index 6b98eb8c96..bc1decbec9 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c +++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c -@@ -123,6 +123,7 @@ struct mlx5dr_definer_conv_data { +@@ -8,7 +8,7 @@ + #define BAD_PORT 0xBAD + #define ETH_TYPE_IPV4_VXLAN 0x0800 + #define ETH_TYPE_IPV6_VXLAN 0x86DD +-#define ETH_VXLAN_DEFAULT_PORT 4789 ++#define UDP_VXLAN_PORT 4789 + + #define STE_NO_VLAN 0x0 + #define STE_SVLAN 0x1 +@@ -31,6 +31,10 @@ + (bit_off))); \ + } while (0) + ++/* Getter function based on bit offset and mask, for 32bit DW*/ ++#define DR_GET_32(p, byte_off, bit_off, mask) \ ++ ((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask)) ++ + /* Setter function based on bit offset and mask */ + #define DR_SET(p, v, byte_off, bit_off, mask) \ + do { \ +@@ -123,6 +127,7 @@ struct mlx5dr_definer_conv_data { X(SET, ipv4_next_proto, v->next_proto_id, rte_ipv4_hdr) \ X(SET, ipv4_version, STE_IPV4, rte_ipv4_hdr) \ X(SET_BE16, ipv4_frag, v->fragment_offset, rte_ipv4_hdr) \ @@ -45958,7 +57610,71 @@ index 6b98eb8c96..6fc5d70f67 100644 X(SET_BE16, ipv6_payload_len, v->hdr.payload_len, rte_flow_item_ipv6) \ X(SET, ipv6_proto, v->hdr.proto, rte_flow_item_ipv6) \ X(SET, ipv6_hop_limits, v->hdr.hop_limits, rte_flow_item_ipv6) \ -@@ -542,8 +543,13 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd, +@@ -152,7 +157,7 @@ struct mlx5dr_definer_conv_data { + X(SET, gtp_ext_hdr_pdu, v->hdr.type, rte_flow_item_gtp_psc) \ + X(SET, gtp_ext_hdr_qfi, v->hdr.qfi, rte_flow_item_gtp_psc) \ + X(SET, vxlan_flags, v->flags, rte_flow_item_vxlan) \ +- X(SET, vxlan_udp_port, ETH_VXLAN_DEFAULT_PORT, rte_flow_item_vxlan) \ ++ X(SET, vxlan_udp_port, UDP_VXLAN_PORT, rte_flow_item_vxlan) \ + X(SET, source_qp, v->queue, mlx5_rte_flow_item_sq) \ + X(SET, tag, v->data, rte_flow_item_tag) \ + X(SET, metadata, v->data, rte_flow_item_meta) \ +@@ -162,7 +167,9 @@ struct mlx5dr_definer_conv_data { + X(SET_BE32, gre_opt_key, v->key.key, rte_flow_item_gre_opt) \ + X(SET_BE32, gre_opt_seq, v->sequence.sequence, rte_flow_item_gre_opt) \ + X(SET_BE16, gre_opt_checksum, v->checksum_rsvd.checksum, rte_flow_item_gre_opt) \ +- X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) ++ X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) \ ++ X(SET, cvlan, STE_CVLAN, rte_flow_item_vlan) \ ++ X(SET_BE16, inner_type, v->inner_type, rte_flow_item_vlan) + + /* Item set function format */ + #define X(set_type, func_name, value, item_type) \ +@@ -268,7 +275,7 @@ mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc, + { + bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I); + const struct rte_flow_item_integrity *v = item_spec; +- uint32_t ok1_bits = 0; ++ uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask); + + if (v->l3_ok) + ok1_bits |= inner ? 
BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) | +@@ -476,6 +483,15 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, + struct mlx5dr_definer_fc *fc; + bool inner = cd->tunnel; + ++ if (!cd->relaxed) { ++ /* Mark packet as tagged (CVLAN) */ ++ fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; ++ fc->item_idx = item_idx; ++ fc->tag_mask_set = &mlx5dr_definer_ones_set; ++ fc->tag_set = &mlx5dr_definer_cvlan_set; ++ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner); ++ } ++ + if (!m) + return 0; + +@@ -484,8 +500,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, + return rte_errno; + } + +- if (!cd->relaxed || m->has_more_vlan) { +- /* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified.*/ ++ if (m->has_more_vlan) { + fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; + fc->item_idx = item_idx; + fc->tag_mask_set = &mlx5dr_definer_ones_set; +@@ -503,7 +518,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, + if (m->inner_type) { + fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)]; + fc->item_idx = item_idx; +- fc->tag_set = &mlx5dr_definer_eth_type_set; ++ fc->tag_set = &mlx5dr_definer_inner_type_set; + DR_CALC_SET(fc, eth_l2, l3_ethertype, inner); + } + +@@ -542,8 +557,13 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd, if (m->fragment_offset) { fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)]; fc->item_idx = item_idx; @@ -45974,7 +57690,70 @@ index 6b98eb8c96..6fc5d70f67 100644 } if (m->next_proto_id) { -@@ -1322,7 +1328,6 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd, +@@ -818,6 +838,12 @@ mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd, + const struct rte_flow_item_gtp *m = item->mask; + struct mlx5dr_definer_fc *fc; + ++ if (cd->tunnel) { ++ DR_LOG(ERR, "Inner GTPU item not supported"); ++ rte_errno = ENOTSUP; ++ return rte_errno; ++ } ++ + /* Overwrite GTPU dest port if not present */ + fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)]; + if (!fc->tag_set && !cd->relaxed) { +@@ -990,9 +1016,20 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, + struct mlx5dr_definer_fc *fc; + bool inner = cd->tunnel; + +- /* In order to match on VXLAN we must match on ether_type, ip_protocol +- * and l4_dport. 
+- */ ++ if (m && (m->rsvd0[0] != 0 || m->rsvd0[1] != 0 || m->rsvd0[2] != 0 || ++ m->rsvd1 != 0)) { ++ DR_LOG(ERR, "reserved fields are not supported"); ++ rte_errno = ENOTSUP; ++ return rte_errno; ++ } ++ ++ if (inner) { ++ DR_LOG(ERR, "Inner VXLAN item not supported"); ++ rte_errno = ENOTSUP; ++ return rte_errno; ++ } ++ ++ /* In order to match on VXLAN we must match on ip_protocol and l4_dport */ + if (!cd->relaxed) { + fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)]; + if (!fc->tag_set) { +@@ -1015,12 +1052,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, + return 0; + + if (m->flags) { +- if (inner) { +- DR_LOG(ERR, "Inner VXLAN flags item not supported"); +- rte_errno = ENOTSUP; +- return rte_errno; +- } +- + fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_vxlan_flags_set; +@@ -1030,12 +1061,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, + } + + if (!is_mem_zero(m->vni, 3)) { +- if (inner) { +- DR_LOG(ERR, "Inner VXLAN vni item not supported"); +- rte_errno = ENOTSUP; +- return rte_errno; +- } +- + fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_vxlan_vni_set; +@@ -1322,7 +1347,6 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd, { const struct rte_flow_item_integrity *m = item->mask; struct mlx5dr_definer_fc *fc; @@ -45982,7 +57761,7 @@ index 6b98eb8c96..6fc5d70f67 100644 if (!m) return 0; -@@ -1333,7 +1338,7 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd, +@@ -1333,7 +1357,7 @@ mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd, } if (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) { @@ -45991,7 +57770,7 @@ index 6b98eb8c96..6fc5d70f67 100644 fc->item_idx = item_idx; fc->tag_set = &mlx5dr_definer_integrity_set; DR_CALC_SET_HDR(fc, oks1, oks1_bits); -@@ -1563,8 +1568,7 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx, +@@ -1563,8 +1587,7 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx, break; case RTE_FLOW_ITEM_TYPE_INTEGRITY: ret = mlx5dr_definer_conv_item_integrity(&cd, items, i); @@ -46001,7 +57780,7 @@ index 6b98eb8c96..6fc5d70f67 100644 break; case RTE_FLOW_ITEM_TYPE_CONNTRACK: ret = mlx5dr_definer_conv_item_conntrack(&cd, items, i); -@@ -1629,11 +1633,15 @@ mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer, +@@ -1629,11 +1652,15 @@ mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer, uint32_t *tag_byte_off) { uint8_t byte_offset; @@ -46019,6 +57798,24 @@ index 6b98eb8c96..6fc5d70f67 100644 if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) { *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1); return 0; +@@ -1815,7 +1842,7 @@ mlx5dr_definer_find_best_hl_fit(struct mlx5dr_context *ctx, + return 0; + } + +- DR_LOG(ERR, "Unable to find supporting match/jumbo definer combination"); ++ DR_LOG(DEBUG, "Unable to find supporting match/jumbo definer combination"); + rte_errno = ENOTSUP; + return rte_errno; + } +@@ -1907,7 +1934,7 @@ int mlx5dr_definer_get(struct mlx5dr_context *ctx, + /* Convert items to hl and allocate the field copy array (fc) */ + ret = mlx5dr_definer_conv_items_to_hl(ctx, mt, hl); + if (ret) { +- DR_LOG(ERR, "Failed to convert items to hl"); ++ DR_LOG(DEBUG, "Failed to convert items to hl"); + goto free_hl; + } + diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h index d52c6b0627..5b38a54e6b 100644 --- 
a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.h @@ -46039,6 +57836,36 @@ index d52c6b0627..5b38a54e6b 100644 u8 ipv4_total_length[0x10]; u8 checksum[0x10]; u8 reserved_at_60[0xc]; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c +index 2e444c1179..e89ebb657a 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c +@@ -739,6 +739,13 @@ static int mlx5dr_matcher_init_root(struct mlx5dr_matcher *matcher) + return rte_errno; + } + ++ ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); ++ if (ret) { ++ DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name); ++ rte_errno = EINVAL; ++ return rte_errno; ++ } ++ + mask = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) + + offsetof(struct mlx5dv_flow_match_parameters, match_buf)); + if (!mask) { +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h +index d9353e9a3e..0a1257f98c 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h +@@ -31,7 +31,6 @@ struct mlx5dr_pat_cached_pattern { + enum mlx5dr_action_type type; + struct { + struct mlx5dr_devx_obj *pattern_obj; +- struct dr_icm_chunk *chunk; + uint8_t *data; + uint16_t num_of_actions; + } mh_data; diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c index fdbd3d438d..af6a5c743b 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_pool.c @@ -46057,6 +57884,24 @@ index fdbd3d438d..af6a5c743b 100644 pool->p_db_uninit = &mlx5dr_pool_general_element_db_uninit; pool->p_get_chunk = &mlx5dr_pool_general_element_db_get_chunk; pool->p_put_chunk = &mlx5dr_pool_general_element_db_put_chunk; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c +index b27318e6d4..00b6c49b88 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c +@@ -386,6 +386,13 @@ static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule, + uint8_t match_criteria; + int ret; + ++ ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); ++ if (ret) { ++ DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name); ++ rte_errno = EINVAL; ++ return rte_errno; ++ } ++ + attr = simple_calloc(num_actions, sizeof(*attr)); + if (!attr) { + rte_errno = ENOMEM; diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c index 5c8bbe6fc6..a8aba31cbe 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c @@ -46072,8 +57917,21 @@ index 5c8bbe6fc6..a8aba31cbe 100644 } return 0; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h +index 8d4769495d..28efd70c64 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h +@@ -192,8 +192,6 @@ struct mlx5dr_send_ste_attr { + * value to write in CPU endian format. + * @param addr + * Address to write to. +- * @param lock +- * Address of the lock to use for that UAR access. 
+ */ + static __rte_always_inline void + mlx5dr_uar_write64_relaxed(uint64_t val, void *addr) diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -index 72268c0c8a..dd5a0c546d 100644 +index 72268c0c8a..1d999ef66b 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c @@ -28,6 +28,7 @@ @@ -46084,6 +57942,15 @@ index 72268c0c8a..dd5a0c546d 100644 #include #include #include +@@ -670,7 +671,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + ifr.ifr_data = (void *)ðpause; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { +- DRV_LOG(WARNING, ++ DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" + " %s", + dev->data->port_id, strerror(rte_errno)); @@ -745,6 +746,7 @@ mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh) for (i = 0; i < sh->max_port; ++i) { @@ -46189,7 +58056,377 @@ index 72268c0c8a..dd5a0c546d 100644 fclose(file); } file = fopen(phys_switch_id, "rb"); -@@ -1776,3 +1807,70 @@ exit: +@@ -1255,13 +1286,17 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + struct ifreq ifr; +- unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); ++ unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd); ++ unsigned int stats_sz = max_stats_n * sizeof(uint64_t); + unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; + struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; ++ uint16_t i_idx, o_idx; ++ uint32_t total_stats = xstats_n; + + et_stats->cmd = ETHTOOL_GSTATS; +- et_stats->n_stats = xstats_ctrl->stats_n; ++ /* Pass the maximum value, the driver may ignore this. */ ++ et_stats->n_stats = max_stats_n; + ifr.ifr_data = (caddr_t)et_stats; + if (pf >= 0) + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname, +@@ -1274,21 +1309,34 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + dev->data->port_id); + return ret; + } +- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { +- if (xstats_ctrl->info[i].dev) +- continue; +- stats[i] += (uint64_t) +- et_stats->data[xstats_ctrl->dev_table_idx[i]]; ++ if (pf <= 0) { ++ for (i = 0; i != total_stats; i++) { ++ i_idx = xstats_ctrl->dev_table_idx[i]; ++ o_idx = xstats_ctrl->xstats_o_idx[i]; ++ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) ++ continue; ++ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; ++ } ++ } else { ++ for (i = 0; i != total_stats; i++) { ++ i_idx = xstats_ctrl->dev_table_idx_2nd[i]; ++ o_idx = xstats_ctrl->xstats_o_idx_2nd[i]; ++ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) ++ continue; ++ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; ++ } + } + return 0; + } + +-/** ++/* + * Read device counters. + * + * @param dev + * Pointer to Ethernet device. +- * @param[out] stats ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param stats + * Counters table output buffer. + * + * @return +@@ -1296,7 +1344,7 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + * rte_errno is set. 
+ */ + int +-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) ++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; +@@ -1304,7 +1352,7 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + + memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n); + /* Read ifreq counters. */ +- if (priv->master && priv->pf_bond >= 0) { ++ if (bond_master) { + /* Sum xstats from bonding device member ports. */ + for (i = 0; i < priv->sh->bond.n_port; i++) { + ret = _mlx5_os_read_dev_counters(dev, i, stats); +@@ -1316,13 +1364,17 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + if (ret) + return ret; + } +- /* Read IB counters. */ +- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { ++ /* ++ * Read IB dev counters. ++ * The counters are unique per IB device but not per netdev IF. ++ * In bonding mode, getting the stats name only from 1 port is enough. ++ */ ++ for (i = xstats_ctrl->dev_cnt_start; i < xstats_ctrl->mlx5_stats_n; i++) { + if (!xstats_ctrl->info[i].dev) + continue; + /* return last xstats counter if fail to read. */ + if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, +- &stats[i]) == 0) ++ &stats[i]) == 0) + xstats_ctrl->xstats[i] = stats[i]; + else + stats[i] = xstats_ctrl->xstats[i]; +@@ -1330,18 +1382,24 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + return ret; + } + +-/** ++/* + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param n_stats ++ * Pointer to number of stats to store. ++ * @param n_stats_sec ++ * Pointer to number of stats to store for the 2nd port of the bond. + * + * @return +- * Number of statistics on success, negative errno value otherwise and +- * rte_errno is set. ++ * 0 on success, negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_os_get_stats_n(struct rte_eth_dev *dev) ++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct ethtool_drvinfo drvinfo; +@@ -1350,18 +1408,34 @@ mlx5_os_get_stats_n(struct rte_eth_dev *dev) + + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; +- if (priv->master && priv->pf_bond >= 0) +- /* Bonding PF. */ ++ /* Bonding PFs. 
*/ ++ if (bond_master) { + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, + SIOCETHTOOL, &ifr); +- else ++ if (ret) { ++ DRV_LOG(WARNING, "bonding port %u unable to query number of" ++ " statistics for the 1st slave, %d", PORT_ID(priv), ret); ++ return ret; ++ } ++ *n_stats = drvinfo.n_stats; ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, ++ SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "bonding port %u unable to query number of" ++ " statistics for the 2nd slave, %d", PORT_ID(priv), ret); ++ return ret; ++ } ++ *n_stats_sec = drvinfo.n_stats; ++ } else { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); +- if (ret) { +- DRV_LOG(WARNING, "port %u unable to query number of statistics", +- dev->data->port_id); +- return ret; ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to query number of statistics", ++ PORT_ID(priv)); ++ return ret; ++ } ++ *n_stats = drvinfo.n_stats; + } +- return drvinfo.n_stats; ++ return 0; + } + + static const struct mlx5_counter_ctrl mlx5_counters_init[] = { +@@ -1545,7 +1619,104 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { + }, + }; + +-static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); ++const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); ++ ++static int ++mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master, ++ struct ethtool_gstrings *strings, ++ uint32_t stats_n, uint32_t stats_n_2nd) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ struct ifreq ifr; ++ int ret; ++ uint32_t i, j, idx; ++ ++ /* Ensure no out of bounds access before. */ ++ MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS); ++ strings->cmd = ETHTOOL_GSTRINGS; ++ strings->string_set = ETH_SS_STATS; ++ strings->len = stats_n; ++ ifr.ifr_data = (caddr_t)strings; ++ if (bond_master) ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, ++ SIOCETHTOOL, &ifr); ++ else ++ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to get statistic names with %d", ++ PORT_ID(priv), ret); ++ return ret; ++ } ++ /* Reorganize the orders to reduce the iterations. */ ++ for (j = 0; j < xstats_n; j++) { ++ xstats_ctrl->dev_table_idx[j] = UINT16_MAX; ++ for (i = 0; i < stats_n; i++) { ++ const char *curr_string = ++ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ ++ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->dev_table_idx[j] = i; ++ xstats_ctrl->xstats_o_idx[j] = idx; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ } ++ } ++ } ++ if (!bond_master) { ++ /* Add dev counters, unique per IB device. */ ++ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; ++ for (j = 0; j != xstats_n; j++) { ++ if (mlx5_counters_init[j].dev) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ xstats_ctrl->hw_stats[idx] = 0; ++ } ++ } ++ return 0; ++ } ++ ++ strings->len = stats_n_2nd; ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, ++ SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d", ++ PORT_ID(priv), ret); ++ return ret; ++ } ++ /* The 2nd slave port may have a different strings set, based on the configuration. 
*/ ++ for (j = 0; j != xstats_n; j++) { ++ xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX; ++ for (i = 0; i != stats_n_2nd; i++) { ++ const char *curr_string = ++ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ ++ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { ++ xstats_ctrl->dev_table_idx_2nd[j] = i; ++ if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) { ++ /* Already mapped in the 1st slave port. */ ++ idx = xstats_ctrl->xstats_o_idx[j]; ++ xstats_ctrl->xstats_o_idx_2nd[j] = idx; ++ } else { ++ /* Append the new items to the end of the map. */ ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->xstats_o_idx_2nd[j] = idx; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ } ++ } ++ } ++ } ++ /* Dev counters are always at the last now. */ ++ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; ++ for (j = 0; j != xstats_n; j++) { ++ if (mlx5_counters_init[j].dev) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ xstats_ctrl->hw_stats[idx] = 0; ++ } ++ } ++ return 0; ++} + + /** + * Init the structures to read device counters. +@@ -1559,76 +1730,44 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; +- unsigned int i; +- unsigned int j; +- struct ifreq ifr; + struct ethtool_gstrings *strings = NULL; +- unsigned int dev_stats_n; ++ uint16_t dev_stats_n = 0; ++ uint16_t dev_stats_n_2nd = 0; ++ unsigned int max_stats_n; + unsigned int str_sz; + int ret; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + + /* So that it won't aggregate for each init. */ + xstats_ctrl->mlx5_stats_n = 0; +- ret = mlx5_os_get_stats_n(dev); ++ ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd); + if (ret < 0) { + DRV_LOG(WARNING, "port %u no extended statistics available", + dev->data->port_id); + return; + } +- dev_stats_n = ret; ++ max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd); + /* Allocate memory to grab stat names and values. */ +- str_sz = dev_stats_n * ETH_GSTRING_LEN; ++ str_sz = max_stats_n * ETH_GSTRING_LEN; + strings = (struct ethtool_gstrings *) + mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, + SOCKET_ID_ANY); + if (!strings) { + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", +- dev->data->port_id); ++ dev->data->port_id); + return; + } +- strings->cmd = ETHTOOL_GSTRINGS; +- strings->string_set = ETH_SS_STATS; +- strings->len = dev_stats_n; +- ifr.ifr_data = (caddr_t)strings; +- if (priv->master && priv->pf_bond >= 0) +- /* Bonding master. */ +- ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, +- SIOCETHTOOL, &ifr); +- else +- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); +- if (ret) { +- DRV_LOG(WARNING, "port %u unable to get statistic names", ++ ret = mlx5_os_get_stats_strings(dev, bond_master, strings, ++ dev_stats_n, dev_stats_n_2nd); ++ if (ret < 0) { ++ DRV_LOG(WARNING, "port %u failed to get the stats strings", + dev->data->port_id); + goto free; + } +- for (i = 0; i != dev_stats_n; ++i) { +- const char *curr_string = (const char *) +- &strings->data[i * ETH_GSTRING_LEN]; +- +- for (j = 0; j != xstats_n; ++j) { +- if (!strcmp(mlx5_counters_init[j].ctr_name, +- curr_string)) { +- unsigned int idx = xstats_ctrl->mlx5_stats_n++; +- +- xstats_ctrl->dev_table_idx[idx] = i; +- xstats_ctrl->info[idx] = mlx5_counters_init[j]; +- break; +- } +- } +- } +- /* Add dev counters. 
*/ +- MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); +- for (i = 0; i != xstats_n; ++i) { +- if (mlx5_counters_init[i].dev) { +- unsigned int idx = xstats_ctrl->mlx5_stats_n++; +- +- xstats_ctrl->info[idx] = mlx5_counters_init[i]; +- xstats_ctrl->hw_stats[idx] = 0; +- } +- } + xstats_ctrl->stats_n = dev_stats_n; ++ xstats_ctrl->stats_n_2nd = dev_stats_n_2nd; + /* Copy to base at first time. */ +- ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base); ++ ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base); + if (ret) + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); +@@ -1776,3 +1915,70 @@ exit: mlx5_free(sset_info); return ret; } @@ -46331,10 +58568,30 @@ index ed71289322..3f7a94c9ee 100644 + #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */ diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -index a71474c90a..28bf7211e4 100644 +index a71474c90a..b88ae631d9 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -@@ -474,6 +474,10 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) +@@ -455,15 +455,16 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv) + * Routine checks the reference counter and does actual + * resources creation/initialization only if counter is zero. + * +- * @param[in] priv +- * Pointer to the private device data structure. ++ * @param[in] eth_dev ++ * Pointer to the device. + * + * @return + * Zero on success, positive error code otherwise. + */ + static int +-mlx5_alloc_shared_dr(struct mlx5_priv *priv) ++mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev) + { ++ struct mlx5_priv *priv = eth_dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + char s[MLX5_NAME_SIZE] __rte_unused; + int err; +@@ -474,6 +475,10 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) err = mlx5_alloc_table_hash_list(priv); if (err) goto error; @@ -46345,7 +58602,52 @@ index a71474c90a..28bf7211e4 100644 if (priv->sh->config.dv_flow_en == 2) return 0; /* The resources below are only valid with DV support. */ -@@ -597,10 +601,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) +@@ -571,6 +576,44 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) + err = errno; + goto error; + } ++ ++ if (sh->config.dv_flow_en == 1) { ++ /* Query availability of metadata reg_c's. 
*/ ++ if (!priv->sh->metadata_regc_check_flag) { ++ err = mlx5_flow_discover_mreg_c(eth_dev); ++ if (err < 0) { ++ err = -err; ++ goto error; ++ } ++ } ++ if (!mlx5_flow_ext_mreg_supported(eth_dev)) { ++ DRV_LOG(DEBUG, ++ "port %u extensive metadata register is not supported", ++ eth_dev->data->port_id); ++ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { ++ DRV_LOG(ERR, "metadata mode %u is not supported " ++ "(no metadata registers available)", ++ sh->config.dv_xmeta_en); ++ err = ENOTSUP; ++ goto error; ++ } ++ } ++ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && ++ mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) { ++ sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, ++ MLX5_FLOW_MREG_HTABLE_SZ, ++ false, true, eth_dev, ++ flow_dv_mreg_create_cb, ++ flow_dv_mreg_match_cb, ++ flow_dv_mreg_remove_cb, ++ flow_dv_mreg_clone_cb, ++ flow_dv_mreg_clone_free_cb); ++ if (!sh->mreg_cp_tbl) { ++ err = ENOMEM; ++ goto error; ++ } ++ } ++ } + #endif + if (!sh->tunnel_hub && sh->config.dv_miss_info) + err = mlx5_alloc_tunnel_hub(sh); +@@ -597,10 +640,6 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) __mlx5_discovery_misc5_cap(priv); #endif /* HAVE_MLX5DV_DR */ @@ -46356,7 +58658,29 @@ index a71474c90a..28bf7211e4 100644 LIST_INIT(&sh->shared_rxqs); return 0; error: -@@ -873,10 +873,10 @@ mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused) +@@ -659,6 +698,10 @@ error: + mlx5_list_destroy(sh->dest_array_list); + sh->dest_array_list = NULL; + } ++ if (sh->mreg_cp_tbl) { ++ mlx5_hlist_destroy(sh->mreg_cp_tbl); ++ sh->mreg_cp_tbl = NULL; ++ } + return err; + } + +@@ -751,6 +794,10 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv) + mlx5_list_destroy(sh->dest_array_list); + sh->dest_array_list = NULL; + } ++ if (sh->mreg_cp_tbl) { ++ mlx5_hlist_destroy(sh->mreg_cp_tbl); ++ sh->mreg_cp_tbl = NULL; ++ } + } + + /** +@@ -873,10 +920,10 @@ mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused) */ if (!priv->sh->drop_action_check_flag) { if (!mlx5_flow_discover_dr_action_support(dev)) @@ -46369,7 +58693,35 @@ index a71474c90a..28bf7211e4 100644 priv->root_drop_action = priv->sh->dr_drop_action; else priv->root_drop_action = priv->drop_queue.hrxq->action; -@@ -1613,6 +1613,23 @@ err_secondary: +@@ -1508,13 +1555,6 @@ err_secondary: + } + /* Create context for virtual machine VLAN workaround. */ + priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); +- if (sh->config.dv_flow_en) { +- err = mlx5_alloc_shared_dr(priv); +- if (err) +- goto error; +- if (mlx5_flex_item_port_init(eth_dev) < 0) +- goto error; +- } + if (mlx5_devx_obj_ops_en(sh)) { + priv->obj_ops = devx_obj_ops; + mlx5_queue_counter_id_prepare(eth_dev); +@@ -1565,6 +1605,13 @@ err_secondary: + goto error; + } + rte_rwlock_init(&priv->ind_tbls_lock); ++ if (sh->config.dv_flow_en) { ++ err = mlx5_alloc_shared_dr(eth_dev); ++ if (err) ++ goto error; ++ if (mlx5_flex_item_port_init(eth_dev) < 0) ++ goto error; ++ } + if (priv->sh->config.dv_flow_en == 2) { + #ifdef HAVE_MLX5_HWS_SUPPORT + if (priv->sh->config.dv_esw_en) { +@@ -1613,6 +1660,23 @@ err_secondary: err = EINVAL; goto error; } @@ -46393,8 +58745,88 @@ index a71474c90a..28bf7211e4 100644 return eth_dev; #else DRV_LOG(ERR, "DV support is missing for HWS."); +@@ -1631,43 +1695,6 @@ err_secondary: + err = -err; + goto error; + } +- /* Query availability of metadata reg_c's. 
*/ +- if (!priv->sh->metadata_regc_check_flag) { +- err = mlx5_flow_discover_mreg_c(eth_dev); +- if (err < 0) { +- err = -err; +- goto error; +- } +- } +- if (!mlx5_flow_ext_mreg_supported(eth_dev)) { +- DRV_LOG(DEBUG, +- "port %u extensive metadata register is not supported", +- eth_dev->data->port_id); +- if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { +- DRV_LOG(ERR, "metadata mode %u is not supported " +- "(no metadata registers available)", +- sh->config.dv_xmeta_en); +- err = ENOTSUP; +- goto error; +- } +- } +- if (sh->config.dv_flow_en && +- sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && +- mlx5_flow_ext_mreg_supported(eth_dev) && +- priv->sh->dv_regc0_mask) { +- priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, +- MLX5_FLOW_MREG_HTABLE_SZ, +- false, true, eth_dev, +- flow_dv_mreg_create_cb, +- flow_dv_mreg_match_cb, +- flow_dv_mreg_remove_cb, +- flow_dv_mreg_clone_cb, +- flow_dv_mreg_clone_free_cb); +- if (!priv->mreg_cp_tbl) { +- err = ENOMEM; +- goto error; +- } +- } + rte_spinlock_init(&priv->shared_act_sl); + mlx5_flow_counter_mode_config(eth_dev); + mlx5_flow_drop_action_config(eth_dev); +@@ -1686,8 +1713,6 @@ error: + priv->sh->config.dv_esw_en) + flow_hw_destroy_vport_action(eth_dev); + #endif +- if (priv->mreg_cp_tbl) +- mlx5_hlist_destroy(priv->mreg_cp_tbl); + if (priv->sh) + mlx5_os_free_shared_dr(priv); + if (priv->nl_socket_route >= 0) +@@ -2145,8 +2170,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, + list[ns].info.master = 0; + list[ns].info.representor = 0; + } +- if (list[ns].info.port_name == bd) +- ns++; ++ ns++; + break; + case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: + /* Fallthrough */ +@@ -2665,9 +2689,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, + + if (priv->sh) { + if (priv->q_counters != NULL && +- strcmp(ctr_name, "out_of_buffer") == 0) ++ strcmp(ctr_name, "out_of_buffer") == 0) { ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ DRV_LOG(WARNING, "Devx out_of_buffer counter is not supported in the secondary process"); ++ rte_errno = ENOTSUP; ++ return 1; ++ } + return mlx5_devx_cmd_queue_counter_query + (priv->q_counters, 0, (uint32_t *)stat); ++ } + MKSTR(path, "%s/ports/%d/hw_counters/%s", + priv->sh->ibdev_path, + priv->dev_port, diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index e55be8720e..1a5f95b22b 100644 +index e55be8720e..d66254740b 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c @@ -241,7 +241,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { @@ -46460,7 +58892,49 @@ index e55be8720e..1a5f95b22b 100644 mlx5_free(dev->process_private); dev->process_private = NULL; } -@@ -2473,6 +2494,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, +@@ -2037,6 +2058,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) + mlx5_flex_item_port_cleanup(dev); + #ifdef HAVE_MLX5_HWS_SUPPORT + flow_hw_destroy_vport_action(dev); ++ /* dr context will be closed after mlx5_os_free_shared_dr. */ + flow_hw_resource_release(dev); + flow_hw_clear_port_info(dev); + if (priv->sh->config.dv_flow_en == 2) { +@@ -2053,7 +2075,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) + mlx5_free(priv->rxq_privs); + priv->rxq_privs = NULL; + } +- if (priv->txqs != NULL) { ++ if (priv->txqs != NULL && dev->data->tx_queues != NULL) { + /* XXX race condition if mlx5_tx_burst() is still running. 
*/ + rte_delay_us_sleep(1000); + for (i = 0; (i != priv->txqs_n); ++i) +@@ -2062,16 +2084,20 @@ mlx5_dev_close(struct rte_eth_dev *dev) + priv->txqs = NULL; + } + mlx5_proc_priv_uninit(dev); ++ if (priv->drop_queue.hrxq) ++ mlx5_drop_action_destroy(dev); + if (priv->q_counters) { + mlx5_devx_cmd_destroy(priv->q_counters); + priv->q_counters = NULL; + } +- if (priv->drop_queue.hrxq) +- mlx5_drop_action_destroy(dev); +- if (priv->mreg_cp_tbl) +- mlx5_hlist_destroy(priv->mreg_cp_tbl); + mlx5_mprq_free_mp(dev); + mlx5_os_free_shared_dr(priv); ++#ifdef HAVE_MLX5_HWS_SUPPORT ++ if (priv->dr_ctx) { ++ claim_zero(mlx5dr_context_close(priv->dr_ctx)); ++ priv->dr_ctx = NULL; ++ } ++#endif + if (priv->rss_conf.rss_key != NULL) + mlx5_free(priv->rss_conf.rss_key); + if (priv->reta_idx != NULL) +@@ -2473,6 +2499,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; @@ -46469,10 +58943,41 @@ index e55be8720e..1a5f95b22b 100644 config->std_delay_drop = 0; config->hp_delay_drop = 0; diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 31982002ee..96a269ccd0 100644 +index 31982002ee..5d826527b2 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h -@@ -1245,7 +1245,7 @@ struct mlx5_aso_ct_action { +@@ -234,16 +234,29 @@ struct mlx5_counter_ctrl { + struct mlx5_xstats_ctrl { + /* Number of device stats. */ + uint16_t stats_n; ++ /* Number of device stats, for the 2nd port in bond. */ ++ uint16_t stats_n_2nd; + /* Number of device stats identified by PMD. */ +- uint16_t mlx5_stats_n; ++ uint16_t mlx5_stats_n; ++ /* First device counters index. */ ++ uint16_t dev_cnt_start; + /* Index in the device counters table. */ + uint16_t dev_table_idx[MLX5_MAX_XSTATS]; ++ /* Index in the output table. */ ++ uint16_t xstats_o_idx[MLX5_MAX_XSTATS]; + uint64_t base[MLX5_MAX_XSTATS]; + uint64_t xstats[MLX5_MAX_XSTATS]; + uint64_t hw_stats[MLX5_MAX_XSTATS]; + struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; ++ /* Index in the device counters table, for the 2nd port in bond. */ ++ uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS]; ++ /* Index in the output table, for the 2nd port in bond. */ ++ uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS]; + }; + ++/* xstats array size. */ ++extern const unsigned int xstats_n; ++ + struct mlx5_stats_ctrl { + /* Base for imissed counter. */ + uint64_t imissed_base; +@@ -1245,7 +1258,7 @@ struct mlx5_aso_ct_action { /* General action object for reply dir. */ void *dr_action_rply; uint32_t refcnt; /* Action used count in device flows. */ @@ -46481,7 +58986,7 @@ index 31982002ee..96a269ccd0 100644 uint16_t peer; /* The only peer port index could also use this CT. */ enum mlx5_aso_ct_state state; /* ASO CT state. */ bool is_original; /* The direction of the DR action to be used. */ -@@ -1367,7 +1367,7 @@ struct mlx5_dev_ctx_shared { +@@ -1367,7 +1380,7 @@ struct mlx5_dev_ctx_shared { uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */ uint32_t tunnel_header_2_3:1; /* tunnel_header_2_3 is supported. */ uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */ @@ -46490,7 +58995,16 @@ index 31982002ee..96a269ccd0 100644 uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */ uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */ uint32_t metadata_regc_check_flag:1; /* Check Flag for metadata REGC. 
*/ -@@ -1455,6 +1455,8 @@ struct mlx5_dev_ctx_shared { +@@ -1404,6 +1417,8 @@ struct mlx5_dev_ctx_shared { + struct mlx5_hlist *flow_tbls; /* SWS flow table. */ + struct mlx5_hlist *groups; /* HWS flow group. */ + }; ++ struct mlx5_hlist *mreg_cp_tbl; ++ /* Hash table of Rx metadata register copy table. */ + struct mlx5_flow_tunnel_hub *tunnel_hub; + /* Direct Rules tables for FDB, NIC TX+RX */ + void *dr_drop_action; /* Pointer to DR drop action, any domain. */ +@@ -1455,6 +1470,8 @@ struct mlx5_dev_ctx_shared { uint32_t host_shaper_rate:8; uint32_t lwm_triggered:1; struct mlx5_hws_cnt_svc_mng *cnt_svc; @@ -46499,7 +59013,7 @@ index 31982002ee..96a269ccd0 100644 struct mlx5_dev_shared_port port[]; /* per device port data array. */ }; -@@ -1463,6 +1465,8 @@ struct mlx5_dev_ctx_shared { +@@ -1463,6 +1480,8 @@ struct mlx5_dev_ctx_shared { * Caution, secondary process may rebuild the struct during port start. */ struct mlx5_proc_priv { @@ -46508,7 +59022,7 @@ index 31982002ee..96a269ccd0 100644 size_t uar_table_sz; /* Size of UAR register table. */ struct mlx5_uar_data uar_table[]; -@@ -1635,10 +1639,50 @@ struct mlx5_obj_ops { +@@ -1635,10 +1654,50 @@ struct mlx5_obj_ops { #define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields) @@ -46559,7 +59073,7 @@ index 31982002ee..96a269ccd0 100644 }; struct mlx5_flow_hw_ctrl_rx; -@@ -1663,6 +1707,7 @@ struct mlx5_priv { +@@ -1663,6 +1722,7 @@ struct mlx5_priv { unsigned int mtr_en:1; /* Whether support meter. */ unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */ unsigned int lb_used:1; /* Loopback queue is referred to. */ @@ -46567,20 +59081,29 @@ index 31982002ee..96a269ccd0 100644 uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */ uint16_t domain_id; /* Switch domain identifier. */ uint16_t vport_id; /* Associated VF vport index (if any). */ -@@ -1685,10 +1730,12 @@ struct mlx5_priv { +@@ -1685,10 +1745,8 @@ struct mlx5_priv { void *root_drop_action; /* Pointer to root drop action. */ rte_spinlock_t hw_ctrl_lock; LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows; +- struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; +- struct rte_flow_template_table *hw_esw_sq_miss_tbl; +- struct rte_flow_template_table *hw_esw_zero_tbl; +- struct rte_flow_template_table *hw_tx_meta_cpy_tbl; + LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows; - struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; - struct rte_flow_template_table *hw_esw_sq_miss_tbl; - struct rte_flow_template_table *hw_esw_zero_tbl; - struct rte_flow_template_table *hw_tx_meta_cpy_tbl; -+ struct rte_flow_template_table *hw_lacp_rx_tbl; ++ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; struct rte_flow_pattern_template *hw_tx_repr_tagging_pt; struct rte_flow_actions_template *hw_tx_repr_tagging_at; struct rte_flow_template_table *hw_tx_repr_tagging_tbl; -@@ -1768,6 +1815,8 @@ struct mlx5_priv { +@@ -1720,8 +1778,6 @@ struct mlx5_priv { + int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ + int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ + struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */ +- struct mlx5_hlist *mreg_cp_tbl; +- /* Hash table of Rx metadata register copy table. */ + struct mlx5_mtr_config mtr_config; /* Meter configuration */ + uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */ + uint8_t mtr_color_reg; /* Meter color match REG_C. */ +@@ -1768,6 +1824,8 @@ struct mlx5_priv { struct mlx5dr_action *hw_drop[2]; /* HW steering global tag action. 
*/ struct mlx5dr_action *hw_tag[2]; @@ -46589,7 +59112,19 @@ index 31982002ee..96a269ccd0 100644 /* HW steering create ongoing rte flow table list header. */ LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo; struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */ -@@ -2163,6 +2212,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, +@@ -1925,8 +1983,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + int mlx5_os_read_dev_stat(struct mlx5_priv *priv, + const char *ctr_name, uint64_t *stat); +-int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats); +-int mlx5_os_get_stats_n(struct rte_eth_dev *dev); ++int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats); ++int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec); + void mlx5_os_stats_init(struct rte_eth_dev *dev); + int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev); + +@@ -2163,6 +2222,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int n, unsigned int n_used); void mlx5_txpp_interrupt_handler(void *cb_arg); @@ -46613,8 +59148,25 @@ index 02deaac612..7e0ec91328 100644 if (hrxq->tir != NULL) mlx5_devx_tir_destroy(hrxq); if (hrxq->ind_table->ind_table != NULL) +diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c +index 4a85415ff3..df7cd241a2 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c ++++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c +@@ -146,6 +146,12 @@ mlx5_dev_configure(struct rte_eth_dev *dev) + ret = mlx5_proc_priv_init(dev); + if (ret) + return ret; ++ ret = mlx5_dev_set_mtu(dev, dev->data->mtu); ++ if (ret) { ++ DRV_LOG(ERR, "port %u failed to set MTU to %u", dev->data->port_id, ++ dev->data->mtu); ++ return ret; ++ } + return 0; + } + diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index a0cf677fb0..01b463adec 100644 +index a0cf677fb0..a44ccea436 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -364,7 +364,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], @@ -46675,7 +59227,153 @@ index a0cf677fb0..01b463adec 100644 return 0; } -@@ -5878,6 +5891,7 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -4858,8 +4871,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, + }; + + /* Check if already registered. */ +- MLX5_ASSERT(priv->mreg_cp_tbl); +- entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx); ++ MLX5_ASSERT(priv->sh->mreg_cp_tbl); ++ entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx); + if (!entry) + return NULL; + return container_of(entry, struct mlx5_flow_mreg_copy_resource, +@@ -4898,10 +4911,10 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev, + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); +- if (!mcp_res || !priv->mreg_cp_tbl) ++ if (!mcp_res || !priv->sh->mreg_cp_tbl) + return; + MLX5_ASSERT(mcp_res->rix_flow); +- mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent); ++ mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent); + flow->rix_mreg_copy = 0; + } + +@@ -4923,14 +4936,14 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) + uint32_t mark_id; + + /* Check if default flow is registered. 
*/ +- if (!priv->mreg_cp_tbl) ++ if (!priv->sh->mreg_cp_tbl) + return; + mark_id = MLX5_DEFAULT_COPY_ID; + ctx.data = &mark_id; +- entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx); ++ entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx); + if (!entry) + return; +- mlx5_hlist_unregister(priv->mreg_cp_tbl, entry); ++ mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry); + } + + /** +@@ -4968,7 +4981,7 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, + */ + mark_id = MLX5_DEFAULT_COPY_ID; + ctx.data = &mark_id; +- if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx)) ++ if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx)) + return 0; + mcp_res = flow_mreg_add_copy_action(dev, mark_id, error); + if (!mcp_res) +@@ -5122,6 +5135,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, + } + break; + case RTE_FLOW_ACTION_TYPE_COUNT: ++ case RTE_FLOW_ACTION_TYPE_AGE: + if (encap) { + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); +@@ -5445,8 +5459,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + struct mlx5_rte_flow_item_tag *tag_item_spec; + struct mlx5_rte_flow_item_tag *tag_item_mask; + uint32_t tag_id = 0; +- struct rte_flow_item *vlan_item_dst = NULL; +- const struct rte_flow_item *vlan_item_src = NULL; ++ bool vlan_actions; ++ struct rte_flow_item *orig_sfx_items = sfx_items; + const struct rte_flow_item *orig_items = items; + struct rte_flow_action *hw_mtr_action; + struct rte_flow_action *action_pre_head = NULL; +@@ -5463,6 +5477,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + + /* Prepare the suffix subflow items. */ + tag_item = sfx_items++; ++ tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int item_type = items->type; + +@@ -5485,10 +5500,13 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + sfx_items++; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: +- /* Determine if copy vlan item below. */ +- vlan_item_src = items; +- vlan_item_dst = sfx_items++; +- vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; ++ /* ++ * Copy VLAN items in case VLAN actions are performed. ++ * If there are no VLAN actions, these items will be VOID. ++ */ ++ memcpy(sfx_items, items, sizeof(*sfx_items)); ++ sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; ++ sfx_items++; + break; + default: + break; +@@ -5505,6 +5523,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + tag_action = actions_pre++; + } + /* Prepare the actions for prefix and suffix flow. */ ++ vlan_actions = false; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + struct rte_flow_action *action_cur = NULL; + +@@ -5535,16 +5554,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: +- if (vlan_item_dst && vlan_item_src) { +- memcpy(vlan_item_dst, vlan_item_src, +- sizeof(*vlan_item_dst)); +- /* +- * Convert to internal match item, it is used +- * for vlan push and set vid. +- */ +- vlan_item_dst->type = (enum rte_flow_item_type) +- MLX5_RTE_FLOW_ITEM_TYPE_VLAN; +- } ++ vlan_actions = true; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + if (fm->def_policy) +@@ -5559,6 +5569,14 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + actions_sfx++ : actions_pre++; + memcpy(action_cur, actions, sizeof(struct rte_flow_action)); + } ++ /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. 
*/ ++ if (!vlan_actions) { ++ struct rte_flow_item *it = orig_sfx_items; ++ ++ for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) ++ if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) ++ it->type = RTE_FLOW_ITEM_TYPE_VOID; ++ } + /* Add end action to the actions. */ + actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; + if (priv->sh->meter_aso_en) { +@@ -5648,8 +5666,6 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + tag_action->conf = set_tag; +- tag_item->type = (enum rte_flow_item_type) +- MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item->spec = tag_item_spec; + tag_item->last = NULL; + tag_item->mask = tag_item_mask; +@@ -5878,6 +5894,7 @@ flow_check_match_action(const struct rte_flow_action actions[], { const struct rte_flow_action_sample *sample; const struct rte_flow_action_raw_decap *decap; @@ -46683,7 +59381,7 @@ index a0cf677fb0..01b463adec 100644 int actions_n = 0; uint32_t ratio = 0; int sub_type = 0; -@@ -5938,12 +5952,12 @@ flow_check_match_action(const struct rte_flow_action actions[], +@@ -5938,12 +5955,12 @@ flow_check_match_action(const struct rte_flow_action actions[], break; case RTE_FLOW_ACTION_TYPE_RAW_DECAP: decap = actions->conf; @@ -46700,7 +59398,7 @@ index a0cf677fb0..01b463adec 100644 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE && encap->size > -@@ -6125,13 +6139,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, +@@ -6125,13 +6142,14 @@ flow_sample_split_prep(struct rte_eth_dev *dev, /* Prepare the prefix tag action. */ append_index++; set_tag = (void *)(actions_pre + actions_n + append_index); @@ -46717,7 +59415,50 @@ index a0cf677fb0..01b463adec 100644 if (ret < 0) return ret; mlx5_ipool_malloc(priv->sh->ipool -@@ -6918,36 +6933,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) +@@ -6475,6 +6493,19 @@ flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev, + &drop_split_info, error); + } + ++static int ++flow_count_vlan_items(const struct rte_flow_item items[]) ++{ ++ int items_n = 0; ++ ++ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { ++ if (items->type == RTE_FLOW_ITEM_TYPE_VLAN || ++ items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) ++ items_n++; ++ } ++ return items_n; ++} ++ + /** + * The splitting for meter feature. + * +@@ -6530,6 +6561,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, + size_t act_size; + size_t item_size; + int actions_n = 0; ++ int vlan_items_n = 0; + int ret = 0; + + if (priv->mtr_en) +@@ -6589,9 +6621,11 @@ flow_create_split_meter(struct rte_eth_dev *dev, + act_size = (sizeof(struct rte_flow_action) * + (actions_n + METER_PREFIX_ACTION)) + + sizeof(struct mlx5_rte_flow_action_set_tag); +- /* Suffix items: tag, vlan, port id, end. */ +-#define METER_SUFFIX_ITEM 4 +- item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + ++ /* Flow can have multiple VLAN items. Account for them in suffix items. */ ++ vlan_items_n = flow_count_vlan_items(items); ++ /* Suffix items: tag, [vlans], port id, end. */ ++#define METER_SUFFIX_ITEM 3 ++ item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) + + sizeof(struct mlx5_rte_flow_item_tag) * 2; + sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size), + 0, SOCKET_ID_ANY); +@@ -6918,36 +6952,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow) return tunnel; } @@ -46754,7 +59495,7 @@ index a0cf677fb0..01b463adec 100644 /** * Create a flow and add it to @p list. 
* -@@ -7066,8 +7051,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, +@@ -7066,8 +7070,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, if (attr->ingress) rss = flow_get_rss_action(dev, p_actions_rx); if (rss) { @@ -46764,7 +59505,7 @@ index a0cf677fb0..01b463adec 100644 /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. -@@ -7555,7 +7539,6 @@ flow_release_workspace(void *data) +@@ -7555,7 +7558,6 @@ flow_release_workspace(void *data) while (wks) { next = wks->next; @@ -46772,7 +59513,7 @@ index a0cf677fb0..01b463adec 100644 free(wks); wks = next; } -@@ -7586,23 +7569,17 @@ mlx5_flow_get_thread_workspace(void) +@@ -7586,23 +7588,17 @@ mlx5_flow_get_thread_workspace(void) static struct mlx5_flow_workspace* flow_alloc_thread_workspace(void) { @@ -46802,7 +59543,7 @@ index a0cf677fb0..01b463adec 100644 } /** -@@ -7623,6 +7600,7 @@ mlx5_flow_push_thread_workspace(void) +@@ -7623,6 +7619,7 @@ mlx5_flow_push_thread_workspace(void) data = flow_alloc_thread_workspace(); if (!data) return NULL; @@ -46810,7 +59551,7 @@ index a0cf677fb0..01b463adec 100644 } else if (!curr->inuse) { data = curr; } else if (curr->next) { -@@ -7971,6 +7949,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, +@@ -7971,6 +7968,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, "port must be stopped first"); return -rte_errno; } @@ -46821,7 +59562,7 @@ index a0cf677fb0..01b463adec 100644 priv->isolated = !!enable; if (enable) dev->dev_ops = &mlx5_dev_ops_isolate; -@@ -9758,23 +9740,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -9758,23 +9759,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, } i = lcore_index; @@ -46886,7 +59627,7 @@ index a0cf677fb0..01b463adec 100644 } } -@@ -10104,9 +10110,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, +@@ -10104,9 +10129,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr)); int ret; @@ -46908,7 +59649,7 @@ index a0cf677fb0..01b463adec 100644 if (ret) return ret; return flow_drv_action_update(dev, handle, update, fops, -@@ -10841,7 +10857,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, +@@ -10841,7 +10876,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, if (!is_tunnel_offload_active(dev)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, @@ -46918,9 +59659,18 @@ index a0cf677fb0..01b463adec 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h -index 1f57ecd6e1..eb87f84166 100644 +index 1f57ecd6e1..4f3a216ed4 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.h +++ b/dpdk/drivers/net/mlx5/mlx5_flow.h +@@ -75,7 +75,7 @@ enum { + /* Now, the maximal ports will be supported is 16, action number is 32M. */ + #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10 + +-#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22 ++#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25 + #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) + + /* 29-31: type, 25-28: owner port, 0-24: index */ @@ -1437,10 +1437,10 @@ struct mlx5_flow_workspace { /* If creating another flow in same thread, push new as stack. */ struct mlx5_flow_workspace *prev; @@ -46933,7 +59683,36 @@ index 1f57ecd6e1..eb87f84166 100644 uint32_t flow_idx; /* Intermediate device flow index. */ struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. 
*/ struct mlx5_flow_meter_policy *policy; -@@ -1926,6 +1926,7 @@ struct mlx5_flow_driver_ops { +@@ -1594,6 +1594,28 @@ flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id) + } + } + ++static __rte_always_inline int ++flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val) ++{ ++#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) ++ uint32_t port; ++ ++ MLX5_ETH_FOREACH_DEV(port, NULL) { ++ struct mlx5_priv *priv; ++ priv = rte_eth_devices[port].data->dev_private; ++ ++ if (priv->dr_ctx == dr_ctx) { ++ *port_val = port; ++ return 0; ++ } ++ } ++#else ++ RTE_SET_USED(dr_ctx); ++ RTE_SET_USED(port_val); ++#endif ++ return -EINVAL; ++} ++ + void flow_hw_set_port_info(struct rte_eth_dev *dev); + void flow_hw_clear_port_info(struct rte_eth_dev *dev); + +@@ -1926,6 +1948,7 @@ struct mlx5_flow_driver_ops { struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); void mlx5_flow_pop_thread_workspace(void); struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); @@ -46941,7 +59720,33 @@ index 1f57ecd6e1..eb87f84166 100644 __extension__ struct flow_grp_info { uint64_t external:1; -@@ -2226,7 +2227,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, +@@ -2185,6 +2208,25 @@ struct mlx5_flow_hw_ctrl_rx { + [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; + }; + ++/* Contains all templates required for control flow rules in FDB with HWS. */ ++struct mlx5_flow_hw_ctrl_fdb { ++ struct rte_flow_pattern_template *esw_mgr_items_tmpl; ++ struct rte_flow_actions_template *regc_jump_actions_tmpl; ++ struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; ++ struct rte_flow_pattern_template *regc_sq_items_tmpl; ++ struct rte_flow_actions_template *port_actions_tmpl; ++ struct rte_flow_template_table *hw_esw_sq_miss_tbl; ++ struct rte_flow_pattern_template *port_items_tmpl; ++ struct rte_flow_actions_template *jump_one_actions_tmpl; ++ struct rte_flow_template_table *hw_esw_zero_tbl; ++ struct rte_flow_pattern_template *tx_meta_items_tmpl; ++ struct rte_flow_actions_template *tx_meta_actions_tmpl; ++ struct rte_flow_template_table *hw_tx_meta_cpy_tbl; ++ struct rte_flow_pattern_template *lacp_rx_items_tmpl; ++ struct rte_flow_actions_template *lacp_rx_actions_tmpl; ++ struct rte_flow_template_table *hw_lacp_rx_tbl; ++}; ++ + #define MLX5_CTRL_PROMISCUOUS (RTE_BIT32(0)) + #define MLX5_CTRL_ALL_MULTICAST (RTE_BIT32(1)) + #define MLX5_CTRL_BROADCAST (RTE_BIT32(2)) +@@ -2226,7 +2268,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error); @@ -46951,7 +59756,7 @@ index 1f57ecd6e1..eb87f84166 100644 const struct rte_flow_attr *attr, struct rte_flow_error *error); int mlx5_flow_validate_action_flag(uint64_t action_flags, -@@ -2579,10 +2581,13 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, +@@ -2579,10 +2622,13 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev); int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, @@ -46982,10 +59787,62 @@ index 29bd7ce9e8..8441be3dea 100644 } do { diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 62c38b87a1..5c2af50fe5 100644 +index 62c38b87a1..6521b5b230 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -@@ -2129,6 +2129,8 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, +@@ -267,21 +267,41 @@ struct 
field_modify_info modify_tcp[] = { + {0, 0, 0}, + }; + +-static void ++enum mlx5_l3_tunnel_detection { ++ l3_tunnel_none, ++ l3_tunnel_outer, ++ l3_tunnel_inner ++}; ++ ++static enum mlx5_l3_tunnel_detection + mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, +- uint8_t next_protocol, uint64_t *item_flags, +- int *tunnel) ++ uint8_t next_protocol, uint64_t item_flags, ++ uint64_t *l3_tunnel_flag) + { ++ enum mlx5_l3_tunnel_detection td = l3_tunnel_none; ++ + MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6); +- if (next_protocol == IPPROTO_IPIP) { +- *item_flags |= MLX5_FLOW_LAYER_IPIP; +- *tunnel = 1; +- } +- if (next_protocol == IPPROTO_IPV6) { +- *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; +- *tunnel = 1; ++ if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) { ++ switch (next_protocol) { ++ case IPPROTO_IPIP: ++ td = l3_tunnel_outer; ++ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP; ++ break; ++ case IPPROTO_IPV6: ++ td = l3_tunnel_outer; ++ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP; ++ break; ++ default: ++ break; ++ } ++ } else { ++ td = l3_tunnel_inner; ++ *l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ? ++ MLX5_FLOW_LAYER_IPIP : ++ MLX5_FLOW_LAYER_IPV6_ENCAP; + } ++ return td; + } + + static inline struct mlx5_hlist * +@@ -2129,6 +2149,8 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, * Pointer to the rte_eth_dev structure. * @param[in] item * Item specification. @@ -46994,7 +59851,7 @@ index 62c38b87a1..5c2af50fe5 100644 * @param[in] attr * Attributes of flow that includes this item. * @param[out] error -@@ -2140,6 +2142,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, +@@ -2140,6 +2162,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused, static int flow_dv_validate_item_tag(struct rte_eth_dev *dev, const struct rte_flow_item *item, @@ -47002,7 +59859,7 @@ index 62c38b87a1..5c2af50fe5 100644 const struct rte_flow_attr *attr __rte_unused, struct rte_flow_error *error) { -@@ -2183,6 +2186,12 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, +@@ -2183,6 +2206,12 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev, if (ret < 0) return ret; MLX5_ASSERT(ret != REG_NON); @@ -47015,7 +59872,7 @@ index 62c38b87a1..5c2af50fe5 100644 return 0; } -@@ -4445,6 +4454,7 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) +@@ -4445,6 +4474,7 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) { struct rte_ether_hdr *eth = NULL; struct rte_vlan_hdr *vlan = NULL; @@ -47023,7 +59880,7 @@ index 62c38b87a1..5c2af50fe5 100644 struct rte_ipv6_hdr *ipv6 = NULL; struct rte_udp_hdr *udp = NULL; char *next_hdr; -@@ -4461,24 +4471,27 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) +@@ -4461,24 +4491,27 @@ flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error) next_hdr += sizeof(struct rte_vlan_hdr); } @@ -47062,7 +59919,30 @@ index 62c38b87a1..5c2af50fe5 100644 udp->dgram_cksum = 0; return 0; -@@ -5750,6 +5763,7 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) +@@ -5194,13 +5227,6 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, + &grp_info, error); + if (ret) + return ret; +- if (attributes->group == target_group && +- !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | +- MLX5_FLOW_ACTION_TUNNEL_MATCH))) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, NULL, +- "target group must be other than" +- " the current flow group"); + if (table 
== 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, +@@ -5662,7 +5688,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx) + "cannot allocate resource memory"); + return NULL; + } +- rte_memcpy(&entry->ft_type, ++ rte_memcpy(RTE_PTR_ADD(entry, offsetof(typeof(*entry), ft_type)), + RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), + key_len + data_len); + if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) +@@ -5750,6 +5776,7 @@ flow_dv_modify_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) */ static int flow_dv_validate_action_sample(uint64_t *action_flags, @@ -47070,7 +59950,7 @@ index 62c38b87a1..5c2af50fe5 100644 const struct rte_flow_action *action, struct rte_eth_dev *dev, const struct rte_flow_attr *attr, -@@ -5758,14 +5772,15 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5758,14 +5785,15 @@ flow_dv_validate_action_sample(uint64_t *action_flags, const struct rte_flow_action_rss **sample_rss, const struct rte_flow_action_count **count, int *fdb_mirror, @@ -47087,7 +59967,7 @@ index 62c38b87a1..5c2af50fe5 100644 uint16_t queue_index = 0xFFFF; int actions_n = 0; int ret; -@@ -5812,20 +5827,20 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5812,20 +5840,20 @@ flow_dv_validate_action_sample(uint64_t *action_flags, switch (act->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: ret = mlx5_flow_validate_action_queue(act, @@ -47111,7 +59991,7 @@ index 62c38b87a1..5c2af50fe5 100644 dev, attr, item_flags, error); -@@ -5841,48 +5856,57 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5841,48 +5869,57 @@ flow_dv_validate_action_sample(uint64_t *action_flags, "or level in the same flow"); if (*sample_rss != NULL && (*sample_rss)->queue_num) queue_index = (*sample_rss)->queue[0]; @@ -47178,7 +60058,7 @@ index 62c38b87a1..5c2af50fe5 100644 &actions_n, action, item_flags, error); if (ret < 0) return ret; -@@ -5891,12 +5915,12 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5891,12 +5928,12 @@ flow_dv_validate_action_sample(uint64_t *action_flags, case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: ret = flow_dv_validate_action_l2_encap(dev, @@ -47193,7 +60073,7 @@ index 62c38b87a1..5c2af50fe5 100644 ++actions_n; break; default: -@@ -5908,7 +5932,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5908,7 +5945,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags, } } if (attr->ingress) { @@ -47202,7 +60082,7 @@ index 62c38b87a1..5c2af50fe5 100644 MLX5_FLOW_ACTION_RSS))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -5930,17 +5954,17 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5930,17 +5967,17 @@ flow_dv_validate_action_sample(uint64_t *action_flags, "E-Switch doesn't support " "any optional action " "for sampling"); @@ -47223,7 +60103,7 @@ index 62c38b87a1..5c2af50fe5 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, -@@ -5949,16 +5973,16 @@ flow_dv_validate_action_sample(uint64_t *action_flags, +@@ -5949,16 +5986,16 @@ flow_dv_validate_action_sample(uint64_t *action_flags, *fdb_mirror = 1; } /* Continue validation for Xcap actions.*/ @@ -47243,7 +60123,63 @@ index 62c38b87a1..5c2af50fe5 100644 return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "encap is not supported" -@@ -7051,11 +7075,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -6761,11 +6798,13 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, + } + + 
static int +-validate_integrity_bits(const struct rte_flow_item_integrity *mask, ++validate_integrity_bits(const void *arg, + int64_t pattern_flags, uint64_t l3_flags, + uint64_t l4_flags, uint64_t ip4_flag, + struct rte_flow_error *error) + { ++ const struct rte_flow_item_integrity *mask = arg; ++ + if (mask->l3_ok && !(pattern_flags & l3_flags)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +@@ -6954,6 +6993,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev, + return 0; + } + ++static __rte_always_inline uint8_t ++mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item, ++ enum MLX5_SET_MATCHER key_type) ++{ ++#define MLX5_L3_NEXT_PROTOCOL(i, ms) \ ++ ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \ ++ ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \ ++ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \ ++ ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \ ++ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \ ++ ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\ ++ 0xff) ++ ++ uint8_t next_protocol; ++ ++ if (l3_item->mask != NULL && l3_item->spec != NULL) { ++ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); ++ if (next_protocol) ++ next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, spec); ++ else ++ next_protocol = 0xff; ++ } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) { ++ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); ++ } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) { ++ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec); ++ } else { ++ /* Reset for inner layer. */ ++ next_protocol = 0xff; ++ } ++ return next_protocol; ++ ++#undef MLX5_L3_NEXT_PROTOCOL ++} ++ + /** + * Internal validation function. For validating both actions and items. + * +@@ -7051,11 +7124,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, bool def_policy = false; bool shared_count = false; uint16_t udp_dport = 0; @@ -47262,7 +60198,118 @@ index 62c38b87a1..5c2af50fe5 100644 if (items == NULL) return -1; -@@ -7371,7 +7401,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7085,6 +7164,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return ret; + is_root = (uint64_t)ret; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { ++ enum mlx5_l3_tunnel_detection l3_tunnel_detection; ++ uint64_t l3_tunnel_flag; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int type = items->type; + +@@ -7162,8 +7243,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + vlan_m = items->mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol ++ (items, (enum MLX5_SET_MATCHER)-1); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + ret = flow_dv_validate_item_ipv4(dev, items, item_flags, + last_item, ether_type, + error); +@@ -7171,23 +7260,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return ret; + last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; +- if (items->mask != NULL && +- ((const struct rte_flow_item_ipv4 *) +- items->mask)->hdr.next_proto_id) { +- next_protocol = +- ((const struct rte_flow_item_ipv4 *) +- (items->spec))->hdr.next_proto_id; +- next_protocol &= +- ((const struct rte_flow_item_ipv4 *) +- (items->mask))->hdr.next_proto_id; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol ++ (items, (enum MLX5_SET_MATCHER)-1); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + ret = mlx5_flow_validate_item_ipv6(items, item_flags, + last_item, + ether_type, +@@ -7197,22 +7283,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; +- if (items->mask != NULL && +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto) { +- item_ipv6_proto = +- ((const struct rte_flow_item_ipv6 *) +- items->spec)->hdr.proto; +- next_protocol = +- ((const struct rte_flow_item_ipv6 *) +- items->spec)->hdr.proto; +- next_protocol &= +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + ret = flow_dv_validate_item_ipv6_frag_ext(items, +@@ -7223,19 +7295,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + last_item = tunnel ? + MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; +- if (items->mask != NULL && +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header) { +- next_protocol = +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->spec)->hdr.next_header; +- next_protocol &= +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header; +- } else { +- /* Reset for inner layer. 
*/ +- next_protocol = 0xff; +- } ++ next_protocol = mlx5_flow_l3_next_protocol ++ (items, (enum MLX5_SET_MATCHER)-1); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_validate_item_tcp +@@ -7371,7 +7432,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_LAYER_ICMP6; break; case RTE_FLOW_ITEM_TYPE_TAG: @@ -47271,7 +60318,7 @@ index 62c38b87a1..5c2af50fe5 100644 attr, error); if (ret < 0) return ret; -@@ -7381,6 +7411,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7381,6 +7442,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, last_item = MLX5_FLOW_ITEM_SQ; break; case MLX5_RTE_FLOW_ITEM_TYPE_TAG: @@ -47285,7 +60332,7 @@ index 62c38b87a1..5c2af50fe5 100644 break; case RTE_FLOW_ITEM_TYPE_GTP: ret = flow_dv_validate_item_gtp(dev, items, item_flags, -@@ -7486,6 +7523,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7486,6 +7554,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret) return ret; @@ -47300,7 +60347,7 @@ index 62c38b87a1..5c2af50fe5 100644 action_flags |= MLX5_FLOW_ACTION_PORT_ID; ++actions_n; break; -@@ -7562,7 +7607,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7562,7 +7638,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, rw_act_num += MLX5_ACT_NUM_SET_TAG; break; case RTE_FLOW_ACTION_TYPE_DROP: @@ -47309,7 +60356,7 @@ index 62c38b87a1..5c2af50fe5 100644 attr, error); if (ret < 0) return ret; -@@ -7985,11 +8030,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7985,11 +8061,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_SAMPLE: ret = flow_dv_validate_action_sample(&action_flags, @@ -47323,7 +60370,7 @@ index 62c38b87a1..5c2af50fe5 100644 is_root, error); if (ret < 0) -@@ -8301,6 +8348,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -8301,6 +8379,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "sample before ASO action is not supported"); @@ -47353,7 +60400,7 @@ index 62c38b87a1..5c2af50fe5 100644 } /* * Validation the NIC Egress flow on representor, except implicit -@@ -9223,12 +9293,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, +@@ -9223,12 +9324,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, { const struct rte_flow_item_vxlan *vxlan_m; const struct rte_flow_item_vxlan *vxlan_v; @@ -47366,7 +60413,7 @@ index 62c38b87a1..5c2af50fe5 100644 char *vni_v; uint16_t dport; int size; -@@ -9280,24 +9348,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, +@@ -9280,24 +9379,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, vni_v[i] = vxlan_m->vni[i] & vxlan_v->vni[i]; return; } @@ -47393,7 +60440,211 @@ index 62c38b87a1..5c2af50fe5 100644 } /** -@@ -13717,7 +13772,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, +@@ -9559,14 +9645,13 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, + { + const struct rte_flow_item_geneve_opt *geneve_opt_m; + const struct rte_flow_item_geneve_opt *geneve_opt_v; +- const struct rte_flow_item_geneve_opt *geneve_opt_vv = item->spec; +- void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); ++ const struct rte_flow_item_geneve_opt *orig_spec 
= item->spec; + void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); + rte_be32_t opt_data_key = 0, opt_data_mask = 0; +- uint32_t *data; ++ size_t option_byte_len; + int ret = 0; + +- if (MLX5_ITEM_VALID(item, key_type)) ++ if (MLX5_ITEM_VALID(item, key_type) || !orig_spec) + return -1; + MLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m, + &rte_flow_item_geneve_opt_mask); +@@ -9579,36 +9664,15 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, + return ret; + } + } +- /* +- * Set the option length in GENEVE header if not requested. +- * The GENEVE TLV option length is expressed by the option length field +- * in the GENEVE header. +- * If the option length was not requested but the GENEVE TLV option item +- * is present we set the option length field implicitly. +- */ +- if (!MLX5_GET16(fte_match_set_misc, misc_v, geneve_opt_len)) { +- if (key_type & MLX5_SET_MATCHER_M) +- MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, +- MLX5_GENEVE_OPTLEN_MASK); +- else +- MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, +- geneve_opt_v->option_len + 1); +- } +- /* Set the data. */ +- if (key_type == MLX5_SET_MATCHER_SW_V) +- data = geneve_opt_vv->data; +- else +- data = geneve_opt_v->data; +- if (data) { +- memcpy(&opt_data_key, data, +- RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), +- sizeof(opt_data_key))); +- memcpy(&opt_data_mask, geneve_opt_m->data, +- RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), +- sizeof(opt_data_mask))); ++ /* Convert the option length from DW to bytes for using memcpy. */ ++ option_byte_len = RTE_MIN((size_t)(orig_spec->option_len * 4), ++ sizeof(rte_be32_t)); ++ if (geneve_opt_v->data) { ++ memcpy(&opt_data_key, geneve_opt_v->data, option_byte_len); ++ memcpy(&opt_data_mask, geneve_opt_m->data, option_byte_len); + MLX5_SET(fte_match_set_misc3, misc3_v, +- geneve_tlv_option_0_data, +- rte_be_to_cpu_32(opt_data_key & opt_data_mask)); ++ geneve_tlv_option_0_data, ++ rte_be_to_cpu_32(opt_data_key & opt_data_mask)); + } + return ret; + } +@@ -13117,6 +13181,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev, + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Connection is not supported"); ++ if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "CT supports port indexes up to " ++ RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); ++ return 0; ++ } + idx = flow_dv_aso_ct_alloc(dev, error); + if (!idx) + return rte_flow_error_set(error, rte_errno, +@@ -13166,6 +13237,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev, + int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL); + int item_type = items->type; + uint64_t last_item = wks->last_item; ++ enum mlx5_l3_tunnel_detection l3_tunnel_detection; ++ uint64_t l3_tunnel_flag; + int ret; + + switch (item_type) { +@@ -13209,94 +13282,47 @@ flow_dv_translate_items(struct rte_eth_dev *dev, + MLX5_FLOW_LAYER_OUTER_VLAN); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &wks->item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ wks->item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ wks->item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + flow_dv_translate_item_ipv4(key, items, tunnel, + wks->group, key_type); + wks->priority = MLX5_PRIORITY_MAP_L3; + 
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; +- if (items->mask != NULL && +- items->spec != NULL && +- ((const struct rte_flow_item_ipv4 *) +- items->mask)->hdr.next_proto_id) { +- next_protocol = +- ((const struct rte_flow_item_ipv4 *) +- (items->spec))->hdr.next_proto_id; +- next_protocol &= +- ((const struct rte_flow_item_ipv4 *) +- (items->mask))->hdr.next_proto_id; +- } else if (key_type == MLX5_SET_MATCHER_HS_M && +- items->mask != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv4 *) +- (items->mask))->hdr.next_proto_id; +- } else if (key_type == MLX5_SET_MATCHER_HS_V && +- items->spec != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv4 *) +- (items->spec))->hdr.next_proto_id; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ wks->item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &wks->item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ wks->item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ wks->item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + flow_dv_translate_item_ipv6(key, items, tunnel, + wks->group, key_type); + wks->priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; +- if (items->mask != NULL && +- items->spec != NULL && +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto) { +- next_protocol = +- ((const struct rte_flow_item_ipv6 *) +- items->spec)->hdr.proto; +- next_protocol &= +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto; +- } else if (key_type == MLX5_SET_MATCHER_HS_M && +- items->mask != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6 *) +- (items->mask))->hdr.proto; +- } else if (key_type == MLX5_SET_MATCHER_HS_V && +- items->spec != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6 *) +- (items->spec))->hdr.proto; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ wks->item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + flow_dv_translate_item_ipv6_frag_ext + (key, items, tunnel, key_type); + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; +- if (items->mask != NULL && +- items->spec != NULL && +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header) { +- next_protocol = +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->spec)->hdr.next_header; +- next_protocol &= +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header; +- } else if (key_type == MLX5_SET_MATCHER_HS_M && +- items->mask != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) +- (items->mask))->hdr.next_header; +- } else if (key_type == MLX5_SET_MATCHER_HS_V && +- items->spec != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) +- (items->spec))->hdr.next_header; +- } else { +- /* Reset for inner layer. 
*/ +- next_protocol = 0xff; +- } ++ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + flow_dv_translate_item_tcp(key, items, tunnel, key_type); +@@ -13717,7 +13743,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, * is the suffix flow. */ dev_flow->handle->layers |= wks.item_flags; @@ -47403,11 +60654,11 @@ index 62c38b87a1..5c2af50fe5 100644 + * Avoid be overwritten by other sub mlx5_flows. + */ + if (wks.geneve_tlv_option) -+ dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option; ++ dev_flow->flow->geneve_tlv_option += wks.geneve_tlv_option; return 0; } -@@ -14820,7 +14880,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, +@@ -14820,7 +14851,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, } dv->actions[n++] = priv->sh->default_miss_action; } @@ -47416,7 +60667,38 @@ index 62c38b87a1..5c2af50fe5 100644 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask); err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, -@@ -17020,7 +17080,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) +@@ -14853,7 +14884,8 @@ error: + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dh, next) { + /* hrxq is union, don't clear it if the flag is not set. */ +- if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { ++ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq && ++ !dh->dvh.rix_sample && !dh->dvh.rix_dest_array) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; + } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { +@@ -15317,9 +15349,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) + flow_dv_aso_ct_release(dev, flow->ct, NULL); + else if (flow->age) + flow_dv_aso_age_release(dev, flow->age); +- if (flow->geneve_tlv_option) { ++ while (flow->geneve_tlv_option) { + flow_dev_geneve_tlv_option_resource_release(priv->sh); +- flow->geneve_tlv_option = 0; ++ flow->geneve_tlv_option--; + } + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; +@@ -15781,6 +15813,8 @@ flow_dv_action_create(struct rte_eth_dev *dev, + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + ret = flow_dv_translate_create_conntrack(dev, action->conf, + err); ++ if (!ret) ++ break; + idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret); + break; + default: +@@ -17020,7 +17054,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) static int __flow_dv_create_policy_flow(struct rte_eth_dev *dev, uint32_t color_reg_c_idx, @@ -47425,7 +60707,7 @@ index 62c38b87a1..5c2af50fe5 100644 int actions_n, void *actions, bool match_src_port, const struct rte_flow_item *item, void **rule, const struct rte_flow_attr *attr) -@@ -17050,9 +17110,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, +@@ -17050,9 +17084,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, } flow_dv_match_meta_reg(value.buf, (enum modify_reg)color_reg_c_idx, rte_col_2_mlx5_col(color), UINT32_MAX); @@ -47437,16 +60719,126 @@ index 62c38b87a1..5c2af50fe5 100644 actions_n, actions, rule); if (ret) { DRV_LOG(ERR, "Failed to create meter policy%d flow.", color); -@@ -17206,7 +17266,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, +@@ -17106,9 +17140,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, + } + } + tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl); +- if (priority < RTE_COLOR_RED) +- flow_dv_match_meta_reg(matcher.mask.buf, +- (enum modify_reg)color_reg_c_idx, color_mask, 
color_mask); ++ flow_dv_match_meta_reg(matcher.mask.buf, ++ (enum modify_reg)color_reg_c_idx, color_mask, color_mask); + matcher.priority = priority; + matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, + matcher.mask.size); +@@ -17142,7 +17175,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, + static int + __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter_sub_policy *sub_policy, +- uint8_t egress, uint8_t transfer, bool match_src_port, ++ uint8_t egress, uint8_t transfer, bool *match_src_port, + struct mlx5_meter_policy_acts acts[RTE_COLORS]) + { + struct mlx5_priv *priv = dev->data->dev_private; +@@ -17157,9 +17190,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + .reserved = 0, + }; + int i; ++ uint16_t priority; + int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err); + struct mlx5_sub_policy_color_rule *color_rule; +- bool svport_match; + struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL}; + + if (ret < 0) +@@ -17192,13 +17225,12 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + TAILQ_INSERT_TAIL(&sub_policy->color_rules[i], + color_rule, next_port); + color_rule->src_port = priv->representor_id; +- /* No use. */ +- attr.priority = i; ++ priority = (match_src_port[i] == match_src_port[RTE_COLOR_GREEN]) ? ++ MLX5_MTR_POLICY_MATCHER_PRIO : (MLX5_MTR_POLICY_MATCHER_PRIO + 1); + /* Create matchers for colors. */ +- svport_match = (i != RTE_COLOR_RED) ? match_src_port : false; + if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx, +- MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy, +- &attr, svport_match, NULL, ++ priority, sub_policy, ++ &attr, match_src_port[i], NULL, + &color_rule->matcher, &flow_err)) { + DRV_LOG(ERR, "Failed to create color%u matcher.", i); + goto err_exit; +@@ -17206,9 +17238,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, /* Create flow, matching color. */ if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)i, - color_rule->matcher->matcher_object, + color_rule->matcher, acts[i].actions_n, acts[i].dv_actions, - svport_match, NULL, &color_rule->rule, +- svport_match, NULL, &color_rule->rule, ++ match_src_port[i], NULL, &color_rule->rule, &attr)) { -@@ -17674,7 +17734,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, + DRV_LOG(ERR, "Failed to create color%u rule.", i); + goto err_exit; +@@ -17256,7 +17288,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; + uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0; + bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX); +- bool match_src_port = false; ++ bool match_src_port[RTE_COLORS] = {false}; + int i; + + /* If RSS or Queue, no previous actions / rules is created. 
*/ +@@ -17327,7 +17359,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + acts[i].dv_actions[acts[i].actions_n] = + port_action->action; + acts[i].actions_n++; +- match_src_port = true; ++ match_src_port[i] = true; + break; + case MLX5_FLOW_FATE_DROP: + case MLX5_FLOW_FATE_JUMP: +@@ -17379,7 +17411,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + acts[i].dv_actions[acts[i].actions_n++] = + tbl_data->jump.action; + if (mtr_policy->act_cnt[i].modify_hdr) +- match_src_port = !!transfer; ++ match_src_port[i] = !!transfer; + break; + default: + /*Queue action do nothing*/ +@@ -17393,9 +17425,9 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + "Failed to create policy rules per domain."); + goto err_exit; + } +- if (match_src_port) { +- mtr_policy->match_port = match_src_port; +- mtr_policy->hierarchy_match_port = match_src_port; ++ if (match_src_port[RTE_COLOR_GREEN] || match_src_port[RTE_COLOR_YELLOW]) { ++ mtr_policy->match_port = 1; ++ mtr_policy->hierarchy_match_port = 1; + } + return 0; + err_exit: +@@ -17457,6 +17489,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) + uint8_t egress, transfer; + struct rte_flow_error error; + struct mlx5_meter_policy_acts acts[RTE_COLORS]; ++ bool match_src_port[RTE_COLORS] = {false}; + int ret; + + egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; +@@ -17532,7 +17565,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) + /* Create default policy rules. */ + ret = __flow_dv_create_domain_policy_rules(dev, + &def_policy->sub_policy, +- egress, transfer, false, acts); ++ egress, transfer, match_src_port, acts); + if (ret) { + DRV_LOG(ERR, "Failed to create default policy rules."); + goto def_policy_error; +@@ -17674,7 +17707,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, actions[i++] = priv->sh->dr_drop_action; flow_dv_match_meta_reg_all(matcher_para.buf, value.buf, (enum modify_reg)mtr_id_reg_c, 0, 0); @@ -47455,7 +60847,7 @@ index 62c38b87a1..5c2af50fe5 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow (mtrmng->def_matcher[domain]->matcher_object, -@@ -17719,7 +17779,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -17719,7 +17752,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, fm->drop_cnt, NULL); actions[i++] = cnt->action; actions[i++] = priv->sh->dr_drop_action; @@ -47464,7 +60856,74 @@ index 62c38b87a1..5c2af50fe5 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object, (void *)&value, i, actions, -@@ -18199,7 +18259,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18091,7 +18124,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + struct { + struct mlx5_flow_meter_policy *fm_policy; + struct mlx5_flow_meter_info *next_fm; +- struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS]; ++ struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS]; + } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} }; + uint32_t fm_cnt = 0; + uint32_t i, j; +@@ -18125,14 +18158,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + mtr_policy = fm_info[i].fm_policy; + rte_spinlock_lock(&mtr_policy->sl); + sub_policy = mtr_policy->sub_policys[domain][0]; +- for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { ++ for (j = 0; j < RTE_COLORS; j++) { + uint8_t act_n = 0; +- struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; ++ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr = NULL; + struct 
mlx5_flow_dv_port_id_action_resource *port_action; ++ uint8_t fate_action; + +- if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR && +- mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_PORT_ID) +- continue; ++ if (j == RTE_COLOR_RED) { ++ fate_action = MLX5_FLOW_FATE_DROP; ++ } else { ++ fate_action = mtr_policy->act_cnt[j].fate_action; ++ modify_hdr = mtr_policy->act_cnt[j].modify_hdr; ++ if (fate_action != MLX5_FLOW_FATE_MTR && ++ fate_action != MLX5_FLOW_FATE_PORT_ID && ++ fate_action != MLX5_FLOW_FATE_DROP) ++ continue; ++ } + color_rule = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(struct mlx5_sub_policy_color_rule), + 0, SOCKET_ID_ANY); +@@ -18144,9 +18185,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + goto err_exit; + } + color_rule->src_port = src_port; +- modify_hdr = mtr_policy->act_cnt[j].modify_hdr; + /* Prepare to create color rule. */ +- if (mtr_policy->act_cnt[j].fate_action == MLX5_FLOW_FATE_MTR) { ++ if (fate_action == MLX5_FLOW_FATE_MTR) { + next_fm = fm_info[i].next_fm; + if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) { + mlx5_free(color_rule); +@@ -18173,7 +18213,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + } + acts.dv_actions[act_n++] = tbl_data->jump.action; + acts.actions_n = act_n; +- } else { ++ } else if (fate_action == MLX5_FLOW_FATE_PORT_ID) { + port_action = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], + mtr_policy->act_cnt[j].rix_port_id_action); +@@ -18186,6 +18226,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + acts.dv_actions[act_n++] = modify_hdr->action; + acts.dv_actions[act_n++] = port_action->action; + acts.actions_n = act_n; ++ } else { ++ acts.dv_actions[act_n++] = mtr_policy->dr_drop_action[domain]; ++ acts.actions_n = act_n; + } + fm_info[i].tag_rule[j] = color_rule; + TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port); +@@ -18199,7 +18242,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, goto err_exit; } if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j, @@ -47473,7 +60932,26 @@ index 62c38b87a1..5c2af50fe5 100644 acts.actions_n, acts.dv_actions, true, item, &color_rule->rule, &attr)) { rte_spinlock_unlock(&mtr_policy->sl); -@@ -18909,7 +18969,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, +@@ -18217,7 +18260,7 @@ err_exit: + mtr_policy = fm_info[i].fm_policy; + rte_spinlock_lock(&mtr_policy->sl); + sub_policy = mtr_policy->sub_policys[domain][0]; +- for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { ++ for (j = 0; j < RTE_COLORS; j++) { + color_rule = fm_info[i].tag_rule[j]; + if (!color_rule) + continue; +@@ -18547,8 +18590,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev, + LIST_FOREACH(act, &age_info->aged_aso, next) { + nb_flows++; + if (nb_contexts) { +- context[nb_flows - 1] = +- act->age_params.context; ++ context[nb_flows - 1] = act->age_params.context; + if (!(--nb_contexts)) + break; + } +@@ -18909,7 +18951,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_DROP: ret = mlx5_flow_validate_action_drop @@ -47482,7 +60960,24 @@ index 62c38b87a1..5c2af50fe5 100644 if (ret < 0) return -rte_mtr_error_set(error, ENOTSUP, -@@ -19243,7 +19303,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, +@@ -19104,11 +19146,13 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, + } + } + if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) { +- if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] & +- 
MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) ++ uint64_t hierarchy_type_flag = ++ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | MLX5_FLOW_ACTION_JUMP; ++ if (!(action_flags[RTE_COLOR_GREEN] & hierarchy_type_flag) || ++ !(action_flags[RTE_COLOR_YELLOW] & hierarchy_type_flag)) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, + NULL, +- "Meter hierarchy supports meter action only."); ++ "Unsupported action in meter hierarchy."); + } + /* If both colors have RSS, the attributes should be the same. */ + if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN], +@@ -19243,7 +19287,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, break; } /* Try to apply the flow to HW. */ @@ -47492,10 +60987,86 @@ index 62c38b87a1..5c2af50fe5 100644 err = mlx5_flow_os_create_flow (flow.handle->dvh.matcher->matcher_object, diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -index a3c8056515..6b889e9f81 100644 +index a3c8056515..aa315c054d 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -@@ -1243,6 +1243,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, +@@ -56,6 +56,20 @@ + #define MLX5_HW_VLAN_PUSH_VID_IDX 1 + #define MLX5_HW_VLAN_PUSH_PCP_IDX 2 + ++static bool ++mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error) ++{ ++ const struct mlx5_priv *priv = dev->data->dev_private; ++ ++ if (!priv->dr_ctx) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "non-template flow engine was not configured"); ++ return false; ++ } ++ return true; ++} ++ + static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev); + static int flow_hw_translate_group(struct rte_eth_dev *dev, + const struct mlx5_flow_template_table_cfg *cfg, +@@ -72,6 +86,10 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, + static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev); + static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev); + ++static void ++flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow, ++ struct rte_flow_error *error); ++ + const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops; + + /* DR action flags with different table. */ +@@ -765,15 +783,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev, + if (!shared_rss || __flow_hw_act_data_shared_rss_append + (priv, acts, + (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS, +- action_src, action_dst, idx, shared_rss)) ++ action_src, action_dst, idx, shared_rss)) { ++ DRV_LOG(WARNING, "Indirect RSS action index %d translate failed", act_idx); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_COUNT: + if (__flow_hw_act_data_shared_cnt_append(priv, acts, + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COUNT, +- action_src, action_dst, act_idx)) ++ action_src, action_dst, act_idx)) { ++ DRV_LOG(WARNING, "Indirect count action translate failed"); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_AGE: + /* Not supported, prevent by validate function. 
*/ +@@ -781,15 +803,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev, + break; + case MLX5_INDIRECT_ACTION_TYPE_CT: + if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, +- idx, &acts->rule_acts[action_dst])) ++ idx, &acts->rule_acts[action_dst])) { ++ DRV_LOG(WARNING, "Indirect CT action translate failed"); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: + if (__flow_hw_act_data_shared_mtr_append(priv, acts, + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK, +- action_src, action_dst, idx)) ++ action_src, action_dst, idx)) { ++ DRV_LOG(WARNING, "Indirect meter mark action translate failed"); + return -1; ++ } + break; + default: + DRV_LOG(WARNING, "Unsupported shared action type:%d", type); +@@ -1243,6 +1269,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, struct mlx5_flow_meter_info *fm; uint32_t mtr_id; @@ -47504,7 +61075,7 @@ index a3c8056515..6b889e9f81 100644 aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id); if (!aso_mtr) return NULL; -@@ -1361,7 +1363,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, +@@ -1361,7 +1389,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, else type = MLX5DR_TABLE_TYPE_NIC_RX; for (; !actions_end; actions++, masks++) { @@ -47513,7 +61084,7 @@ index a3c8056515..6b889e9f81 100644 case RTE_FLOW_ACTION_TYPE_INDIRECT: action_pos = at->actions_off[actions - at->actions]; if (!attr->group) { -@@ -1665,6 +1667,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, +@@ -1665,6 +1693,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, action_pos)) goto err; break; @@ -47530,7 +61101,262 @@ index a3c8056515..6b889e9f81 100644 case RTE_FLOW_ACTION_TYPE_END: actions_end = true; break; -@@ -3252,14 +3264,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, +@@ -1742,6 +1780,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, + } + return 0; + err: ++ /* If rte_errno was not initialized and reached error state. */ ++ if (!rte_errno) ++ rte_errno = EINVAL; + err = rte_errno; + __flow_hw_action_template_destroy(dev, acts); + return rte_flow_error_set(error, err, +@@ -2070,6 +2111,30 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job, + return 0; + } + ++/** ++ * Release any actions allocated for the flow rule during actions construction. ++ * ++ * @param[in] flow ++ * Pointer to flow structure. ++ */ ++static void ++flow_hw_release_actions(struct rte_eth_dev *dev, ++ uint32_t queue, ++ struct rte_flow_hw *flow) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; ++ ++ if (flow->fate_type == MLX5_FLOW_FATE_JUMP) ++ flow_hw_jump_release(dev, flow->jump); ++ else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE) ++ mlx5_hrxq_obj_release(dev, flow->hrxq); ++ if (mlx5_hws_cnt_id_valid(flow->cnt_id)) ++ flow_hw_age_count_release(priv, queue, flow, NULL); ++ if (flow->mtr_id) ++ mlx5_ipool_free(pool->idx_pool, flow->mtr_id); ++} ++ + /** + * Construct flow action array. 
+ * +@@ -2158,6 +2223,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + struct mlx5_hrxq *hrxq; + uint32_t ct_idx; + cnt_id_t cnt_id; ++ uint32_t *cnt_queue; + uint32_t mtr_id; + + action = &actions[act_data->action_src]; +@@ -2178,7 +2244,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + (dev, queue, action, table, it_idx, + at->action_flags, job->flow, + &rule_acts[act_data->action_dst])) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_VOID: + break; +@@ -2198,7 +2264,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + jump = flow_hw_jump_action_register + (dev, &table->cfg, jump_group, NULL); + if (!jump) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = + (!!attr.group) ? jump->hws_action : jump->root_action; + job->flow->jump = jump; +@@ -2210,7 +2276,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + ft_flag, + action); + if (!hrxq) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = hrxq->action; + job->flow->hrxq = hrxq; + job->flow->fate_type = MLX5_FLOW_FATE_QUEUE; +@@ -2220,19 +2286,19 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + if (flow_hw_shared_action_get + (dev, act_data, item_flags, + &rule_acts[act_data->action_dst])) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + enc_item = ((const struct rte_flow_action_vxlan_encap *) + action->conf)->definition; + if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + enc_item = ((const struct rte_flow_action_nvgre_encap *) + action->conf)->definition; + if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap_data = +@@ -2254,12 +2320,12 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + hw_acts, + action); + if (ret) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + port_action = action->conf; + if (!priv->hw_vport[port_action->port_id]) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = + priv->hw_vport[port_action->port_id]; + break; +@@ -2274,7 +2340,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + jump = flow_hw_jump_action_register + (dev, &table->cfg, aso_mtr->fm.group, NULL); + if (!jump) +- return -1; ++ goto error; + MLX5_ASSERT + (!rule_acts[act_data->action_dst + 1].action); + rule_acts[act_data->action_dst + 1].action = +@@ -2283,7 +2349,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + job->flow->jump = jump; + job->flow->fate_type = MLX5_FLOW_FATE_JUMP; + if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_AGE: + age = action->conf; +@@ -2298,7 +2364,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + job->flow->idx, + error); + if (age_idx == 0) +- return -rte_errno; ++ goto error; + job->flow->age_idx = age_idx; + if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) + /* +@@ -2309,10 +2375,10 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + break; + /* Fall-through. 
*/ + case RTE_FLOW_ACTION_TYPE_COUNT: +- ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue, +- &cnt_id, age_idx); ++ cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); ++ ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx); + if (ret != 0) +- return ret; ++ goto error; + ret = mlx5_hws_cnt_pool_get_action_offset + (priv->hws_cpool, + cnt_id, +@@ -2320,7 +2386,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + &rule_acts[act_data->action_dst].counter.offset + ); + if (ret != 0) +- return ret; ++ goto error; + job->flow->cnt_id = cnt_id; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_COUNT: +@@ -2331,7 +2397,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + &rule_acts[act_data->action_dst].counter.offset + ); + if (ret != 0) +- return ret; ++ goto error; + job->flow->cnt_id = act_data->shared_counter.id; + break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: +@@ -2339,7 +2405,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + ((uint32_t)(uintptr_t)action->conf); + if (flow_hw_ct_compile(dev, queue, ct_idx, + &rule_acts[act_data->action_dst])) +- return -1; ++ goto error; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK: + mtr_id = act_data->shared_meter.id & +@@ -2347,7 +2413,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + /* Find ASO object. */ + aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id); + if (!aso_mtr) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = + pool->action; + rule_acts[act_data->action_dst].aso_meter.offset = +@@ -2365,7 +2431,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + act_data->action_dst, action, + rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE); + if (ret != 0) +- return ret; ++ goto error; + break; + default: + break; +@@ -2398,6 +2464,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) + job->flow->cnt_id = hw_acts->cnt_id; + return 0; ++ ++error: ++ flow_hw_release_actions(dev, queue, job->flow); ++ rte_errno = EINVAL; ++ return -rte_errno; + } + + static const struct rte_flow_item * +@@ -2502,10 +2573,6 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, + uint32_t flow_idx; + int ret; + +- if (unlikely((!dev->data->dev_started))) { +- rte_errno = EINVAL; +- goto error; +- } + if (unlikely(!priv->hw_q[queue].job_idx)) { + rte_errno = ENOMEM; + goto error; +@@ -2544,10 +2611,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, + if (flow_hw_actions_construct(dev, job, + &table->ats[action_template_index], + pattern_template_index, actions, +- rule_acts, queue, error)) { +- rte_errno = EINVAL; ++ rule_acts, queue, error)) + goto free; +- } + rule_items = flow_hw_get_rule_items(dev, table, items, + pattern_template_index, job); + if (!rule_items) +@@ -2646,6 +2711,8 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, + struct rte_flow_hw *flow, + struct rte_flow_error *error) + { ++ uint32_t *cnt_queue; ++ + if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) { + if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) { + /* Remove this AGE parameter from indirect counter. */ +@@ -2656,8 +2723,9 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, + } + return; + } ++ cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); + /* Put the counter first to reduce the race risk in BG thread. 
*/ +- mlx5_hws_cnt_pool_put(priv->hws_cpool, &queue, &flow->cnt_id); ++ mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id); + flow->cnt_id = 0; + if (flow->age_idx) { + if (mlx5_hws_age_is_indirect(flow->age_idx)) { +@@ -3252,14 +3320,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, "group index not supported"); *table_group = group + 1; } else if (config->dv_esw_en && @@ -47554,7 +61380,16 @@ index a3c8056515..6b889e9f81 100644 */ if (group > MLX5_HW_MAX_EGRESS_GROUP) return rte_flow_error_set(error, EINVAL, -@@ -3863,6 +3879,34 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev, +@@ -3349,7 +3421,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +- "table in using"); ++ "table is in use"); + } + LIST_REMOVE(table, next); + for (i = 0; i < table->nb_item_templates; i++) +@@ -3863,6 +3935,34 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev, #undef X_FIELD } @@ -47589,7 +61424,16 @@ index a3c8056515..6b889e9f81 100644 static int mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, const struct rte_flow_actions_template_attr *attr, -@@ -3896,7 +3940,7 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -3879,6 +3979,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, + bool actions_end = false; + int ret; + ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return -rte_errno; + /* FDB actions are only valid to proxy port. */ + if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master)) + return rte_flow_error_set(error, EINVAL, +@@ -3896,7 +3998,7 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "mask type does not match action type"); @@ -47598,7 +61442,7 @@ index a3c8056515..6b889e9f81 100644 case RTE_FLOW_ACTION_TYPE_VOID: break; case RTE_FLOW_ACTION_TYPE_INDIRECT: -@@ -4022,6 +4066,13 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -4022,6 +4124,13 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_END: actions_end = true; break; @@ -47612,7 +61456,7 @@ index a3c8056515..6b889e9f81 100644 default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -4041,8 +4092,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -4041,8 +4150,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev, const struct rte_flow_action masks[], struct rte_flow_error *error) { @@ -47622,7 +61466,7 @@ index a3c8056515..6b889e9f81 100644 } -@@ -4143,7 +4193,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) +@@ -4143,7 +4251,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) if (curr_off >= MLX5_HW_MAX_ACTS) goto err_actions_num; @@ -47631,7 +61475,7 @@ index a3c8056515..6b889e9f81 100644 case RTE_FLOW_ACTION_TYPE_VOID: break; case RTE_FLOW_ACTION_TYPE_INDIRECT: -@@ -4221,6 +4271,10 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) +@@ -4221,6 +4329,10 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) } at->actions_off[i] = cnt_off; break; @@ -47642,7 +61486,59 @@ index a3c8056515..6b889e9f81 100644 default: type = mlx5_hw_dr_action_types[at->actions[i].type]; at->actions_off[i] = curr_off; -@@ -4534,6 +4588,9 @@ error: +@@ -4262,7 +4374,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, + rm[set_vlan_vid_ix].conf)->vlan_vid != 0); + const struct rte_flow_action_of_set_vlan_vid *conf = + ra[set_vlan_vid_ix].conf; +- rte_be16_t vid = masked ? 
conf->vlan_vid : 0; + int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0, + NULL, &error); + *spec = (typeof(*spec)) { +@@ -4273,8 +4384,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, + }, + .src = { + .field = RTE_FLOW_FIELD_VALUE, +- .level = vid, +- .offset = 0, + }, + .width = width, + }; +@@ -4286,11 +4395,15 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, + }, + .src = { + .field = RTE_FLOW_FIELD_VALUE, +- .level = masked ? (1U << width) - 1 : 0, +- .offset = 0, + }, + .width = 0xffffffff, + }; ++ if (masked) { ++ uint32_t mask_val = 0xffffffff; ++ ++ rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid)); ++ rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val)); ++ } + ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; + ra[set_vlan_vid_ix].conf = spec; + rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; +@@ -4317,8 +4430,6 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, + }, + .src = { + .field = RTE_FLOW_FIELD_VALUE, +- .level = vid, +- .offset = 0, + }, + .width = width, + }; +@@ -4327,6 +4438,7 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, + .conf = &conf + }; + ++ rte_memcpy(conf.src.value, &vid, sizeof(vid)); + return flow_hw_modify_field_construct(job, act_data, hw_acts, + &modify_action); + } +@@ -4534,6 +4646,9 @@ error: mlx5dr_action_template_destroy(at->tmpl); mlx5_free(at); } @@ -47652,7 +61548,16 @@ index a3c8056515..6b889e9f81 100644 return NULL; } -@@ -4614,8 +4671,9 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4561,7 +4676,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused, + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +- "action template in using"); ++ "action template is in use"); + } + LIST_REMOVE(template, next); + if (template->tmpl) +@@ -4614,9 +4729,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -47661,9 +61566,12 @@ index a3c8056515..6b889e9f81 100644 bool items_end = false; + uint32_t tag_bitmap = 0; ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return -rte_errno; if (!attr->ingress && !attr->egress && !attr->transfer) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, -@@ -4657,16 +4715,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, + "at least one of the direction attributes" +@@ -4657,16 +4775,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, switch (type) { case RTE_FLOW_ITEM_TYPE_TAG: { @@ -47693,7 +61601,7 @@ index a3c8056515..6b889e9f81 100644 break; } case MLX5_RTE_FLOW_ITEM_TYPE_TAG: -@@ -4680,6 +4748,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4680,6 +4808,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Unsupported internal tag index"); @@ -47706,7 +61614,7 @@ index a3c8056515..6b889e9f81 100644 break; } case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: -@@ -4790,7 +4864,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, +@@ -4790,7 +4924,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it; struct rte_flow_item *copied_items = NULL; const struct rte_flow_item *tmpl_items; @@ -47715,7 +61623,16 @@ index a3c8056515..6b889e9f81 100644 struct rte_flow_item port = { .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, .mask = &rte_flow_item_ethdev_mask, -@@ -5271,12 +5345,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) +@@ -4897,7 +5031,7 @@ 
flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused, + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +- "item template in using"); ++ "item template is in use"); + } + LIST_REMOVE(template, next); + claim_zero(mlx5dr_match_template_destroy(template->mt)); +@@ -5271,12 +5405,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) * * @param dev * Pointer to Ethernet device. @@ -47731,7 +61648,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5295,7 +5371,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) +@@ -5295,7 +5431,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) }, }; @@ -47740,7 +61657,7 @@ index a3c8056515..6b889e9f81 100644 } static __rte_always_inline uint32_t -@@ -5353,12 +5429,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, +@@ -5353,12 +5489,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, * * @param dev * Pointer to Ethernet device. @@ -47757,7 +61674,7 @@ index a3c8056515..6b889e9f81 100644 { uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev); uint32_t tag_value = flow_hw_tx_tag_regc_value(dev); -@@ -5444,7 +5523,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) +@@ -5444,7 +5583,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) NULL, NULL); idx++; MLX5_ASSERT(idx <= RTE_DIM(actions_v)); @@ -47766,7 +61683,7 @@ index a3c8056515..6b889e9f81 100644 } static void -@@ -5473,12 +5552,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) +@@ -5473,12 +5612,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -47782,7 +61699,7 @@ index a3c8056515..6b889e9f81 100644 { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_template_table_attr attr = { -@@ -5496,20 +5577,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) +@@ -5496,20 +5637,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) MLX5_ASSERT(priv->sh->config.dv_esw_en); MLX5_ASSERT(priv->sh->config.repr_matching); @@ -47812,7 +61729,7 @@ index a3c8056515..6b889e9f81 100644 flow_hw_cleanup_tx_repr_tagging(dev); return -rte_errno; } -@@ -5540,12 +5623,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) +@@ -5540,12 +5683,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -47829,7 +61746,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5575,7 +5661,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) +@@ -5575,7 +5721,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) }, }; @@ -47838,7 +61755,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -5588,12 +5674,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) +@@ -5588,12 +5734,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. 
@@ -47855,7 +61772,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5626,7 +5715,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) +@@ -5626,7 +5775,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) }, }; @@ -47864,7 +61781,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -5636,12 +5725,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) +@@ -5636,12 +5785,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -47881,7 +61798,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5660,7 +5752,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) +@@ -5660,7 +5812,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) }, }; @@ -47890,7 +61807,7 @@ index a3c8056515..6b889e9f81 100644 } /* -@@ -5670,12 +5762,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) +@@ -5670,12 +5822,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -47907,7 +61824,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_pattern_template_attr tx_pa_attr = { .relaxed_matching = 0, -@@ -5696,10 +5791,44 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) +@@ -5696,10 +5851,44 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) .type = RTE_FLOW_ITEM_TYPE_END, }, }; @@ -47955,7 +61872,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -5710,12 +5839,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) +@@ -5710,12 +5899,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -47972,7 +61889,7 @@ index a3c8056515..6b889e9f81 100644 { uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev); uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev); -@@ -5781,7 +5913,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) +@@ -5781,7 +5973,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) set_reg_v.dst.offset = rte_bsf32(marker_mask); rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits)); rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask)); @@ -47981,7 +61898,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -5793,13 +5925,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) +@@ -5793,13 +5985,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) * Pointer to Ethernet device. * @param group * Destination group for this action template. @@ -47999,7 +61916,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_actions_template_attr attr = { .transfer = 1, -@@ -5829,8 +5964,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +@@ -5829,8 +6024,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, } }; @@ -48010,7 +61927,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -5839,12 +5974,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +@@ -5839,12 +6034,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. 
@@ -48027,7 +61944,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_actions_template_attr attr = { .transfer = 1, -@@ -5874,8 +6012,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) +@@ -5874,8 +6072,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) } }; @@ -48037,7 +61954,7 @@ index a3c8056515..6b889e9f81 100644 } /* -@@ -5884,12 +6021,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) +@@ -5884,12 +6081,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -48054,7 +61971,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_actions_template_attr tx_act_attr = { .egress = 1, -@@ -5952,11 +6092,41 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5952,11 +6152,41 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -48099,7 +62016,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -5969,6 +6139,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5969,6 +6199,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -48108,7 +62025,7 @@ index a3c8056515..6b889e9f81 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -5976,7 +6148,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5976,7 +6208,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) static struct rte_flow_template_table* flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -48118,7 +62035,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -5993,7 +6166,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -5993,7 +6226,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, .external = false, }; @@ -48127,7 +62044,7 @@ index a3c8056515..6b889e9f81 100644 } -@@ -6007,6 +6180,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -6007,6 +6240,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -48136,7 +62053,7 @@ index a3c8056515..6b889e9f81 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6014,7 +6189,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -6014,7 +6249,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, static struct rte_flow_template_table* flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -48146,7 +62063,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -6031,7 +6207,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6031,7 +6267,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, .external = false, }; @@ -48155,7 +62072,7 @@ index a3c8056515..6b889e9f81 100644 } /* -@@ -6043,6 +6219,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6043,6 +6279,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. 
@@ -48164,7 +62081,7 @@ index a3c8056515..6b889e9f81 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6050,7 +6228,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6050,7 +6288,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, static struct rte_flow_template_table* flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *pt, @@ -48174,7 +62091,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_template_table_attr tx_tbl_attr = { .flow_attr = { -@@ -6064,14 +6243,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6064,14 +6303,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, .attr = tx_tbl_attr, .external = false, }; @@ -48190,7 +62107,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -6084,6 +6257,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6084,6 +6317,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -48199,7 +62116,7 @@ index a3c8056515..6b889e9f81 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6091,7 +6266,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6091,7 +6326,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, static struct rte_flow_template_table * flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -48209,7 +62126,7 @@ index a3c8056515..6b889e9f81 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -6108,7 +6284,44 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, +@@ -6108,7 +6344,110 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, .external = false, }; @@ -48217,6 +62134,72 @@ index a3c8056515..6b889e9f81 100644 + return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); +} + ++/** ++ * Cleans up all template tables and pattern, and actions templates used for ++ * FDB control flow rules. ++ * ++ * @param dev ++ * Pointer to Ethernet device. ++ */ ++static void ++flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; ++ ++ if (!priv->hw_ctrl_fdb) ++ return; ++ hw_ctrl_fdb = priv->hw_ctrl_fdb; ++ /* Clean up templates used for LACP default miss table. */ ++ if (hw_ctrl_fdb->hw_lacp_rx_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL)); ++ if (hw_ctrl_fdb->lacp_rx_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->lacp_rx_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl, ++ NULL)); ++ /* Clean up templates used for default Tx metadata copy. */ ++ if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL)); ++ if (hw_ctrl_fdb->tx_meta_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->tx_meta_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl, ++ NULL)); ++ /* Clean up templates used for default FDB jump rule. 
*/ ++ if (hw_ctrl_fdb->hw_esw_zero_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL)); ++ if (hw_ctrl_fdb->jump_one_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->port_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl, ++ NULL)); ++ /* Clean up templates used for default SQ miss flow rules - non-root table. */ ++ if (hw_ctrl_fdb->hw_esw_sq_miss_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL)); ++ if (hw_ctrl_fdb->regc_sq_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->port_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl, ++ NULL)); ++ /* Clean up templates used for default SQ miss flow rules - root table. */ ++ if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL)); ++ if (hw_ctrl_fdb->regc_jump_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, ++ hw_ctrl_fdb->regc_jump_actions_tmpl, NULL)); ++ if (hw_ctrl_fdb->esw_mgr_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl, ++ NULL)); ++ /* Clean up templates structure for FDB control flow rules. */ ++ mlx5_free(hw_ctrl_fdb); ++ priv->hw_ctrl_fdb = NULL; ++} ++ +/* + * Create a table on the root group to for the LACP traffic redirecting. + * @@ -48255,7 +62238,7 @@ index a3c8056515..6b889e9f81 100644 } /** -@@ -6117,114 +6330,155 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, +@@ -6117,142 +6456,159 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. 
@@ -48266,135 +62249,201 @@ index a3c8056515..6b889e9f81 100644 - * 0 on success, EINVAL otherwise + * 0 on success, negative values otherwise */ - static __rte_unused int +-static __rte_unused int -flow_hw_create_ctrl_tables(struct rte_eth_dev *dev) ++static int +flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; - struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL; - struct rte_flow_pattern_template *port_items_tmpl = NULL; - struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL; -+ struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL; - struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL; - struct rte_flow_actions_template *port_actions_tmpl = NULL; - struct rte_flow_actions_template *jump_one_actions_tmpl = NULL; - struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; -+ struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL; +- struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; +- struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL; +- struct rte_flow_pattern_template *port_items_tmpl = NULL; +- struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL; +- struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL; +- struct rte_flow_actions_template *port_actions_tmpl = NULL; +- struct rte_flow_actions_template *jump_one_actions_tmpl = NULL; +- struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; ++ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; uint32_t xmeta = priv->sh->config.dv_xmeta_en; uint32_t repr_matching = priv->sh->config.repr_matching; -+ int ret; ++ uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule; - /* Create templates and table for default SQ miss flow rules - root table. */ +- /* Create templates and table for default SQ miss flow rules - root table. 
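The rewrite that begins here replaces a dozen loose local template pointers with one zero-initialized holder, the new struct mlx5_flow_hw_ctrl_fdb, allocated up front so that every failure branch can fall through to a single cleanup call. A compact standalone sketch of the idea follows; the field names and failure-injection flag are invented for illustration.

#include <errno.h>
#include <stdlib.h>

/* One holder for every FDB control-flow object. */
struct ctrl_fdb {
	void *sq_miss_root_tbl;
	void *sq_miss_tbl;
	void *jump_tbl;
};

static void
ctrl_fdb_cleanup(struct ctrl_fdb **h)
{
	if (*h == NULL)
		return;
	free((*h)->jump_tbl);
	free((*h)->sq_miss_tbl);
	free((*h)->sq_miss_root_tbl);
	free(*h);
	*h = NULL;
}

/* Allocate zeroed so any failure path can hand the holder, as-is,
 * to the single cleanup routine. */
static int
ctrl_fdb_create(struct ctrl_fdb **out, int fail_at)
{
	struct ctrl_fdb *h = calloc(1, sizeof(*h));

	if (h == NULL)
		return -ENOMEM;
	*out = h;
	if (fail_at == 0 || (h->sq_miss_root_tbl = malloc(8)) == NULL)
		goto err;
	if (fail_at == 1 || (h->sq_miss_tbl = malloc(8)) == NULL)
		goto err;
	if (fail_at == 2 || (h->jump_tbl = malloc(8)) == NULL)
		goto err;
	return 0;
err:
	ctrl_fdb_cleanup(out);  /* one exit path, whatever step failed */
	return -EINVAL;
}

int
main(void)
{
	struct ctrl_fdb *h = NULL;

	(void)ctrl_fdb_create(&h, 1); /* simulate a mid-sequence failure */
	return h == NULL ? 0 : 1;
}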
*/ - esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev); -+ esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); - if (!esw_mgr_items_tmpl) { - DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" - " template for control flows", dev->data->port_id); +- if (!esw_mgr_items_tmpl) { +- DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" +- " template for control flows", dev->data->port_id); - goto error; -+ goto err; - } +- } - regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev); -+ regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error); - if (!regc_jump_actions_tmpl) { - DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" - " for control flows", dev->data->port_id); +- if (!regc_jump_actions_tmpl) { +- DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" +- " for control flows", dev->data->port_id); - goto error; -+ goto err; - } - MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL); - priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table +- } +- MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL); +- priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table - (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl); -+ (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error); - if (!priv->hw_esw_sq_miss_root_tbl) { - DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" - " for control flows", dev->data->port_id); +- if (!priv->hw_esw_sq_miss_root_tbl) { +- DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" +- " for control flows", dev->data->port_id); - goto error; -+ goto err; - } - /* Create templates and table for default SQ miss flow rules - non-root table. */ +- } +- /* Create templates and table for default SQ miss flow rules - non-root table. 
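The log calls being removed and re-added around here split their messages across adjacent string literals ("...item template for" followed by " control flows"), relying on C's compile-time literal concatenation to respect line-length limits. Below is a runnable stand-in for the driver's DRV_LOG macro showing the same pattern; the real macro routes through DPDK's logging subsystem, this one only prints to stderr.

#include <stdio.h>

/* Minimal stand-in for DRV_LOG(level, fmt, ...): the level token is
 * stringized into the prefix and the rest is a printf-style message. */
#define DRV_LOG(level, ...) \
	do { \
		fprintf(stderr, #level ": "); \
		fprintf(stderr, __VA_ARGS__); \
		fputc('\n', stderr); \
	} while (0)

int
main(void)
{
	unsigned int port_id = 0;

	/* Two adjacent literals are concatenated at compile time. */
	DRV_LOG(ERR, "port %u failed to create SQ item template for"
		" control flows", port_id);
	return 0;
}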
*/ - regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev); -+ regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); - if (!regc_sq_items_tmpl) { - DRV_LOG(ERR, "port %u failed to create SQ item template for" - " control flows", dev->data->port_id); +- if (!regc_sq_items_tmpl) { +- DRV_LOG(ERR, "port %u failed to create SQ item template for" +- " control flows", dev->data->port_id); - goto error; -+ goto err; - } +- } - port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev); -+ port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error); - if (!port_actions_tmpl) { - DRV_LOG(ERR, "port %u failed to create port action template" - " for control flows", dev->data->port_id); +- if (!port_actions_tmpl) { +- DRV_LOG(ERR, "port %u failed to create port action template" +- " for control flows", dev->data->port_id); - goto error; -+ goto err; - } - MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL); - priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl, +- } +- MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL); +- priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl, - port_actions_tmpl); -+ port_actions_tmpl, error); - if (!priv->hw_esw_sq_miss_tbl) { - DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" - " for control flows", dev->data->port_id); +- if (!priv->hw_esw_sq_miss_tbl) { +- DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" +- " for control flows", dev->data->port_id); - goto error; -+ goto err; - } - /* Create templates and table for default FDB jump flow rules. */ +- } +- /* Create templates and table for default FDB jump flow rules. */ - port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev); -+ port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error); - if (!port_items_tmpl) { - DRV_LOG(ERR, "port %u failed to create SQ item template for" - " control flows", dev->data->port_id); +- if (!port_items_tmpl) { +- DRV_LOG(ERR, "port %u failed to create SQ item template for" +- " control flows", dev->data->port_id); - goto error; -+ goto err; - } - jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template +- } +- jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template - (dev, MLX5_HW_LOWEST_USABLE_GROUP); -+ (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); - if (!jump_one_actions_tmpl) { - DRV_LOG(ERR, "port %u failed to create jump action template" - " for control flows", dev->data->port_id); +- if (!jump_one_actions_tmpl) { +- DRV_LOG(ERR, "port %u failed to create jump action template" +- " for control flows", dev->data->port_id); - goto error; ++ MLX5_ASSERT(priv->hw_ctrl_fdb == NULL); ++ hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY); ++ if (!hw_ctrl_fdb) { ++ DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates", ++ dev->data->port_id); ++ rte_errno = ENOMEM; + goto err; } - MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL); - priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl, +- MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL); +- priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl, - jump_one_actions_tmpl); -+ jump_one_actions_tmpl, -+ error); - if (!priv->hw_esw_zero_tbl) { - DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" - " for control flows", dev->data->port_id); +- if (!priv->hw_esw_zero_tbl) { +- DRV_LOG(ERR, "port %u failed to 
create table for default jump to group 1" +- " for control flows", dev->data->port_id); - goto error; -+ goto err; ++ priv->hw_ctrl_fdb = hw_ctrl_fdb; ++ if (fdb_def_rule) { ++ /* Create templates and table for default SQ miss flow rules - root table. */ ++ hw_ctrl_fdb->esw_mgr_items_tmpl = ++ flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->esw_mgr_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" ++ " template for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->regc_jump_actions_tmpl = ++ flow_hw_create_ctrl_regc_jump_actions_template(dev, error); ++ if (!hw_ctrl_fdb->regc_jump_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->hw_esw_sq_miss_root_tbl = ++ flow_hw_create_ctrl_sq_miss_root_table ++ (dev, hw_ctrl_fdb->esw_mgr_items_tmpl, ++ hw_ctrl_fdb->regc_jump_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) { ++ DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ /* Create templates and table for default SQ miss flow rules - non-root table. */ ++ hw_ctrl_fdb->regc_sq_items_tmpl = ++ flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->regc_sq_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create SQ item template for" ++ " control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->port_actions_tmpl = ++ flow_hw_create_ctrl_port_actions_template(dev, error); ++ if (!hw_ctrl_fdb->port_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create port action template" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->hw_esw_sq_miss_tbl = ++ flow_hw_create_ctrl_sq_miss_table ++ (dev, hw_ctrl_fdb->regc_sq_items_tmpl, ++ hw_ctrl_fdb->port_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) { ++ DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ /* Create templates and table for default FDB jump flow rules. */ ++ hw_ctrl_fdb->port_items_tmpl = ++ flow_hw_create_ctrl_port_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->port_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create SQ item template for" ++ " control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->jump_one_actions_tmpl = ++ flow_hw_create_ctrl_jump_actions_template ++ (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); ++ if (!hw_ctrl_fdb->jump_one_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create jump action template" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table ++ (dev, hw_ctrl_fdb->port_items_tmpl, ++ hw_ctrl_fdb->jump_one_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_esw_zero_tbl) { ++ DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } } /* Create templates and table for default Tx metadata copy flow rule. 
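From this point the whole SQ-miss and FDB-jump setup is wrapped in if (fdb_def_rule), so when default FDB rules are disabled the corresponding holder fields simply stay NULL and later consumers read that as "feature off" rather than as a failure. A small standalone sketch of that convention, with invented flag and field names:

#include <stdio.h>
#include <stdlib.h>

struct feature_cfg {
	int fdb_def_rule;   /* plays the role of config.fdb_def_rule */
};

struct resources {
	void *fdb_tables;   /* stays NULL when the feature is disabled */
};

/* Build only what the configuration asks for; a NULL slot later tells
 * consumers the feature was never set up, not that setup failed. */
static int
resources_create(struct resources *res, const struct feature_cfg *cfg)
{
	if (cfg->fdb_def_rule) {
		res->fdb_tables = malloc(8);
		if (res->fdb_tables == NULL)
			return -1;
	}
	return 0;
}

int
main(void)
{
	struct feature_cfg cfg = { .fdb_def_rule = 0 };
	struct resources res = { 0 };

	if (resources_create(&res, &cfg) == 0)
		printf("fdb tables %s\n", res.fdb_tables ? "created" : "skipped");
	free(res.fdb_tables);
	return 0;
}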
*/ if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) { - tx_meta_items_tmpl = flow_hw_create_tx_default_mreg_copy_pattern_template(dev); -+ tx_meta_items_tmpl = +- if (!tx_meta_items_tmpl) { ++ hw_ctrl_fdb->tx_meta_items_tmpl = + flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error); - if (!tx_meta_items_tmpl) { ++ if (!hw_ctrl_fdb->tx_meta_items_tmpl) { DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern" " template for control flows", dev->data->port_id); - goto error; + goto err; } - tx_meta_actions_tmpl = flow_hw_create_tx_default_mreg_copy_actions_template(dev); -+ tx_meta_actions_tmpl = +- if (!tx_meta_actions_tmpl) { ++ hw_ctrl_fdb->tx_meta_actions_tmpl = + flow_hw_create_tx_default_mreg_copy_actions_template(dev, error); - if (!tx_meta_actions_tmpl) { ++ if (!hw_ctrl_fdb->tx_meta_actions_tmpl) { DRV_LOG(ERR, "port %u failed to Tx metadata copy actions" " template for control flows", dev->data->port_id); - goto error; + goto err; } - MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL); +- MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL); - priv->hw_tx_meta_cpy_tbl = flow_hw_create_tx_default_mreg_copy_table(dev, - tx_meta_items_tmpl, tx_meta_actions_tmpl); -+ priv->hw_tx_meta_cpy_tbl = -+ flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl, -+ tx_meta_actions_tmpl, error); - if (!priv->hw_tx_meta_cpy_tbl) { +- if (!priv->hw_tx_meta_cpy_tbl) { ++ hw_ctrl_fdb->hw_tx_meta_cpy_tbl = ++ flow_hw_create_tx_default_mreg_copy_table ++ (dev, hw_ctrl_fdb->tx_meta_items_tmpl, ++ hw_ctrl_fdb->tx_meta_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) { DRV_LOG(ERR, "port %u failed to create table for default" " Tx metadata copy flow rule", dev->data->port_id); - goto error; @@ -48402,22 +62451,25 @@ index a3c8056515..6b889e9f81 100644 + } + } + /* Create LACP default miss table. 
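The branch above creates the default Tx metadata-copy objects only when 32-bit HWS metadata is enabled and representor matching is off. A standalone sketch of that gate, with an illustrative enum and struct replacing the mlx5 configuration fields:

#include <stdio.h>

/* Illustrative stand-ins for the metadata-mode knobs tested above. */
enum xmeta_mode {
	XMETA_MODE_LEGACY,
	XMETA_MODE_META32_HWS,
};

struct config {
	enum xmeta_mode xmeta;
	int repr_matching;
};

/* The default Tx metadata-copy rule is only meaningful when 32-bit HWS
 * metadata is on and representor matching is off, so creation is gated
 * on both, the same shape as the condition in the hunk above. */
static int
needs_tx_meta_copy(const struct config *cfg)
{
	return !cfg->repr_matching && cfg->xmeta == XMETA_MODE_META32_HWS;
}

int
main(void)
{
	struct config cfg = { XMETA_MODE_META32_HWS, 0 };

	printf("tx meta copy: %s\n", needs_tx_meta_copy(&cfg) ? "yes" : "no");
	return 0;
}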
*/ -+ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { -+ lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error); -+ if (!lacp_rx_items_tmpl) { ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { ++ hw_ctrl_fdb->lacp_rx_items_tmpl = ++ flow_hw_create_lacp_rx_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->lacp_rx_items_tmpl) { + DRV_LOG(ERR, "port %u failed to create pattern template" + " for LACP Rx traffic", dev->data->port_id); + goto err; + } -+ lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error); -+ if (!lacp_rx_actions_tmpl) { ++ hw_ctrl_fdb->lacp_rx_actions_tmpl = ++ flow_hw_create_lacp_rx_actions_template(dev, error); ++ if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to create actions template" + " for LACP Rx traffic", dev->data->port_id); + goto err; + } -+ priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl, -+ lacp_rx_actions_tmpl, error); -+ if (!priv->hw_lacp_rx_tbl) { ++ hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table ++ (dev, hw_ctrl_fdb->lacp_rx_items_tmpl, ++ hw_ctrl_fdb->lacp_rx_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_lacp_rx_tbl) { + DRV_LOG(ERR, "port %u failed to create template table for" + " for LACP Rx traffic", dev->data->port_id); + goto err; @@ -48425,48 +62477,41 @@ index a3c8056515..6b889e9f81 100644 } return 0; -error: +- if (priv->hw_esw_zero_tbl) { +- flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); +- priv->hw_esw_zero_tbl = NULL; +- } +- if (priv->hw_esw_sq_miss_tbl) { +- flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL); +- priv->hw_esw_sq_miss_tbl = NULL; +- } +- if (priv->hw_esw_sq_miss_root_tbl) { +- flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL); +- priv->hw_esw_sq_miss_root_tbl = NULL; +- } +- if (tx_meta_actions_tmpl) +- flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL); +- if (jump_one_actions_tmpl) +- flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL); +- if (port_actions_tmpl) +- flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL); +- if (regc_jump_actions_tmpl) +- flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL); +- if (tx_meta_items_tmpl) +- flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL); +- if (port_items_tmpl) +- flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL); +- if (regc_sq_items_tmpl) +- flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); +- if (esw_mgr_items_tmpl) +- flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); ++ +err: -+ /* Do not overwrite the rte_errno. 
*/ -+ ret = -rte_errno; -+ if (ret == 0) -+ ret = rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "Failed to create control tables."); -+ if (priv->hw_tx_meta_cpy_tbl) { -+ flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL); -+ priv->hw_tx_meta_cpy_tbl = NULL; -+ } - if (priv->hw_esw_zero_tbl) { - flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); - priv->hw_esw_zero_tbl = NULL; -@@ -6237,6 +6491,8 @@ error: - flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL); - priv->hw_esw_sq_miss_root_tbl = NULL; - } -+ if (lacp_rx_actions_tmpl) -+ flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL); - if (tx_meta_actions_tmpl) - flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL); - if (jump_one_actions_tmpl) -@@ -6245,6 +6501,8 @@ error: - flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL); - if (regc_jump_actions_tmpl) - flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL); -+ if (lacp_rx_items_tmpl) -+ flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL); - if (tx_meta_items_tmpl) - flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL); - if (port_items_tmpl) -@@ -6253,7 +6511,7 @@ error: - flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); - if (esw_mgr_items_tmpl) - flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); -- return -EINVAL; -+ return ret; ++ flow_hw_cleanup_ctrl_fdb_tables(dev); + return -EINVAL; } - static void -@@ -6376,27 +6634,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) +@@ -6376,27 +6732,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) MLX5DR_ACTION_FLAG_HWS_FDB }; @@ -48499,15 +62544,61 @@ index a3c8056515..6b889e9f81 100644 } return 0; } -@@ -6807,6 +7066,7 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6760,6 +7117,38 @@ mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev) + } + } + ++static int ++flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr, ++ uint16_t nb_queue, ++ const struct rte_flow_queue_attr *queue_attr[], ++ struct rte_flow_error *error) ++{ ++ uint32_t size; ++ unsigned int i; ++ ++ if (port_attr == NULL) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "Port attributes must be non-NULL"); ++ ++ if (nb_queue == 0) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "At least one flow queue is required"); ++ ++ if (queue_attr == NULL) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "Queue attributes must be non-NULL"); ++ ++ size = queue_attr[0]->size; ++ for (i = 1; i < nb_queue; ++i) { ++ if (queue_attr[i]->size != size) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, ++ "All flow queues must have the same size"); ++ } ++ ++ return 0; ++} ++ + /** + * Configure port HWS resources. + * +@@ -6807,11 +7196,10 @@ flow_hw_configure(struct rte_eth_dev *dev, struct rte_flow_queue_attr ctrl_queue_attr = {0}; bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master); int ret = 0; + uint32_t action_flags; - if (!port_attr || !nb_queue || !queue_attr) { - rte_errno = EINVAL; -@@ -6836,8 +7096,7 @@ flow_hw_configure(struct rte_eth_dev *dev, +- if (!port_attr || !nb_queue || !queue_attr) { +- rte_errno = EINVAL; +- goto err; +- } ++ if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error)) ++ return -rte_errno; + /* In case re-configuring, release existing context at first. 
*/ + if (priv->dr_ctx) { + /* */ +@@ -6836,8 +7224,7 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } @@ -48517,7 +62608,22 @@ index a3c8056515..6b889e9f81 100644 _queue_attr[nb_queue] = &ctrl_queue_attr; priv->acts_ipool = mlx5_ipool_create(&cfg); if (!priv->acts_ipool) -@@ -6926,6 +7185,7 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6845,14 +7232,6 @@ flow_hw_configure(struct rte_eth_dev *dev, + /* Allocate the queue job descriptor LIFO. */ + mem_size = sizeof(priv->hw_q[0]) * nb_q_updated; + for (i = 0; i < nb_q_updated; i++) { +- /* +- * Check if the queues' size are all the same as the +- * limitation from HWS layer. +- */ +- if (_queue_attr[i]->size != _queue_attr[0]->size) { +- rte_errno = EINVAL; +- goto err; +- } + mem_size += (sizeof(struct mlx5_hw_q_job *) + + sizeof(struct mlx5_hw_q_job) + + sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN + +@@ -6926,6 +7305,7 @@ flow_hw_configure(struct rte_eth_dev *dev, priv->nb_queue = nb_q_updated; rte_spinlock_init(&priv->hw_ctrl_lock); LIST_INIT(&priv->hw_ctrl_flows); @@ -48525,7 +62631,7 @@ index a3c8056515..6b889e9f81 100644 ret = flow_hw_create_ctrl_rx_tables(dev); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -@@ -6952,23 +7212,34 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6952,23 +7332,34 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) { @@ -48569,7 +62675,7 @@ index a3c8056515..6b889e9f81 100644 } if (port_attr->nb_conn_tracks) { mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated + -@@ -7005,12 +7276,18 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -7005,18 +7396,32 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue); @@ -48590,7 +62696,159 @@ index a3c8056515..6b889e9f81 100644 if (_queue_attr) mlx5_free(_queue_attr); if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE) -@@ -7178,9 +7455,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) + priv->hws_strict_queue = 1; + return 0; + err: ++ priv->hws_strict_queue = 0; ++ flow_hw_destroy_vlan(dev); ++ if (priv->hws_age_req) ++ mlx5_hws_age_pool_destroy(priv); ++ if (priv->hws_cpool) { ++ mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); ++ priv->hws_cpool = NULL; ++ } + if (priv->hws_ctpool) { + flow_hw_ct_pool_destroy(dev, priv->hws_ctpool); + priv->hws_ctpool = NULL; +@@ -7025,34 +7430,44 @@ err: + flow_hw_ct_mng_destroy(dev, priv->ct_mng); + priv->ct_mng = NULL; + } +- if (priv->hws_age_req) +- mlx5_hws_age_pool_destroy(priv); +- if (priv->hws_cpool) { +- mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); +- priv->hws_cpool = NULL; +- } ++ flow_hw_cleanup_ctrl_fdb_tables(dev); + flow_hw_free_vport_actions(priv); ++ if (priv->hw_def_miss) { ++ mlx5dr_action_destroy(priv->hw_def_miss); ++ priv->hw_def_miss = NULL; ++ } ++ flow_hw_cleanup_tx_repr_tagging(dev); + for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { +- if (priv->hw_drop[i]) ++ if (priv->hw_drop[i]) { + mlx5dr_action_destroy(priv->hw_drop[i]); +- if (priv->hw_tag[i]) ++ priv->hw_drop[i] = NULL; ++ } ++ if (priv->hw_tag[i]) { + mlx5dr_action_destroy(priv->hw_tag[i]); ++ priv->hw_drop[i] = NULL; ++ } + } +- flow_hw_destroy_vlan(dev); +- if (dr_ctx) ++ mlx5_flow_meter_uninit(dev); ++ flow_hw_cleanup_ctrl_rx_tables(dev); ++ if (dr_ctx) { + claim_zero(mlx5dr_context_close(dr_ctx)); +- for (i = 0; i < nb_q_updated; i++) { +- rte_ring_free(priv->hw_q[i].indir_iq); +- rte_ring_free(priv->hw_q[i].indir_cq); ++ priv->dr_ctx = NULL; 
++ } ++ if (priv->hw_q) { ++ for (i = 0; i < nb_q_updated; i++) { ++ rte_ring_free(priv->hw_q[i].indir_iq); ++ rte_ring_free(priv->hw_q[i].indir_cq); ++ } ++ mlx5_free(priv->hw_q); ++ priv->hw_q = NULL; + } +- mlx5_free(priv->hw_q); +- priv->hw_q = NULL; + if (priv->acts_ipool) { + mlx5_ipool_destroy(priv->acts_ipool); + priv->acts_ipool = NULL; + } + if (_queue_attr) + mlx5_free(_queue_attr); ++ priv->nb_queue = 0; + /* Do not overwrite the internal errno information. */ + if (ret) + return ret; +@@ -7071,32 +7486,41 @@ void + flow_hw_resource_release(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; +- struct rte_flow_template_table *tbl; +- struct rte_flow_pattern_template *it; +- struct rte_flow_actions_template *at; ++ struct rte_flow_template_table *tbl, *temp_tbl; ++ struct rte_flow_pattern_template *it, *temp_it; ++ struct rte_flow_actions_template *at, *temp_at; + uint32_t i; + + if (!priv->dr_ctx) + return; + flow_hw_rxq_flag_set(dev, false); + flow_hw_flush_all_ctrl_flows(dev); ++ flow_hw_cleanup_ctrl_fdb_tables(dev); + flow_hw_cleanup_tx_repr_tagging(dev); + flow_hw_cleanup_ctrl_rx_tables(dev); +- while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) { +- tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo); +- flow_hw_table_destroy(dev, tbl, NULL); +- } +- while (!LIST_EMPTY(&priv->flow_hw_tbl)) { +- tbl = LIST_FIRST(&priv->flow_hw_tbl); +- flow_hw_table_destroy(dev, tbl, NULL); +- } +- while (!LIST_EMPTY(&priv->flow_hw_itt)) { +- it = LIST_FIRST(&priv->flow_hw_itt); +- flow_hw_pattern_template_destroy(dev, it, NULL); +- } +- while (!LIST_EMPTY(&priv->flow_hw_at)) { +- at = LIST_FIRST(&priv->flow_hw_at); +- flow_hw_actions_template_destroy(dev, at, NULL); ++ tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo); ++ while (tbl) { ++ temp_tbl = LIST_NEXT(tbl, next); ++ claim_zero(flow_hw_table_destroy(dev, tbl, NULL)); ++ tbl = temp_tbl; ++ } ++ tbl = LIST_FIRST(&priv->flow_hw_tbl); ++ while (tbl) { ++ temp_tbl = LIST_NEXT(tbl, next); ++ claim_zero(flow_hw_table_destroy(dev, tbl, NULL)); ++ tbl = temp_tbl; ++ } ++ it = LIST_FIRST(&priv->flow_hw_itt); ++ while (it) { ++ temp_it = LIST_NEXT(it, next); ++ claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL)); ++ it = temp_it; ++ } ++ at = LIST_FIRST(&priv->flow_hw_at); ++ while (at) { ++ temp_at = LIST_NEXT(at, next); ++ claim_zero(flow_hw_actions_template_destroy(dev, at, NULL)); ++ at = temp_at; + } + for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { + if (priv->hw_drop[i]) +@@ -7104,6 +7528,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev) + if (priv->hw_tag[i]) + mlx5dr_action_destroy(priv->hw_tag[i]); + } ++ if (priv->hw_def_miss) ++ mlx5dr_action_destroy(priv->hw_def_miss); + flow_hw_destroy_vlan(dev); + flow_hw_free_vport_actions(priv); + if (priv->acts_ipool) { +@@ -7130,8 +7556,6 @@ flow_hw_resource_release(struct rte_eth_dev *dev) + } + mlx5_free(priv->hw_q); + priv->hw_q = NULL; +- claim_zero(mlx5dr_context_close(priv->dr_ctx)); +- priv->dr_ctx = NULL; + priv->nb_queue = 0; + } + +@@ -7178,9 +7602,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) uint32_t meta_mode = priv->sh->config.dv_xmeta_en; uint8_t masks = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c; uint32_t i, j; @@ -48602,7 +62860,7 @@ index a3c8056515..6b889e9f81 100644 /* * The CAPA is global for common device but only used in net. 
-@@ -7195,29 +7472,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) +@@ -7195,29 +7619,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) if (meta_mode == MLX5_XMETA_MODE_META32_HWS) unset |= 1 << (REG_C_1 - REG_C_0); masks &= ~unset; @@ -48656,7 +62914,42 @@ index a3c8056515..6b889e9f81 100644 priv->sh->hws_tags = 1; mlx5_flow_hw_aso_tag = (enum modify_reg)priv->mtr_color_reg; mlx5_flow_hw_avl_tags_init_cnt++; -@@ -8355,6 +8638,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { +@@ -7425,6 +7855,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue, + "CT is not enabled"); + return 0; + } ++ if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "CT supports port indexes up to " ++ RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); ++ return 0; ++ } + ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx); + if (!ct) { + rte_flow_error_set(error, rte_errno, +@@ -7566,6 +8003,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, + bool push = true; + bool aso = false; + ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return NULL; ++ + if (attr) { + MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE); + if (unlikely(!priv->hw_q[queue].job_idx)) { +@@ -8243,6 +8683,10 @@ flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id, + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "empty context"); ++ if (!priv->hws_age_req) ++ return rte_flow_error_set(error, ENOENT, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, "No aging initialized"); + if (priv->hws_strict_queue) { + if (queue_id >= age_info->hw_q_age->nb_rings) + return rte_flow_error_set(error, EINVAL, +@@ -8355,6 +8799,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { * Pointer to flow rule actions. * @param action_template_idx * Index of an action template associated with @p table. @@ -48667,7 +62960,7 @@ index a3c8056515..6b889e9f81 100644 * * @return * 0 on success, negative errno value otherwise and rte_errno set. 
-@@ -8366,7 +8653,9 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, +@@ -8366,7 +8814,9 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, struct rte_flow_item items[], uint8_t item_template_idx, struct rte_flow_action actions[], @@ -48678,7 +62971,7 @@ index a3c8056515..6b889e9f81 100644 { struct mlx5_priv *priv = proxy_dev->data->dev_private; uint32_t queue = CTRL_QUEUE_ID(priv); -@@ -8413,7 +8702,14 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, +@@ -8413,7 +8863,14 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, } entry->owner_dev = owner_dev; entry->flow = flow; @@ -48694,7 +62987,7 @@ index a3c8056515..6b889e9f81 100644 rte_spinlock_unlock(&priv->hw_ctrl_lock); return 0; error: -@@ -8587,11 +8883,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev) +@@ -8587,11 +9044,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev) mlx5_free(cf); cf = cf_next; } @@ -48719,7 +63012,7 @@ index a3c8056515..6b889e9f81 100644 { uint16_t port_id = dev->data->port_id; struct rte_flow_item_ethdev esw_mgr_spec = { -@@ -8616,6 +8924,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8616,6 +9085,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) }; struct rte_flow_item items[3] = { { 0 } }; struct rte_flow_action actions[3] = { { 0 } }; @@ -48730,27 +63023,44 @@ index a3c8056515..6b889e9f81 100644 struct rte_eth_dev *proxy_dev; struct mlx5_priv *proxy_priv; uint16_t proxy_port_id = dev->data->port_id; -@@ -8671,7 +8983,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8637,8 +9110,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + proxy_port_id, port_id); + return 0; + } +- if (!proxy_priv->hw_esw_sq_miss_root_tbl || +- !proxy_priv->hw_esw_sq_miss_tbl) { ++ if (!proxy_priv->hw_ctrl_fdb || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) { + DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " + "default flow tables were not created.", + proxy_port_id, port_id); +@@ -8670,8 +9144,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + actions[2] = (struct rte_flow_action) { .type = RTE_FLOW_ACTION_TYPE_END, }; - ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl, +- ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl, - items, 0, actions, 0); ++ ret = flow_hw_create_ctrl_flow(dev, proxy_dev, ++ proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, + items, 0, actions, 0, &flow_info, external); if (ret) { DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d", port_id, sqn, ret); -@@ -8700,8 +9012,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8700,8 +9175,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END, }; -+ flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; - ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl, +- ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl, - items, 0, actions, 0); ++ flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; ++ ret = flow_hw_create_ctrl_flow(dev, proxy_dev, ++ proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl, + items, 0, actions, 0, &flow_info, external); if (ret) { DRV_LOG(ERR, "Port %u failed to create HWS 
SQ miss flow rule for SQ %u, ret %d", port_id, sqn, ret); -@@ -8710,6 +9023,58 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8710,6 +9187,61 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) return 0; } @@ -48788,10 +63098,13 @@ index a3c8056515..6b889e9f81 100644 + } + proxy_dev = &rte_eth_devices[proxy_port_id]; + proxy_priv = proxy_dev->data->dev_private; ++ /* FDB default flow rules must be enabled. */ ++ MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule); + if (!proxy_priv->dr_ctx) + return 0; -+ if (!proxy_priv->hw_esw_sq_miss_root_tbl || -+ !proxy_priv->hw_esw_sq_miss_tbl) ++ if (!proxy_priv->hw_ctrl_fdb || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) + return 0; + cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows); + while (cf != NULL) { @@ -48809,7 +63122,7 @@ index a3c8056515..6b889e9f81 100644 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) { -@@ -8738,6 +9103,9 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) +@@ -8738,6 +9270,9 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, } }; @@ -48819,16 +63132,36 @@ index a3c8056515..6b889e9f81 100644 struct rte_eth_dev *proxy_dev; struct mlx5_priv *proxy_priv; uint16_t proxy_port_id = dev->data->port_id; -@@ -8768,7 +9136,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) +@@ -8752,6 +9287,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + } + proxy_dev = &rte_eth_devices[proxy_port_id]; + proxy_priv = proxy_dev->data->dev_private; ++ /* FDB default flow rules must be enabled. */ ++ MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule); + if (!proxy_priv->dr_ctx) { + DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured " + "for HWS to create default FDB jump rule. 
Default rule will " +@@ -8759,7 +9296,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + proxy_port_id, port_id); + return 0; + } +- if (!proxy_priv->hw_esw_zero_tbl) { ++ if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) { + DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " + "default flow tables were not created.", + proxy_port_id, port_id); +@@ -8767,8 +9304,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + return -rte_errno; } return flow_hw_create_ctrl_flow(dev, proxy_dev, - proxy_priv->hw_esw_zero_tbl, +- proxy_priv->hw_esw_zero_tbl, - items, 0, actions, 0); ++ proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl, + items, 0, actions, 0, &flow_info, false); } int -@@ -8814,17 +9182,20 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) +@@ -8814,17 +9351,22 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -48837,11 +63170,15 @@ index a3c8056515..6b889e9f81 100644 + }; MLX5_ASSERT(priv->master); - if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl) +- if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl) ++ if (!priv->dr_ctx || ++ !priv->hw_ctrl_fdb || ++ !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl) return 0; return flow_hw_create_ctrl_flow(dev, dev, - priv->hw_tx_meta_cpy_tbl, +- priv->hw_tx_meta_cpy_tbl, - eth_all, 0, copy_reg_action, 0); ++ priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl, + eth_all, 0, copy_reg_action, 0, &flow_info, false); } @@ -48851,7 +63188,7 @@ index a3c8056515..6b889e9f81 100644 { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_item_sq sq_spec = { -@@ -8849,6 +9220,10 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8849,6 +9391,10 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) { .type = RTE_FLOW_ACTION_TYPE_END }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -48862,7 +63199,7 @@ index a3c8056515..6b889e9f81 100644 /* It is assumed that caller checked for representor matching. 
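The new SQ-miss teardown a few hunks above walks priv->hw_ctrl_flows and destroys matching entries while iterating, which is only safe because the successor is fetched before the node is removed. Here is a self-contained sketch of that idiom using the same BSD <sys/queue.h> macros the driver relies on; the entry type and field names are invented.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct ctrl_flow {
	LIST_ENTRY(ctrl_flow) next;
	unsigned int sqn;   /* which send queue this control rule serves */
};

LIST_HEAD(ctrl_flows, ctrl_flow);

/* Remove every entry matching sqn: grab the successor before freeing
 * the node, the same LIST_NEXT-then-destroy order the hunk adopts. */
static void
destroy_sq_flows(struct ctrl_flows *head, unsigned int sqn)
{
	struct ctrl_flow *cf = LIST_FIRST(head);

	while (cf != NULL) {
		struct ctrl_flow *cf_next = LIST_NEXT(cf, next);

		if (cf->sqn == sqn) {
			LIST_REMOVE(cf, next);
			free(cf);
		}
		cf = cf_next;
	}
}

int
main(void)
{
	struct ctrl_flows head = LIST_HEAD_INITIALIZER(head);
	struct ctrl_flow *cf;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		cf = malloc(sizeof(*cf));
		if (cf == NULL)
			return 1;
		cf->sqn = i % 2;
		LIST_INSERT_HEAD(&head, cf, next);
	}
	destroy_sq_flows(&head, 1);
	for (cf = LIST_FIRST(&head); cf != NULL; cf = LIST_NEXT(cf, next))
		printf("kept sqn %u\n", cf->sqn);
	return 0;
}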
*/ MLX5_ASSERT(priv->sh->config.repr_matching); -@@ -8874,7 +9249,44 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8874,7 +9420,44 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP; } return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl, @@ -48900,15 +63237,15 @@ index a3c8056515..6b889e9f81 100644 + .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, + }; + -+ MLX5_ASSERT(priv->master); -+ if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl) ++ if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl) + return 0; -+ return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0, -+ miss_action, 0, &flow_info, false); ++ return flow_hw_create_ctrl_flow(dev, dev, ++ priv->hw_ctrl_fdb->hw_lacp_rx_tbl, ++ eth_lacp, 0, miss_action, 0, &flow_info, false); } static uint32_t -@@ -8989,6 +9401,9 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, +@@ -8989,6 +9572,9 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -48918,7 +63255,7 @@ index a3c8056515..6b889e9f81 100644 if (!eth_spec) return -EINVAL; -@@ -9002,7 +9417,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, +@@ -9002,7 +9588,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type); items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END }; /* Without VLAN filtering, only a single flow rule must be created. */ @@ -48927,7 +63264,7 @@ index a3c8056515..6b889e9f81 100644 } static int -@@ -9018,6 +9433,9 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, +@@ -9018,6 +9604,9 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -48937,7 +63274,7 @@ index a3c8056515..6b889e9f81 100644 unsigned int i; if (!eth_spec) -@@ -9040,7 +9458,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, +@@ -9040,7 +9629,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, }; items[1].spec = &vlan_spec; @@ -48947,7 +63284,7 @@ index a3c8056515..6b889e9f81 100644 return -rte_errno; } return 0; -@@ -9058,6 +9477,9 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, +@@ -9058,6 +9648,9 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -48957,7 +63294,7 @@ index a3c8056515..6b889e9f81 100644 const struct rte_ether_addr cmp = { .addr_bytes = "\x00\x00\x00\x00\x00\x00", }; -@@ -9081,7 +9503,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, +@@ -9081,7 +9674,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, if (!memcmp(mac, &cmp, sizeof(*mac))) continue; memcpy(ð_spec.dst.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN); @@ -48967,7 +63304,7 @@ index a3c8056515..6b889e9f81 100644 return -rte_errno; } return 0; -@@ -9100,6 +9523,9 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, +@@ -9100,6 +9694,9 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -48977,7 +63314,7 @@ index a3c8056515..6b889e9f81 100644 const struct rte_ether_addr cmp = { .addr_bytes = "\x00\x00\x00\x00\x00\x00", }; -@@ -9131,7 +9557,8 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, +@@ -9131,7 +9728,8 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, 
}; items[1].spec = &vlan_spec; @@ -48987,6 +63324,157 @@ index a3c8056515..6b889e9f81 100644 return -rte_errno; } } +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +index 08f8aad70a..bcaf518227 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +@@ -618,6 +618,7 @@ mlx5_flow_meter_profile_get(struct rte_eth_dev *dev, + meter_profile_id); + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Callback to add MTR profile with HWS. + * +@@ -707,6 +708,7 @@ mlx5_flow_meter_profile_hws_delete(struct rte_eth_dev *dev, + memset(fmp, 0, sizeof(struct mlx5_flow_meter_profile)); + return 0; + } ++#endif + + /** + * Find policy by id. +@@ -849,6 +851,7 @@ mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev, + return 0; + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Callback to check MTR policy action validate for HWS + * +@@ -885,6 +888,7 @@ mlx5_flow_meter_policy_hws_validate(struct rte_eth_dev *dev, + } + return 0; + } ++#endif + + static int + __mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev, +@@ -1211,6 +1215,7 @@ mlx5_flow_meter_policy_get(struct rte_eth_dev *dev, + &policy_idx); + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Callback to delete MTR policy for HWS. + * +@@ -1547,7 +1552,7 @@ policy_add_err: + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to create meter policy."); + } +- ++#endif + /** + * Check meter validation. + * +@@ -1915,6 +1920,7 @@ error: + NULL, "Failed to create devx meter."); + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Create meter rules. + * +@@ -1998,6 +2004,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, + __atomic_add_fetch(&policy->ref_cnt, 1, __ATOMIC_RELAXED); + return 0; + } ++#endif + + static int + mlx5_flow_meter_params_flush(struct rte_eth_dev *dev, +@@ -2482,6 +2489,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = { + .stats_read = mlx5_flow_meter_stats_read, + }; + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { + .capabilities_get = mlx5_flow_mtr_cap_get, + .meter_profile_add = mlx5_flow_meter_profile_hws_add, +@@ -2500,6 +2508,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { + .stats_update = NULL, + .stats_read = NULL, + }; ++#endif + + /** + * Get meter operations. 
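The mlx5_flow_meter.c hunks around this point fence every HWS-only meter callback behind #if defined(HAVE_MLX5_HWS_SUPPORT), and the ops getter falls back to the legacy table when that support is compiled out. A minimal standalone sketch of compile-time vtable selection; HAVE_HWS here is a hypothetical macro standing in for the real guard.

#include <stdio.h>

struct mtr_ops {
	const char *name;
};

/* The HWS table only exists when the build enables the feature,
 * mirroring the HAVE_MLX5_HWS_SUPPORT guards in the hunks above. */
static const struct mtr_ops legacy_ops = { "legacy" };
#ifdef HAVE_HWS
static const struct mtr_ops hws_ops = { "hws" };
#endif

/* Pick an implementation at run time, but only among those compiled in. */
static const struct mtr_ops *
mtr_ops_get(int dv_flow_en)
{
#ifdef HAVE_HWS
	if (dv_flow_en == 2)
		return &hws_ops;
#else
	(void)dv_flow_en;
#endif
	return &legacy_ops;
}

int
main(void)
{
	printf("using %s ops\n", mtr_ops_get(2)->name);
	return 0;
}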
+@@ -2515,12 +2524,16 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { + int + mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) + { ++#if defined(HAVE_MLX5_HWS_SUPPORT) + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->sh->config.dv_flow_en == 2) + *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_hws_ops; + else + *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; ++#else ++ *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; ++#endif + return 0; + } + +@@ -2899,7 +2912,6 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) + struct mlx5_flow_meter_profile *fmp; + struct mlx5_legacy_flow_meter *legacy_fm; + struct mlx5_flow_meter_info *fm; +- struct mlx5_flow_meter_policy *policy; + struct mlx5_flow_meter_sub_policy *sub_policy; + void *tmp; + uint32_t i, mtr_idx, policy_idx; +@@ -2967,15 +2979,20 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) + mlx5_l3t_destroy(priv->policy_idx_tbl); + priv->policy_idx_tbl = NULL; + } ++#if defined(HAVE_MLX5_HWS_SUPPORT) + if (priv->mtr_policy_arr) { ++ struct mlx5_flow_meter_policy *policy; ++ + for (i = 0; i < priv->mtr_config.nb_meter_policies; i++) { + policy = mlx5_flow_meter_policy_find(dev, i, + &policy_idx); +- if (policy->initialized) ++ if (policy->initialized) { + mlx5_flow_meter_policy_hws_delete(dev, i, + error); ++ } + } + } ++#endif + if (priv->mtr_profile_tbl) { + MLX5_L3T_FOREACH(priv->mtr_profile_tbl, i, entry) { + fmp = entry; +@@ -2989,14 +3006,17 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) + mlx5_l3t_destroy(priv->mtr_profile_tbl); + priv->mtr_profile_tbl = NULL; + } ++#if defined(HAVE_MLX5_HWS_SUPPORT) + if (priv->mtr_profile_arr) { + for (i = 0; i < priv->mtr_config.nb_meter_profiles; i++) { + fmp = mlx5_flow_meter_profile_find(priv, i); +- if (fmp->initialized) ++ if (fmp->initialized) { + mlx5_flow_meter_profile_hws_delete(dev, i, + error); ++ } + } + } ++#endif + /* Delete default policy table. */ + mlx5_flow_destroy_def_policy(dev); + if (priv->sh->refcnt == 1) diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c b/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c index 28ea28bfbe..1e9c7cf7c5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_verbs.c @@ -49019,10 +63507,60 @@ index 28ea28bfbe..1e9c7cf7c5 100644 error); if (ret < 0) diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -index 51704ef754..791fde4458 100644 +index 51704ef754..8415aa411f 100644 --- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -@@ -306,26 +306,25 @@ mlx5_hws_cnt_svc(void *opaque) +@@ -25,28 +25,32 @@ static void + __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool) + { + uint32_t preload; +- uint32_t q_num = cpool->cache->q_num; ++ uint32_t q_num; + uint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool); + cnt_id_t cnt_id; + uint32_t qidx, iidx = 0; + struct rte_ring *qcache = NULL; + +- /* +- * Counter ID order is important for tracking the max number of in used +- * counter for querying, which means counter internal index order must +- * be from zero to the number user configured, i.e: 0 - 8000000. +- * Need to load counter ID in this order into the cache firstly, +- * and then the global free list. +- * In the end, user fetch the counter from minimal to the maximum. 
+- */ +- preload = RTE_MIN(cpool->cache->preload_sz, cnt_num / q_num); +- for (qidx = 0; qidx < q_num; qidx++) { +- for (; iidx < preload * (qidx + 1); iidx++) { +- cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx); +- qcache = cpool->cache->qcache[qidx]; +- if (qcache) +- rte_ring_enqueue_elem(qcache, &cnt_id, +- sizeof(cnt_id)); ++ /* If counter cache was disabled, only free list must prepopulated. */ ++ if (cpool->cache != NULL) { ++ q_num = cpool->cache->q_num; ++ /* ++ * Counter ID order is important for tracking the max number of in used ++ * counter for querying, which means counter internal index order must ++ * be from zero to the number user configured, i.e: 0 - 8000000. ++ * Need to load counter ID in this order into the cache firstly, ++ * and then the global free list. ++ * In the end, user fetch the counter from minimal to the maximum. ++ */ ++ preload = RTE_MIN(cpool->cache->preload_sz, cnt_num / q_num); ++ for (qidx = 0; qidx < q_num; qidx++) { ++ for (; iidx < preload * (qidx + 1); iidx++) { ++ cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx); ++ qcache = cpool->cache->qcache[qidx]; ++ if (qcache) ++ rte_ring_enqueue_elem(qcache, &cnt_id, ++ sizeof(cnt_id)); ++ } + } + } + for (; iidx < cnt_num; iidx++) { +@@ -306,26 +310,25 @@ mlx5_hws_cnt_svc(void *opaque) (struct mlx5_dev_ctx_shared *)opaque; uint64_t interval = (uint64_t)sh->cnt_svc->query_interval * (US_PER_S / MS_PER_S); @@ -49060,17 +63598,108 @@ index 51704ef754..791fde4458 100644 query_us = query_cycle / (rte_get_timer_hz() / US_PER_S); sleep_us = interval - query_us; if (interval > query_us) -@@ -410,8 +409,7 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, +@@ -334,6 +337,55 @@ mlx5_hws_cnt_svc(void *opaque) + return NULL; + } + ++static bool ++mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg, ++ const struct mlx5_hws_cache_param *ccfg) ++{ ++ /* ++ * Enable cache if and only if there are enough counters requested ++ * to populate all of the caches. ++ */ ++ return pcfg->request_num >= ccfg->q_num * ccfg->size; ++} ++ ++static struct mlx5_hws_cnt_pool_caches * ++mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg, ++ const struct mlx5_hws_cache_param *ccfg) ++{ ++ struct mlx5_hws_cnt_pool_caches *cache; ++ char mz_name[RTE_MEMZONE_NAMESIZE]; ++ uint32_t qidx; ++ ++ /* If counter pool is big enough, setup the counter pool cache. */ ++ cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, ++ sizeof(*cache) + ++ sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) ++ * ccfg->q_num, 0, SOCKET_ID_ANY); ++ if (cache == NULL) ++ return NULL; ++ /* Store the necessary cache parameters. 
*/ ++ cache->fetch_sz = ccfg->fetch_sz; ++ cache->preload_sz = ccfg->preload_sz; ++ cache->threshold = ccfg->threshold; ++ cache->q_num = ccfg->q_num; ++ for (qidx = 0; qidx < ccfg->q_num; qidx++) { ++ snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); ++ cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, ++ SOCKET_ID_ANY, ++ RING_F_SP_ENQ | RING_F_SC_DEQ | ++ RING_F_EXACT_SZ); ++ if (cache->qcache[qidx] == NULL) ++ goto error; ++ } ++ return cache; ++ ++error: ++ while (qidx--) ++ rte_ring_free(cache->qcache[qidx]); ++ mlx5_free(cache); ++ return NULL; ++} ++ + struct mlx5_hws_cnt_pool * + mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + const struct mlx5_hws_cnt_pool_cfg *pcfg, +@@ -342,7 +394,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + char mz_name[RTE_MEMZONE_NAMESIZE]; + struct mlx5_hws_cnt_pool *cntp; + uint64_t cnt_num = 0; +- uint32_t qidx; + + MLX5_ASSERT(pcfg); + MLX5_ASSERT(ccfg); +@@ -352,17 +403,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + return NULL; + + cntp->cfg = *pcfg; +- cntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, +- sizeof(*cntp->cache) + +- sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) +- * ccfg->q_num, 0, SOCKET_ID_ANY); +- if (cntp->cache == NULL) +- goto error; +- /* store the necessary cache parameters. */ +- cntp->cache->fetch_sz = ccfg->fetch_sz; +- cntp->cache->preload_sz = ccfg->preload_sz; +- cntp->cache->threshold = ccfg->threshold; +- cntp->cache->q_num = ccfg->q_num; + if (pcfg->request_num > sh->hws_max_nb_counters) { + DRV_LOG(ERR, "Counter number %u " + "is greater than the maximum supported (%u).", +@@ -409,14 +449,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + DRV_LOG(ERR, "failed to create reuse list ring"); goto error; } - for (qidx = 0; qidx < ccfg->q_num; qidx++) { +- for (qidx = 0; qidx < ccfg->q_num; qidx++) { - snprintf(mz_name, sizeof(mz_name), "%s_cache/%u", pcfg->name, - qidx); -+ snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); - cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, - SOCKET_ID_ANY, - RING_F_SP_ENQ | RING_F_SC_DEQ | -@@ -634,7 +632,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, +- cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, +- SOCKET_ID_ANY, +- RING_F_SP_ENQ | RING_F_SC_DEQ | +- RING_F_EXACT_SZ); +- if (cntp->cache->qcache[qidx] == NULL) ++ /* Allocate counter cache only if needed. */ ++ if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) { ++ cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg); ++ if (cntp->cache == NULL) + goto error; + } + /* Initialize the time for aging-out calculation. */ +@@ -634,7 +670,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, SOCKET_ID_ANY); if (mp_name == NULL) goto error; @@ -49079,7 +63708,7 @@ index 51704ef754..791fde4458 100644 dev->data->port_id); pcfg.name = mp_name; pcfg.request_num = pattr->nb_counters; -@@ -660,6 +658,10 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, +@@ -660,6 +696,10 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, if (ret != 0) goto error; priv->sh->cnt_svc->refcnt++; @@ -49090,7 +63719,7 @@ index 51704ef754..791fde4458 100644 return cpool; error: mlx5_hws_cnt_pool_destroy(priv->sh, cpool); -@@ -672,6 +674,13 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, +@@ -672,6 +712,15 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, { if (cpool == NULL) return; @@ -49099,12 +63728,14 @@ index 51704ef754..791fde4458 100644 + * Maybe blocked for at most 200ms here. 
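The cache-init hunk above builds one ring per queue and, when creating ring qidx fails, unwinds with while (qidx--) so that only the rings already created are freed. A standalone sketch of that partial-initialization rollback with plain allocations in place of rte_ring objects:

#include <stdlib.h>

/* Allocate a small array of per-queue objects; if any allocation fails,
 * release only what was already built. */
static void **
per_queue_alloc(unsigned int q_num)
{
	void **q = calloc(q_num, sizeof(*q));
	unsigned int i;

	if (q == NULL)
		return NULL;
	for (i = 0; i < q_num; i++) {
		q[i] = malloc(64);
		if (q[i] == NULL)
			goto error;
	}
	return q;

error:
	while (i--)        /* frees q[i-1] .. q[0]; the failed slot is NULL */
		free(q[i]);
	free(q);
	return NULL;
}

int
main(void)
{
	void **q = per_queue_alloc(4);
	unsigned int i;

	if (q != NULL) {
		for (i = 0; i < 4; i++)
			free(q[i]);
		free(q);
	}
	return 0;
}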
+ */ + rte_spinlock_lock(&sh->cpool_lock); -+ LIST_REMOVE(cpool, next); ++ /* Try to remove cpool before it was added to list caused segfault. */ ++ if (!LIST_EMPTY(&sh->hws_cpool_list) && cpool->next.le_prev) ++ LIST_REMOVE(cpool, next); + rte_spinlock_unlock(&sh->cpool_lock); if (--sh->cnt_svc->refcnt == 0) mlx5_hws_cnt_svc_deinit(sh); mlx5_hws_cnt_pool_action_destroy(cpool); -@@ -1229,11 +1238,13 @@ mlx5_hws_age_pool_destroy(struct mlx5_priv *priv) +@@ -1229,11 +1278,13 @@ mlx5_hws_age_pool_destroy(struct mlx5_priv *priv) { struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv); @@ -49119,7 +63750,7 @@ index 51704ef754..791fde4458 100644 #endif diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h -index 030dcead86..b5c19a8e2c 100644 +index 030dcead86..72751f3330 100644 --- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h +++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h @@ -97,6 +97,7 @@ struct mlx5_hws_cnt_pool_caches { @@ -49138,8 +63769,41 @@ index 030dcead86..b5c19a8e2c 100644 } __rte_cache_aligned; /* HWS AGE status. */ +@@ -531,6 +533,32 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue, + return 0; + } + ++/** ++ * Decide if the given queue can be used to perform counter allocation/deallcation ++ * based on counter configuration ++ * ++ * @param[in] priv ++ * Pointer to the port private data structure. ++ * @param[in] queue ++ * Pointer to the queue index. ++ * ++ * @return ++ * @p queue if cache related to the queue can be used. NULL otherwise. ++ */ ++static __rte_always_inline uint32_t * ++mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue) ++{ ++ if (priv && priv->hws_cpool) { ++ /* Do not use queue cache if counter cache is disabled. */ ++ if (priv->hws_cpool->cache == NULL) ++ return NULL; ++ return queue; ++ } ++ /* This case should not be reached if counter pool was successfully configured. */ ++ MLX5_ASSERT(false); ++ return NULL; ++} ++ + static __rte_always_inline unsigned int + mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool) + { diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c -index 917c517b83..b41f7a51f5 100644 +index 917c517b83..56e5568f33 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.c +++ b/dpdk/drivers/net/mlx5/mlx5_rx.c @@ -39,7 +39,8 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, @@ -49247,7 +63911,7 @@ index 917c517b83..b41f7a51f5 100644 } return ret; default: -@@ -559,19 +596,24 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -559,19 +596,25 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) * @param[out] mcqe * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not * written. @@ -49260,7 +63924,8 @@ index 917c517b83..b41f7a51f5 100644 - * 0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE, - * otherwise the packet size in regular RxQ, and striding byte - * count format in mprq case. -+ * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, ++ * 0 in case of empty CQE, ++ * MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, + * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, + * otherwise the packet size in regular RxQ, + * and striding byte count format in mprq case. 
@@ -49278,7 +63943,7 @@ index 917c517b83..b41f7a51f5 100644 uint16_t idx, end; do { -@@ -620,7 +662,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +@@ -620,7 +663,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, * compressed. */ } else { @@ -49286,7 +63951,7 @@ index 917c517b83..b41f7a51f5 100644 int8_t op_own; uint32_t cq_ci; -@@ -628,10 +669,12 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +@@ -628,10 +670,17 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) { if (unlikely(ret == MLX5_CQE_STATUS_ERR || rxq->err_state)) { @@ -49300,10 +63965,15 @@ index 917c517b83..b41f7a51f5 100644 + if (ret == MLX5_RECOVERY_ERROR_RET || + ret == MLX5_RECOVERY_COMPLETED_RET) + return MLX5_CRITICAL_ERROR_CQE_RET; ++ if (!mprq && ret == MLX5_RECOVERY_IGNORE_RET) { ++ *skip_cnt = 1; ++ ++rxq->cq_ci; ++ return MLX5_ERROR_CQE_MASK; ++ } } else { return 0; } -@@ -684,8 +727,15 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, +@@ -684,8 +733,15 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, } } if (unlikely(rxq->err_state)) { @@ -49319,7 +63989,7 @@ index 917c517b83..b41f7a51f5 100644 } else { return len; } -@@ -837,6 +887,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -837,6 +893,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) int len = 0; /* keep its value across iterations. */ while (pkts_n) { @@ -49327,36 +63997,34 @@ index 917c517b83..b41f7a51f5 100644 unsigned int idx = rq_ci & wqe_cnt; volatile struct mlx5_wqe_data_seg *wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; -@@ -875,11 +926,24 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -875,11 +932,23 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } if (!pkt) { cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; - len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); - if (len <= 0) { -- rte_mbuf_raw_free(rep); -- if (unlikely(len == MLX5_ERROR_CQE_RET)) + len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe, &skip_cnt, false); + if (unlikely(len & MLX5_ERROR_CQE_MASK)) { ++ /* We drop packets with non-critical errors */ + rte_mbuf_raw_free(rep); +- if (unlikely(len == MLX5_ERROR_CQE_RET)) + if (len == MLX5_CRITICAL_ERROR_CQE_RET) { -+ rte_mbuf_raw_free(rep); rq_ci = rxq->rq_ci << sges_n; + break; + } ++ /* Skip specified amount of error CQEs packets */ + rq_ci >>= sges_n; + rq_ci += skip_cnt; + rq_ci <<= sges_n; -+ idx = rq_ci & wqe_cnt; -+ wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; -+ seg = (*rxq->elts)[idx]; -+ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; -+ len = len & ~MLX5_ERROR_CQE_MASK; ++ MLX5_ASSERT(!pkt); ++ continue; + } + if (len == 0) { + rte_mbuf_raw_free(rep); break; } pkt = seg; -@@ -981,6 +1045,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, +@@ -981,6 +1050,7 @@ mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp, tcp->cksum = 0; csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4); csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); @@ -49364,7 +64032,7 @@ index 917c517b83..b41f7a51f5 100644 csum = (~csum) & 0xffff; if (csum == 0) csum = 0xffff; -@@ -1089,6 +1154,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +@@ -1089,6 +1159,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) uint16_t 
strd_cnt;
 uint16_t strd_idx;
 uint32_t byte_cnt;
+ uint16_t skip_cnt;
 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
 enum mlx5_rqx_code rxq_code;
 
-@@ -1101,14 +1167,26 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+@@ -1101,14 +1172,26 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
 }
 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
@@ -49726,10 +64394,102 @@ index 185d2695db..ab69af0c55 100644
 *err |= _mm_cvtsi128_si64(opcode);
 
 /* D.5 fill in mbuf - rearm_data and packet_type. */
diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c
-index f64fa3587b..615e1d073d 100644
+index f64fa3587b..f4ac58e2f9 100644
--- a/dpdk/drivers/net/mlx5/mlx5_stats.c
+++ b/dpdk/drivers/net/mlx5/mlx5_stats.c
+@@ -39,24 +39,36 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n)
+ {
+ struct mlx5_priv *priv = dev->data->dev_private;
+- unsigned int i;
+- uint64_t counters[n];
++ uint64_t counters[MLX5_MAX_XSTATS];
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
++ unsigned int i;
++ uint16_t stats_n = 0;
++ uint16_t stats_n_2nd = 0;
+ uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n;
++ bool bond_master = (priv->master && priv->pf_bond >= 0);
+
+ if (n >= mlx5_stats_n && stats) {
+- int stats_n;
+ int ret;
+
+- stats_n = mlx5_os_get_stats_n(dev);
+- if (stats_n < 0)
+- return stats_n;
+- if (xstats_ctrl->stats_n != stats_n)
++ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd);
++ if (ret < 0)
++ return ret;
++ /*
++ * The number of statistics fetched via "ETH_SS_STATS" may vary with
++ * the port configuration each time, and may also differ between two
++ * ports. The numbers might even be the same although the
++ * configurations differ.
++ * It is not recommended to change the configuration without using the
++ * RTE API. A port (traffic) restart may trigger another initialization
++ * to make sure the maps are correct.
++ */ ++ if (xstats_ctrl->stats_n != stats_n || ++ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) + mlx5_os_stats_init(dev); +- ret = mlx5_os_read_dev_counters(dev, counters); +- if (ret) ++ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); ++ if (ret < 0) + return ret; +- for (i = 0; i != mlx5_stats_n; ++i) { ++ for (i = 0; i != mlx5_stats_n; i++) { + stats[i].id = i; + if (xstats_ctrl->info[i].dev) { + uint64_t wrap_n; +@@ -225,30 +237,32 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; +- int stats_n; + unsigned int i; + uint64_t *counters; + int ret; ++ uint16_t stats_n = 0; ++ uint16_t stats_n_2nd = 0; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + +- stats_n = mlx5_os_get_stats_n(dev); +- if (stats_n < 0) { ++ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); ++ if (ret < 0) { + DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, +- strerror(-stats_n)); +- return stats_n; ++ strerror(-ret)); ++ return ret; + } +- if (xstats_ctrl->stats_n != stats_n) ++ if (xstats_ctrl->stats_n != stats_n || ++ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) + mlx5_os_stats_init(dev); +- counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * +- xstats_ctrl->mlx5_stats_n, 0, +- SOCKET_ID_ANY); ++ /* Considering to use stack directly. */ ++ counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * xstats_ctrl->mlx5_stats_n, ++ 0, SOCKET_ID_ANY); + if (!counters) { +- DRV_LOG(WARNING, "port %u unable to allocate memory for xstats " +- "counters", ++ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats counters", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; + } +- ret = mlx5_os_read_dev_counters(dev, counters); ++ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); +@@ -288,10 +302,9 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev, if (n >= mlx5_xstats_n && xstats_names) { for (i = 0; i != mlx5_xstats_n; ++i) { @@ -49742,7 +64502,7 @@ index f64fa3587b..615e1d073d 100644 } mlx5_xstats_n = mlx5_txpp_xstats_get_names(dev, xstats_names, diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c -index f54443ed1a..b12a1dc1c7 100644 +index f54443ed1a..1cb0b56ae1 100644 --- a/dpdk/drivers/net/mlx5/mlx5_trigger.c +++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c @@ -226,17 +226,17 @@ mlx5_rxq_start(struct rte_eth_dev *dev) @@ -49833,11 +64593,15 @@ index f54443ed1a..b12a1dc1c7 100644 return -rte_errno; } else { return 0; -@@ -1494,13 +1499,13 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) +@@ -1493,14 +1498,16 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) + if (!txq) continue; queue = mlx5_txq_get_sqn(txq); - if ((priv->representor || priv->master) && config->dv_esw_en) { +- if ((priv->representor || priv->master) && config->dv_esw_en) { - if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue)) { ++ if ((priv->representor || priv->master) && ++ config->dv_esw_en && ++ config->fdb_def_rule) { + if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) { mlx5_txq_release(dev, i); goto error; @@ -49849,16 +64613,34 @@ index f54443ed1a..b12a1dc1c7 100644 mlx5_txq_release(dev, i); goto error; } -@@ -1519,6 +1524,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) +@@ -1519,6 +1526,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) } if 
(priv->isolated) return 0; -+ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) + if (mlx5_flow_hw_lacp_rx_flow(dev)) + goto error; if (dev->data->promiscuous) flags |= MLX5_CTRL_PROMISCUOUS; if (dev->data->all_multicast) +@@ -1624,14 +1634,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) + DRV_LOG(INFO, "port %u FDB default rule is disabled", + dev->data->port_id); + } +- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { + ret = mlx5_flow_lacp_miss(dev); + if (ret) + DRV_LOG(INFO, "port %u LACP rule cannot be created - " + "forward LACP to kernel.", dev->data->port_id); + else +- DRV_LOG(INFO, "LACP traffic will be missed in port %u." +- , dev->data->port_id); ++ DRV_LOG(INFO, "LACP traffic will be missed in port %u.", ++ dev->data->port_id); + } + if (priv->isolated) + return 0; diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c index a13c7e937c..14e1487e59 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.c @@ -49948,21 +64730,28 @@ index f853a67f58..0e1da1d5f5 100644 } return n_used + n_txpp; diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c -index 5543f2c570..d617784dba 100644 +index 5543f2c570..46badcd0cc 100644 --- a/dpdk/drivers/net/mlx5/mlx5_txq.c +++ b/dpdk/drivers/net/mlx5/mlx5_txq.c -@@ -1310,8 +1310,16 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) +@@ -1310,8 +1310,23 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) return -rte_errno; } #ifdef HAVE_MLX5_HWS_SUPPORT - if (priv->sh->config.dv_flow_en == 2) - return mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num); + if (priv->sh->config.dv_flow_en == 2) { -+ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) -+ return -rte_errno; ++ bool sq_miss_created = false; ++ ++ if (priv->sh->config.fdb_def_rule) { ++ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) ++ return -rte_errno; ++ sq_miss_created = true; ++ } ++ + if (priv->sh->config.repr_matching && + mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) { -+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); ++ if (sq_miss_created) ++ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); + return -rte_errno; + } + return 0; @@ -49970,11 +64759,80 @@ index 5543f2c570..d617784dba 100644 #endif flow = mlx5_flow_create_devx_sq_miss_flow(dev, sq_num); if (flow > 0) +diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c +index b295702fd4..bbe07cd7a3 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_utils.c ++++ b/dpdk/drivers/net/mlx5/mlx5_utils.c +@@ -379,7 +379,8 @@ _mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) + idx -= 1; + trunk_idx = mlx5_trunk_idx_get(pool, idx); + trunk = lc->trunks[trunk_idx]; +- MLX5_ASSERT(trunk); ++ if (!trunk) ++ return NULL; + entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx); + return &trunk->data[entry_idx * pool->cfg.size]; + } diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c -index 88d8213f55..a31e1b5494 100644 +index 88d8213f55..49f750be68 100644 --- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +++ b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c -@@ -416,3 +416,33 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) +@@ -178,20 +178,29 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + 
return -ENOTSUP; + } + +-/** ++/* + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param n_stats ++ * Pointer to number of stats to store. ++ * @param n_stats_sec ++ * Pointer to number of stats to store for the 2nd port of the bond. + * + * @return +- * Number of statistics on success, negative errno value otherwise and +- * rte_errno is set. ++ * 0 on success, negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_os_get_stats_n(struct rte_eth_dev *dev) ++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec) + { + RTE_SET_USED(dev); ++ RTE_SET_USED(bond_master); ++ RTE_SET_USED(n_stats); ++ RTE_SET_USED(n_stats_sec); + return -ENOTSUP; + } + +@@ -221,6 +230,8 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. + * @param[out] stats + * Counters table output buffer. + * +@@ -229,9 +240,10 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + * rte_errno is set. + */ + int +-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) ++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + { + RTE_SET_USED(dev); ++ RTE_SET_USED(bond_master); + RTE_SET_USED(stats); + return -ENOTSUP; + } +@@ -416,3 +428,33 @@ int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) RTE_SET_USED(dev); return -ENOTSUP; } @@ -50058,10 +64916,20 @@ index 77f04cc931..f401264b61 100644 sh->dev_cap.ind_table_max_size); } diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -index c4355a3f64..ab643709ee 100644 +index c4355a3f64..f281d1d7f8 100644 --- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c +++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -@@ -376,6 +376,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) +@@ -198,7 +198,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, +- RTE_PTYPE_L4_UDP ++ RTE_PTYPE_L4_UDP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; +@@ -376,6 +377,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) goto out; } @@ -50072,7 +64940,7 @@ index c4355a3f64..ab643709ee 100644 /* start tx queues */ for (i = 0; i < dev->data->nb_tx_queues; i++) dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; -@@ -400,6 +404,7 @@ static int +@@ -400,6 +405,7 @@ static int mvneta_dev_stop(struct rte_eth_dev *dev) { struct mvneta_priv *priv = dev->data->dev_private; @@ -50080,7 +64948,7 @@ index c4355a3f64..ab643709ee 100644 dev->data->dev_started = 0; -@@ -412,6 +417,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) +@@ -412,6 +418,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) priv->ppio = NULL; @@ -50096,7 +64964,7 @@ index c4355a3f64..ab643709ee 100644 } diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c -index 8fd3211283..177b8165f3 100644 +index 8fd3211283..15083d249c 100644 --- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c @@ -951,6 +951,9 @@ mrvl_dev_start(struct rte_eth_dev *dev) @@ -50123,6 +64991,16 @@ index 8fd3211283..177b8165f3 100644 return mrvl_dev_set_link_down(dev); } +@@ -1767,7 +1777,8 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L4_TCP, +- RTE_PTYPE_L4_UDP ++ 
RTE_PTYPE_L4_UDP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; diff --git a/dpdk/drivers/net/netvsc/hn_rndis.c b/dpdk/drivers/net/netvsc/hn_rndis.c index e6f1f28768..fe36274df8 100644 --- a/dpdk/drivers/net/netvsc/hn_rndis.c @@ -50146,8 +65024,34 @@ index e6f1f28768..fe36274df8 100644 PMD_DRV_LOG(ERR, "missing RNDIS header %u", len); return; +diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c +index bc6f60c64a..6496979f28 100644 +--- a/dpdk/drivers/net/netvsc/hn_rxtx.c ++++ b/dpdk/drivers/net/netvsc/hn_rxtx.c +@@ -612,7 +612,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + RTE_PTYPE_L4_MASK); + + if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { +- m->vlan_tci = info->vlan_info; ++ m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info), ++ NDIS_VLAN_INFO_PRI(info->vlan_info), ++ NDIS_VLAN_INFO_CFI(info->vlan_info)); + m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; + + /* NDIS always strips tag, put it back if necessary */ +@@ -1332,7 +1334,9 @@ static void hn_encap(struct rndis_packet_msg *pkt, + if (m->ol_flags & RTE_MBUF_F_TX_VLAN) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, + NDIS_PKTINFO_TYPE_VLAN); +- *pi_data = m->vlan_tci; ++ *pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci), ++ RTE_VLAN_TCI_PRI(m->vlan_tci), ++ RTE_VLAN_TCI_DEI(m->vlan_tci)); + } + + if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c -index e447258d97..5896d208d0 100644 +index e447258d97..360345c50b 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower.c +++ b/dpdk/drivers/net/nfp/flower/nfp_flower.c @@ -25,7 +25,6 @@ @@ -50192,16 +65096,79 @@ index e447258d97..5896d208d0 100644 } if (rte_eal_process_type() == RTE_PROC_PRIMARY) -@@ -217,8 +224,6 @@ nfp_flower_pf_close(struct rte_eth_dev *dev) - - nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); +@@ -175,63 +182,6 @@ nfp_flower_pf_stop(struct rte_eth_dev *dev) + return 0; + } +-/* Reset and stop device. The device can not be restarted. */ +-static int +-nfp_flower_pf_close(struct rte_eth_dev *dev) +-{ +- uint16_t i; +- struct nfp_net_hw *hw; +- struct nfp_pf_dev *pf_dev; +- struct nfp_net_txq *this_tx_q; +- struct nfp_net_rxq *this_rx_q; +- struct nfp_flower_representor *repr; +- struct nfp_app_fw_flower *app_fw_flower; +- +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return 0; +- +- repr = (struct nfp_flower_representor *)dev->data->dev_private; +- hw = repr->app_fw_flower->pf_hw; +- pf_dev = hw->pf_dev; +- app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv); +- +- /* +- * We assume that the DPDK application is stopping all the +- * threads/queues before calling the device close function. 
+- */ +- nfp_pf_repr_disable_queues(dev); +- +- /* Clear queues */ +- for (i = 0; i < dev->data->nb_tx_queues; i++) { +- this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; +- nfp_net_reset_tx_queue(this_tx_q); +- } +- +- for (i = 0; i < dev->data->nb_rx_queues; i++) { +- this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i]; +- nfp_net_reset_rx_queue(this_rx_q); +- } +- +- /* Cancel possible impending LSC work here before releasing the port*/ +- rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); +- +- nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); +- - rte_eth_dev_release_port(dev); - - /* Now it is safe to free all PF resources */ - PMD_DRV_LOG(INFO, "Freeing PF resources"); - nfp_cpp_area_free(pf_dev->ctrl_area); -@@ -451,7 +456,7 @@ nfp_flower_pf_recv_pkts(void *rx_queue, +- /* Now it is safe to free all PF resources */ +- PMD_DRV_LOG(INFO, "Freeing PF resources"); +- nfp_cpp_area_free(pf_dev->ctrl_area); +- nfp_cpp_area_free(pf_dev->hwqueues_area); +- free(pf_dev->hwinfo); +- free(pf_dev->sym_tbl); +- nfp_cpp_free(pf_dev->cpp); +- rte_free(app_fw_flower); +- rte_free(pf_dev); +- +- return 0; +-} +- + static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { + .dev_infos_get = nfp_net_infos_get, + .link_update = nfp_net_link_update, +@@ -239,7 +189,6 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { + + .dev_start = nfp_flower_pf_start, + .dev_stop = nfp_flower_pf_stop, +- .dev_close = nfp_flower_pf_close, + }; + + static inline void +@@ -451,7 +400,7 @@ nfp_flower_pf_recv_pkts(void *rx_queue, rxds->vals[1] = 0; dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb)); rxds->fld.dd = 0; @@ -50210,7 +65177,7 @@ index e447258d97..5896d208d0 100644 rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; nb_hold++; -@@ -631,13 +636,6 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) +@@ -631,13 +580,6 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) pf_dev = hw->pf_dev; pci_dev = hw->pf_dev->pci_dev; @@ -50224,7 +65191,7 @@ index e447258d97..5896d208d0 100644 hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; hw->subsystem_device_id = pci_dev->id.subsystem_device_id; -@@ -666,6 +664,9 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) +@@ -666,6 +608,9 @@ nfp_flower_init_vnic_common(struct nfp_net_hw *hw, const char *vnic_type) hw->mtu = hw->max_mtu; hw->flbufsz = DEFAULT_FLBUF_SIZE; @@ -50234,7 +65201,7 @@ index e447258d97..5896d208d0 100644 /* read the Rx offset configured from firmware */ if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2) hw->rx_offset = NFP_NET_RX_OFFSET; -@@ -695,6 +696,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -695,6 +640,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) int ret = 0; uint16_t n_txq; uint16_t n_rxq; @@ -50242,7 +65209,7 @@ index e447258d97..5896d208d0 100644 unsigned int numa_node; struct rte_mempool *mp; struct nfp_net_rxq *rxq; -@@ -703,6 +705,9 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -703,6 +649,9 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) struct rte_eth_dev *eth_dev; const struct rte_memzone *tz; struct nfp_app_fw_flower *app_fw_flower; @@ -50252,7 +65219,7 @@ index e447258d97..5896d208d0 100644 /* Set up some pointers here for ease of use */ pf_dev = hw->pf_dev; -@@ -734,9 +739,14 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -734,9 +683,14 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) goto eth_dev_cleanup; } @@ -50268,7 +65235,7 @@ index 
e447258d97..5896d208d0 100644 4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node); if (app_fw_flower->ctrl_pktmbuf_pool == NULL) { PMD_INIT_LOG(ERR, "Create mbuf pool for ctrl vnic failed"); -@@ -773,6 +783,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -773,6 +727,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) eth_dev->data->nb_rx_queues = n_txq; eth_dev->data->dev_private = hw; @@ -50276,7 +65243,7 @@ index e447258d97..5896d208d0 100644 /* Set up the Rx queues */ for (i = 0; i < n_rxq; i++) { rxq = rte_zmalloc_socket("ethdev RX queue", -@@ -811,7 +822,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -811,7 +766,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. */ @@ -50285,7 +65252,7 @@ index e447258d97..5896d208d0 100644 sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN, numa_node); if (tz == NULL) { -@@ -830,7 +841,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -830,7 +785,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) sizeof(*rxq->rxbufs) * CTRL_VNIC_NB_DESC, RTE_CACHE_LINE_SIZE, numa_node); if (rxq->rxbufs == NULL) { @@ -50294,7 +65261,7 @@ index e447258d97..5896d208d0 100644 rte_free(rxq); ret = -ENOMEM; goto rx_queue_setup_cleanup; -@@ -848,6 +859,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -848,6 +803,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC)); } @@ -50302,7 +65269,7 @@ index e447258d97..5896d208d0 100644 /* Set up the Tx queues */ for (i = 0; i < n_txq; i++) { txq = rte_zmalloc_socket("ethdev TX queue", -@@ -866,7 +878,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -866,7 +822,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) * handle the maximum ring size is allocated in order to allow for * resizing in later calls to the queue setup function. 
*/ @@ -50311,7 +65278,7 @@ index e447258d97..5896d208d0 100644 sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN, numa_node); if (tz == NULL) { -@@ -896,7 +908,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) +@@ -896,7 +852,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw) sizeof(*txq->txbufs) * CTRL_VNIC_NB_DESC, RTE_CACHE_LINE_SIZE, numa_node); if (txq->txbufs == NULL) { @@ -50320,7 +65287,7 @@ index e447258d97..5896d208d0 100644 rte_free(txq); ret = -ENOMEM; goto tx_queue_setup_cleanup; -@@ -921,7 +933,7 @@ tx_queue_setup_cleanup: +@@ -921,7 +877,7 @@ tx_queue_setup_cleanup: txq = eth_dev->data->tx_queues[i]; if (txq != NULL) { rte_free(txq->txbufs); @@ -50329,7 +65296,7 @@ index e447258d97..5896d208d0 100644 rte_free(txq); } } -@@ -930,7 +942,7 @@ rx_queue_setup_cleanup: +@@ -930,7 +886,7 @@ rx_queue_setup_cleanup: rxq = eth_dev->data->rx_queues[i]; if (rxq != NULL) { rte_free(rxq->rxbufs); @@ -50338,7 +65305,7 @@ index e447258d97..5896d208d0 100644 rte_free(rxq); } } -@@ -951,28 +963,35 @@ static void +@@ -951,28 +907,37 @@ static void nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw) { uint32_t i; @@ -50355,6 +65322,8 @@ index e447258d97..5896d208d0 100644 + pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1; + ++ nfp_net_disable_queues(eth_dev); ++ + snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); for (i = 0; i < hw->max_tx_queues; i++) { txq = eth_dev->data->tx_queues[i]; @@ -50376,6 +65345,41 @@ index e447258d97..5896d208d0 100644 rte_free(rxq); } } +@@ -1202,6 +1167,22 @@ app_cleanup: + return ret; + } + ++void ++nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev) ++{ ++ struct nfp_app_fw_flower *app_fw_flower; ++ ++ app_fw_flower = pf_dev->app_fw_priv; ++ nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw); ++ nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area); ++ nfp_cpp_area_free(pf_dev->ctrl_area); ++ rte_free(app_fw_flower->pf_hw); ++ nfp_flow_priv_uninit(pf_dev); ++ if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) ++ PMD_DRV_LOG(WARNING, "Failed to free switch domain for device"); ++ rte_free(app_fw_flower); ++} ++ + int + nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp) + { +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.h b/dpdk/drivers/net/nfp/flower/nfp_flower.h +index c05a761a95..d82e35f093 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower.h ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower.h +@@ -85,6 +85,7 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower) + } + + int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev); ++void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev); + int nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp); + uint16_t nfp_flower_pf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c index 3631e764fe..1c6340f3d7 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c @@ -50390,7 +65394,7 @@ index 3631e764fe..1c6340f3d7 100644 nb_hold++; diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c -index 5809c838b3..32c4574bdc 100644 +index 5809c838b3..d5aed8791a 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c +++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c @@ -300,6 +300,7 @@ nfp_flower_repr_dev_start(struct rte_eth_dev *dev) @@ -50433,7 +65437,164 @@ index 
5809c838b3..32c4574bdc 100644 return 0; } -@@ -528,7 +540,7 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { +@@ -512,12 +524,156 @@ nfp_flower_repr_tx_burst(void *tx_queue, + return sent; + } + ++static void ++nfp_flower_repr_free_queue(struct nfp_flower_representor *repr) ++{ ++ uint16_t i; ++ struct rte_eth_dev *eth_dev = repr->eth_dev; ++ ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ rte_free(eth_dev->data->tx_queues[i]); ++ ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ rte_free(eth_dev->data->rx_queues[i]); ++} ++ ++static void ++nfp_flower_pf_repr_close_queue(struct nfp_flower_representor *repr) ++{ ++ struct rte_eth_dev *eth_dev = repr->eth_dev; ++ ++ /* ++ * We assume that the DPDK application is stopping all the ++ * threads/queues before calling the device close function. ++ */ ++ nfp_net_disable_queues(eth_dev); ++ ++ /* Clear queues */ ++ nfp_net_close_tx_queue(eth_dev); ++ nfp_net_close_rx_queue(eth_dev); ++} ++ ++static void ++nfp_flower_repr_close_queue(struct nfp_flower_representor *repr) ++{ ++ switch (repr->repr_type) { ++ case NFP_REPR_TYPE_PHYS_PORT: ++ nfp_flower_repr_free_queue(repr); ++ break; ++ case NFP_REPR_TYPE_PF: ++ nfp_flower_pf_repr_close_queue(repr); ++ break; ++ case NFP_REPR_TYPE_VF: ++ nfp_flower_repr_free_queue(repr); ++ break; ++ default: ++ PMD_DRV_LOG(ERR, "Unsupported repr port type."); ++ break; ++ } ++} ++ ++static int ++nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev) ++{ ++ uint16_t index; ++ struct nfp_flower_representor *repr; ++ ++ repr = eth_dev->data->dev_private; ++ rte_ring_free(repr->ring); ++ ++ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { ++ index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); ++ repr->app_fw_flower->phy_reprs[index] = NULL; ++ } else { ++ index = repr->vf_id; ++ repr->app_fw_flower->vf_reprs[index] = NULL; ++ } ++ ++ return 0; ++} ++ ++static int ++nfp_flower_pf_repr_uninit(struct rte_eth_dev *eth_dev) ++{ ++ struct nfp_flower_representor *repr = eth_dev->data->dev_private; ++ ++ repr->app_fw_flower->pf_repr = NULL; ++ ++ return 0; ++} ++ ++static void ++nfp_flower_repr_free(struct nfp_flower_representor *repr, ++ enum nfp_repr_type repr_type) ++{ ++ switch (repr_type) { ++ case NFP_REPR_TYPE_PHYS_PORT: ++ nfp_flower_repr_uninit(repr->eth_dev); ++ break; ++ case NFP_REPR_TYPE_PF: ++ nfp_flower_pf_repr_uninit(repr->eth_dev); ++ break; ++ case NFP_REPR_TYPE_VF: ++ nfp_flower_repr_uninit(repr->eth_dev); ++ break; ++ default: ++ PMD_DRV_LOG(ERR, "Unsupported repr port type."); ++ break; ++ } ++} ++ ++/* Reset and stop device. The device can not be restarted. 
*/ ++static int ++nfp_flower_repr_dev_close(struct rte_eth_dev *dev) ++{ ++ uint16_t i; ++ struct nfp_net_hw *hw; ++ struct nfp_pf_dev *pf_dev; ++ struct nfp_flower_representor *repr; ++ struct nfp_app_fw_flower *app_fw_flower; ++ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ ++ repr = dev->data->dev_private; ++ app_fw_flower = repr->app_fw_flower; ++ hw = app_fw_flower->pf_hw; ++ pf_dev = hw->pf_dev; ++ ++ if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC) ++ return -EINVAL; ++ ++ nfp_flower_repr_close_queue(repr); ++ ++ nfp_flower_repr_free(repr, repr->repr_type); ++ ++ for (i = 0; i < MAX_FLOWER_VFS; i++) { ++ if (app_fw_flower->vf_reprs[i] != NULL) ++ return 0; ++ } ++ ++ for (i = 0; i < MAX_FLOWER_PHYPORTS; i++) { ++ if (app_fw_flower->phy_reprs[i] != NULL) ++ return 0; ++ } ++ ++ if (app_fw_flower->pf_repr != NULL) ++ return 0; ++ ++ /* Now it is safe to free all PF resources */ ++ nfp_uninit_app_fw_flower(pf_dev); ++ nfp_pf_uninit(pf_dev); ++ ++ return 0; ++} ++ + static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { + .dev_infos_get = nfp_flower_repr_dev_infos_get, + + .dev_start = nfp_flower_pf_start, + .dev_configure = nfp_flower_repr_dev_configure, + .dev_stop = nfp_flower_pf_stop, ++ .dev_close = nfp_flower_repr_dev_close, + + .rx_queue_setup = nfp_pf_repr_rx_queue_setup, + .tx_queue_setup = nfp_pf_repr_tx_queue_setup, +@@ -528,7 +684,7 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { .stats_reset = nfp_flower_repr_stats_reset, .promiscuous_enable = nfp_net_promisc_enable, @@ -50442,7 +65603,15 @@ index 5809c838b3..32c4574bdc 100644 .mac_addr_set = nfp_flower_repr_mac_addr_set, }; -@@ -549,7 +561,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { +@@ -539,6 +695,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { + .dev_start = nfp_flower_repr_dev_start, + .dev_configure = nfp_flower_repr_dev_configure, + .dev_stop = nfp_flower_repr_dev_stop, ++ .dev_close = nfp_flower_repr_dev_close, + + .rx_queue_setup = nfp_flower_repr_rx_queue_setup, + .tx_queue_setup = nfp_flower_repr_tx_queue_setup, +@@ -549,7 +706,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { .stats_reset = nfp_flower_repr_stats_reset, .promiscuous_enable = nfp_net_promisc_enable, @@ -50451,7 +65620,15 @@ index 5809c838b3..32c4574bdc 100644 .mac_addr_set = nfp_flower_repr_mac_addr_set, -@@ -637,6 +649,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, +@@ -628,6 +785,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev, + + repr->app_fw_flower->pf_repr = repr; + repr->app_fw_flower->pf_hw->eth_dev = eth_dev; ++ repr->eth_dev = eth_dev; + + return 0; + } +@@ -637,6 +795,7 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, void *init_params) { int ret; @@ -50459,7 +65636,7 @@ index 5809c838b3..32c4574bdc 100644 unsigned int numa_node; char ring_name[RTE_ETH_NAME_MAX_LEN]; struct nfp_app_fw_flower *app_fw_flower; -@@ -710,10 +723,13 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, +@@ -710,10 +869,15 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, } /* Add repr to correct array */ @@ -50474,10 +65651,46 @@ index 5809c838b3..32c4574bdc 100644 + index = repr->vf_id; + app_fw_flower->vf_reprs[index] = repr; + } ++ ++ repr->eth_dev = eth_dev; return 0; -@@ -730,7 +746,9 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -725,12 +889,43 @@ ring_cleanup: + return ret; + } + ++static void ++nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower) ++{ ++ uint32_t i; ++ struct 
nfp_flower_representor *repr; ++ ++ for (i = 0; i < MAX_FLOWER_VFS; i++) { ++ repr = app_fw_flower->vf_reprs[i]; ++ if (repr != NULL) { ++ nfp_flower_repr_free(repr, NFP_REPR_TYPE_VF); ++ app_fw_flower->vf_reprs[i] = NULL; ++ } ++ } ++ ++ for (i = 0; i < MAX_FLOWER_PHYPORTS; i++) { ++ repr = app_fw_flower->phy_reprs[i]; ++ if (repr != NULL) { ++ nfp_flower_repr_free(repr, NFP_REPR_TYPE_PHYS_PORT); ++ app_fw_flower->phy_reprs[i] = NULL; ++ } ++ } ++ ++ repr = app_fw_flower->pf_repr; ++ if (repr != NULL) { ++ nfp_flower_repr_free(repr, NFP_REPR_TYPE_PF); ++ app_fw_flower->pf_repr = NULL; ++ } ++} ++ + static int + nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) { int i; int ret; @@ -50487,7 +65700,7 @@ index 5809c838b3..32c4574bdc 100644 struct nfp_eth_table *nfp_eth_table; struct nfp_eth_table_port *eth_port; struct nfp_flower_representor flower_repr = { -@@ -753,7 +771,13 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -753,7 +948,13 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) /* PF vNIC reprs get a random MAC address */ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); @@ -50502,7 +65715,14 @@ index 5809c838b3..32c4574bdc 100644 /* Create a eth_dev for this representor */ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, -@@ -775,7 +799,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -769,13 +970,14 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + eth_port = &nfp_eth_table->ports[i]; + flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT; + flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index); +- flower_repr.nfp_idx = eth_port->eth_index; ++ flower_repr.nfp_idx = eth_port->index; + flower_repr.vf_id = i + 1; + /* Copy the real mac of the interface to the representor struct */ rte_ether_addr_copy((struct rte_ether_addr *)eth_port->mac_addr, &flower_repr.mac_addr); @@ -50512,7 +65732,16 @@ index 5809c838b3..32c4574bdc 100644 /* * Create a eth_dev for this representor -@@ -806,7 +831,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +@@ -791,7 +993,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + } + + if (i < app_fw_flower->num_phyport_reprs) +- return ret; ++ goto repr_free; + + /* + * Now allocate eth_dev's for VF representors. 
+@@ -806,7 +1008,8 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) /* VF reprs get a random MAC address */ rte_eth_random_addr(flower_repr.mac_addr.addr_bytes); @@ -50522,6 +65751,76 @@ index 5809c838b3..32c4574bdc 100644 /* This will also allocate private memory for the device*/ ret = rte_eth_dev_create(eth_dev->device, flower_repr.name, +@@ -819,9 +1022,14 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + } + + if (i < app_fw_flower->num_vf_reprs) +- return ret; ++ goto repr_free; + + return 0; ++ ++repr_free: ++ nfp_flower_repr_free_all(app_fw_flower); ++ ++ return ret; + } + + int +@@ -839,10 +1047,9 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower) + pci_dev = pf_dev->pci_dev; + + /* Allocate a switch domain for the flower app */ +- if (app_fw_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID && +- rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id)) { ++ ret = rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id); ++ if (ret != 0) + PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device"); +- } + + /* Now parse PCI device args passed for representor info */ + if (pci_dev->device.devargs != NULL) { +@@ -882,8 +1089,15 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower) + ret = nfp_flower_repr_alloc(app_fw_flower); + if (ret != 0) { + PMD_INIT_LOG(ERR, "representors allocation failed"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto domain_free; + } + + return 0; ++ ++domain_free: ++ if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) ++ PMD_INIT_LOG(WARNING, "Failed to free switch domain for device"); ++ ++ return ret; + } +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h +index 685cbe46b4..c04321455c 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h +@@ -34,6 +34,7 @@ struct nfp_flower_representor { + struct rte_ring *ring; + struct rte_eth_link link; + struct rte_eth_stats repr_stats; ++ struct rte_eth_dev *eth_dev; + }; + + int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower); +diff --git a/dpdk/drivers/net/nfp/meson.build b/dpdk/drivers/net/nfp/meson.build +index 7416fd3706..bc3bc737a0 100644 +--- a/dpdk/drivers/net/nfp/meson.build ++++ b/dpdk/drivers/net/nfp/meson.build +@@ -4,6 +4,7 @@ + if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on 64-bit Linux' ++ subdir_done() + endif + sources = files( + 'flower/nfp_flower.c', diff --git a/dpdk/drivers/net/nfp/nfp_common.c b/dpdk/drivers/net/nfp/nfp_common.c index 71711bfa22..33613bb2b3 100644 --- a/dpdk/drivers/net/nfp/nfp_common.c @@ -50638,7 +65937,7 @@ index 71711bfa22..33613bb2b3 100644 * Local variables: * c-file-style: "Linux" diff --git a/dpdk/drivers/net/nfp/nfp_common.h b/dpdk/drivers/net/nfp/nfp_common.h -index 36c19b47e4..d1a07f5a72 100644 +index 36c19b47e4..5b5c0aa7d3 100644 --- a/dpdk/drivers/net/nfp/nfp_common.h +++ b/dpdk/drivers/net/nfp/nfp_common.h @@ -111,6 +111,7 @@ struct nfp_net_adapter; @@ -50649,17 +65948,18 @@ index 36c19b47e4..d1a07f5a72 100644 #include #include -@@ -447,6 +448,8 @@ void nfp_net_close_rx_queue(struct rte_eth_dev *dev); +@@ -447,6 +448,9 @@ void nfp_net_close_rx_queue(struct rte_eth_dev *dev); void nfp_net_stop_tx_queue(struct rte_eth_dev *dev); void nfp_net_close_tx_queue(struct rte_eth_dev *dev); int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, 
uint16_t port); +int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name); +void nfp_net_irq_unmask(struct rte_eth_dev *dev); ++void nfp_pf_uninit(struct nfp_pf_dev *pf_dev); #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ (&((struct nfp_net_adapter *)adapter)->hw) diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c -index 0956ea81df..9f940a12b6 100644 +index 0956ea81df..68fd67a024 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev.c @@ -70,6 +70,7 @@ nfp_net_start(struct rte_eth_dev *dev) @@ -50691,15 +65991,119 @@ index 0956ea81df..9f940a12b6 100644 return 0; -@@ -298,7 +304,6 @@ nfp_net_close(struct rte_eth_dev *dev) +@@ -258,6 +264,45 @@ nfp_net_set_link_down(struct rte_eth_dev *dev) + hw->nfp_idx, 0); + } + ++static void ++nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, ++ uint8_t id) ++{ ++ struct nfp_app_fw_nic *app_fw_nic; ++ ++ app_fw_nic = pf_dev->app_fw_priv; ++ if (app_fw_nic->ports[id] != NULL) ++ app_fw_nic->ports[id] = NULL; ++} ++ ++static void ++nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev) ++{ ++ nfp_cpp_area_release_free(pf_dev->ctrl_area); ++ rte_free(pf_dev->app_fw_priv); ++} ++ ++void ++nfp_pf_uninit(struct nfp_pf_dev *pf_dev) ++{ ++ nfp_cpp_area_release_free(pf_dev->hwqueues_area); ++ free(pf_dev->sym_tbl); ++ free(pf_dev->nfp_eth_table); ++ free(pf_dev->hwinfo); ++ nfp_cpp_free(pf_dev->cpp); ++ rte_free(pf_dev); ++} ++ ++static int ++nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev) ++{ ++ free(pf_dev->sym_tbl); ++ nfp_cpp_free(pf_dev->cpp); ++ rte_free(pf_dev); ++ ++ return 0; ++} ++ + /* Reset and stop device. The device can not be restarted. */ + static int + nfp_net_close(struct rte_eth_dev *dev) +@@ -268,8 +313,19 @@ nfp_net_close(struct rte_eth_dev *dev) + struct nfp_app_fw_nic *app_fw_nic; + int i; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ /* ++ * In secondary process, a released eth device can be found by its name ++ * in shared memory. ++ * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the ++ * eth device has been released. 
++ */ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ if (dev->state == RTE_ETH_DEV_UNUSED) ++ return 0; ++ ++ nfp_pf_secondary_uninit(dev->process_private); + return 0; ++ } + + PMD_INIT_LOG(DEBUG, "Close"); + +@@ -297,8 +353,11 @@ nfp_net_close(struct rte_eth_dev *dev) + /* Only free PF resources after all physical ports have been closed */ /* Mark this port as unused and free device priv resources*/ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); - app_fw_nic->ports[hw->idx] = NULL; +- app_fw_nic->ports[hw->idx] = NULL; - rte_eth_dev_release_port(dev); ++ ++ if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC) ++ return -EINVAL; ++ ++ nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx); for (i = 0; i < app_fw_nic->total_phyports; i++) { /* Check to see if ports are still in use */ -@@ -517,14 +522,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -306,26 +365,15 @@ nfp_net_close(struct rte_eth_dev *dev) + return 0; + } + +- /* Now it is safe to free all PF resources */ +- PMD_INIT_LOG(INFO, "Freeing PF resources"); +- nfp_cpp_area_free(pf_dev->ctrl_area); +- nfp_cpp_area_free(pf_dev->hwqueues_area); +- free(pf_dev->hwinfo); +- free(pf_dev->sym_tbl); +- nfp_cpp_free(pf_dev->cpp); +- rte_free(app_fw_nic); +- rte_free(pf_dev); +- ++ /* Enable in nfp_net_start() */ + rte_intr_disable(pci_dev->intr_handle); + +- /* unregister callback func from eal lib */ ++ /* Register in nfp_net_init() */ + rte_intr_callback_unregister(pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, (void *)dev); + +- /* +- * The ixgbe PMD disables the pcie master on the +- * device. The i40e does not... +- */ ++ nfp_uninit_app_fw_nic(pf_dev); ++ nfp_pf_uninit(pf_dev); + + return 0; + } +@@ -517,14 +565,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) /* Use backpointer to the CoreNIC app struct */ app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); @@ -50714,7 +66118,7 @@ index 0956ea81df..9f940a12b6 100644 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; if (port < 0 || port > 7) { PMD_DRV_LOG(ERR, "Port value is wrong"); -@@ -572,6 +569,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -572,6 +612,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); @@ -50724,7 +66128,7 @@ index 0956ea81df..9f940a12b6 100644 if (nfp_net_ethdev_ops_mount(hw, eth_dev)) return -EINVAL; -@@ -609,6 +609,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -609,6 +652,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -50732,7 +66136,7 @@ index 0956ea81df..9f940a12b6 100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) -@@ -690,6 +691,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -690,6 +734,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_dev_interrupt_handler, (void *)eth_dev); /* Telling the firmware about the LSC interrupt entry */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); @@ -50741,7 +66145,7 @@ index 0956ea81df..9f940a12b6 100644 /* Recording current stats counters values */ nfp_net_stats_reset(eth_dev); -@@ -724,7 +727,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +@@ -724,7 +770,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) goto load_fw; /* Then try the PCI name */ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, @@ -50750,7 +66154,19 @@ index 0956ea81df..9f940a12b6 100644 PMD_DRV_LOG(DEBUG, "Trying with fw 
file: %s", fw_name); if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) -@@ -930,9 +933,11 @@ app_cleanup: +@@ -917,10 +963,9 @@ port_cleanup: + struct rte_eth_dev *tmp_dev; + tmp_dev = app_fw_nic->ports[i]->eth_dev; + rte_eth_dev_release_port(tmp_dev); +- app_fw_nic->ports[i] = NULL; + } + } +- nfp_cpp_area_free(pf_dev->ctrl_area); ++ nfp_cpp_area_release_free(pf_dev->ctrl_area); + app_cleanup: + rte_free(app_fw_nic); + +@@ -930,9 +975,11 @@ app_cleanup: static int nfp_pf_init(struct rte_pci_device *pci_dev) { @@ -50763,7 +66179,7 @@ index 0956ea81df..9f940a12b6 100644 struct nfp_cpp *cpp; enum nfp_app_fw_id app_fw_id; struct nfp_pf_dev *pf_dev; -@@ -976,6 +981,10 @@ nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -976,6 +1023,10 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto hwinfo_cleanup; } @@ -50774,7 +66190,7 @@ index 0956ea81df..9f940a12b6 100644 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { PMD_INIT_LOG(ERR, "Error when uploading firmware"); ret = -EIO; -@@ -1032,7 +1041,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -1032,7 +1083,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto pf_cleanup; } @@ -50784,8 +66200,17 @@ index 0956ea81df..9f940a12b6 100644 addr, NFP_QCP_QUEUE_AREA_SZ, &pf_dev->hwqueues_area); if (pf_dev->hw_queues == NULL) { +@@ -1078,7 +1130,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + return 0; + + hwqueues_cleanup: +- nfp_cpp_area_free(pf_dev->hwqueues_area); ++ nfp_cpp_area_release_free(pf_dev->hwqueues_area); + pf_cleanup: + rte_free(pf_dev); + sym_tbl_cleanup: diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -index d1427b63bc..435127604a 100644 +index d1427b63bc..60e90cd5cb 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c @@ -45,6 +45,7 @@ nfp_netvf_start(struct rte_eth_dev *dev) @@ -50842,7 +66267,16 @@ index d1427b63bc..435127604a 100644 if (nfp_netvf_ethdev_ops_mount(hw, eth_dev)) return -EINVAL; -@@ -366,6 +367,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -319,8 +320,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + +- rte_eth_copy_pci_info(eth_dev, pci_dev); +- + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->subsystem_device_id = pci_dev->id.subsystem_device_id; +@@ -366,6 +365,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -50850,7 +66284,7 @@ index d1427b63bc..435127604a 100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) -@@ -450,6 +452,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -450,6 +450,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) (void *)eth_dev); /* Telling the firmware about the LSC interrupt entry */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); @@ -50860,7 +66294,7 @@ index d1427b63bc..435127604a 100644 nfp_net_stats_reset(eth_dev); } diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c -index 6f79d950db..faa0eda325 100644 +index 6f79d950db..17c091ffa0 100644 --- a/dpdk/drivers/net/nfp/nfp_flow.c +++ b/dpdk/drivers/net/nfp/nfp_flow.c @@ -285,7 +285,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv, @@ -51340,7 +66774,37 @@ index 6f79d950db..faa0eda325 100644 if (!tp_set_flag) { position += sizeof(struct nfp_fl_act_set_tport); tp_set_flag = true; -@@ -3484,7 +3559,7 @@ nfp_flow_process(struct 
nfp_flower_representor *representor, +@@ -3370,7 +3445,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + ttl_tos_flag = true; + } + } else { +- nfp_flow_action_set_hl(position, action, ttl_tos_flag); ++ nfp_flow_action_set_hl(position, action, tc_hl_flag); + if (!tc_hl_flag) { + position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); + tc_hl_flag = true; +@@ -3387,7 +3462,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP"); +- nfp_flow_action_set_tc(position, action, ttl_tos_flag); ++ nfp_flow_action_set_tc(position, action, tc_hl_flag); + if (!tc_hl_flag) { + position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); + tc_hl_flag = true; +@@ -3442,6 +3517,11 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + total_actions++; + } + ++ if (nfp_flow->install_flag && total_actions == 0) { ++ PMD_DRV_LOG(ERR, "The action list is empty"); ++ return -ENOTSUP; ++ } ++ + if (drop_flag) + nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_DROP); + else if (total_actions > 1) +@@ -3484,7 +3564,7 @@ nfp_flow_process(struct nfp_flower_representor *representor, return NULL; } @@ -51349,7 +66813,7 @@ index 6f79d950db..faa0eda325 100644 if (nfp_flow == NULL) { PMD_DRV_LOG(ERR, "Alloc nfp flow failed."); goto free_stats; -@@ -3592,6 +3667,7 @@ nfp_flow_teardown(struct nfp_flow_priv *priv, +@@ -3592,6 +3672,7 @@ nfp_flow_teardown(struct nfp_flow_priv *priv, nfp_flow_meta = nfp_flow->payload.meta; mask_data = nfp_flow->payload.mask_data; mask_len = nfp_flow_meta->mask_len << NFP_FL_LW_SIZ; @@ -51357,7 +66821,7 @@ index 6f79d950db..faa0eda325 100644 if (!nfp_check_mask_remove(priv, mask_data, mask_len, &nfp_flow_meta->flags)) { PMD_DRV_LOG(ERR, "nfp mask del check failed."); -@@ -3791,14 +3867,21 @@ nfp_flow_flush(struct rte_eth_dev *dev, +@@ -3791,14 +3872,21 @@ nfp_flow_flush(struct rte_eth_dev *dev, void *next_data; uint32_t iter = 0; const void *next_key; @@ -51382,7 +66846,7 @@ index 6f79d950db..faa0eda325 100644 } return ret; -@@ -3809,6 +3892,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, +@@ -3809,6 +3897,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, struct rte_flow *nfp_flow, void *data) { @@ -51390,7 +66854,7 @@ index 6f79d950db..faa0eda325 100644 uint32_t ctx_id; struct rte_flow *flow; struct nfp_flow_priv *priv; -@@ -3823,6 +3907,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, +@@ -3823,6 +3912,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, } query = (struct rte_flow_query_count *)data; @@ -51398,7 +66862,7 @@ index 6f79d950db..faa0eda325 100644 memset(query, 0, sizeof(*query)); ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id); -@@ -3834,7 +3919,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, +@@ -3834,7 +3924,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, query->bytes = stats->bytes; query->hits_set = 1; query->bytes_set = 1; @@ -51407,7 +66871,7 @@ index 6f79d950db..faa0eda325 100644 stats->pkts = 0; stats->bytes = 0; } -@@ -3981,11 +4066,21 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) +@@ -3981,11 +4071,21 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) size_t stats_size; uint64_t ctx_count; uint64_t ctx_split; @@ -51430,7 +66894,7 @@ index 6f79d950db..faa0eda325 100644 .entries = NFP_MASK_TABLE_ENTRIES, .hash_func = rte_jhash, .socket_id = rte_socket_id(), -@@ -3994,7 +4089,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) +@@ -3994,7 +4094,7 @@ nfp_flow_priv_init(struct 
nfp_pf_dev *pf_dev) }; struct rte_hash_parameters flow_hash_params = { @@ -51439,7 +66903,7 @@ index 6f79d950db..faa0eda325 100644 .hash_func = rte_jhash, .socket_id = rte_socket_id(), .key_len = sizeof(uint32_t), -@@ -4002,7 +4097,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) +@@ -4002,7 +4102,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) }; struct rte_hash_parameters pre_tun_hash_params = { @@ -51759,6 +67223,19 @@ index 37799af558..014f6c9df8 100644 if (!*area) goto err_eio; +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c +index 318c5800d7..b9de16e889 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c +@@ -151,7 +151,7 @@ nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + if (tmp != key) + return NFP_ERRPTR(EEXIST); + +- mutex = calloc(sizeof(*mutex), 1); ++ mutex = calloc(1, sizeof(*mutex)); + if (!mutex) + return NFP_ERRPTR(ENOMEM); + diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c b/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c index 56bbf05cd8..21879f7eb6 100644 --- a/dpdk/drivers/net/nfp/nfpcore/nfp_rtsym.c @@ -51952,11 +67429,33 @@ index 56bbf05cd8..21879f7eb6 100644 if (!mem) { printf("Failed to map symbol %s\n", name); return NULL; +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_devids.h b/dpdk/drivers/net/ngbe/base/ngbe_devids.h +index 83eedf423e..e1efa62015 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_devids.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_devids.h +@@ -83,6 +83,7 @@ + #define NGBE_YT8521S_SFP_GPIO 0x0062 + #define NGBE_INTERNAL_YT8521S_SFP_GPIO 0x0064 + #define NGBE_LY_YT8521S_SFP 0x0070 ++#define NGBE_RGMII_FPGA 0x0080 + #define NGBE_WOL_SUP 0x4000 + #define NGBE_NCSI_SUP 0x8000 + diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c -index 283cdca367..27243d85c8 100644 +index 283cdca367..b9bb861adc 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c +++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c -@@ -1541,11 +1541,15 @@ s32 ngbe_clear_vfta(struct ngbe_hw *hw) +@@ -173,6 +173,9 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw) + ngbe_reset_misc_em(hw); + hw->mac.clear_hw_cntrs(hw); + ++ if (!((hw->sub_device_id & NGBE_OEM_MASK) == NGBE_RGMII_FPGA)) ++ hw->phy.set_phy_power(hw, false); ++ + msec_delay(50); + + /* Store the permanent mac address */ +@@ -1541,11 +1544,15 @@ s32 ngbe_clear_vfta(struct ngbe_hw *hw) s32 ngbe_check_mac_link_em(struct ngbe_hw *hw, u32 *speed, bool *link_up, bool link_up_wait_to_complete) { @@ -52041,7 +67540,7 @@ index b2fbc4f74d..6093ee7d5c 100644 #define RTL_GSR 0x10 #define RTL_GSR_ST MS16(0, 0x7) diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c -index c88946f7c3..754faadd6a 100644 +index c88946f7c3..f6c979b1e7 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c +++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c @@ -100,11 +100,17 @@ s32 ngbe_write_phy_reg_sds_ext_yt(struct ngbe_hw *hw, @@ -52130,7 +67629,17 @@ index c88946f7c3..754faadd6a 100644 /*disable 100/10base-T Self-negotiation ability*/ ngbe_read_phy_reg_mdi(hw, YT_ANA, 0, &value); value &= ~(YT_ANA_100BASET_FULL | YT_ANA_100BASET_HALF | -@@ -279,10 +296,12 @@ skip_an: +@@ -273,16 +290,22 @@ skip_an: + value |= value_r4; + ngbe_write_phy_reg_mdi(hw, YT_ANA, 0, value); + ++ /* config for yt8531sh-ca */ ++ ngbe_write_phy_reg_ext_yt(hw, YT_SPEC_CONF, 0, ++ YT_SPEC_CONF_8531SH_CA); ++ + /* software reset to make the above configuration + * take effect + */ ngbe_read_phy_reg_mdi(hw, YT_BCR, 0, &value); 
value |= YT_BCR_RESET; ngbe_write_phy_reg_mdi(hw, YT_BCR, 0, value); @@ -52143,7 +67652,7 @@ index c88946f7c3..754faadd6a 100644 ngbe_read_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, &value); value |= YT_RGMII_CONF1_MODE; ngbe_write_phy_reg_ext_yt(hw, YT_RGMII_CONF1, 0, value); -@@ -297,6 +316,7 @@ skip_an: +@@ -297,6 +320,7 @@ skip_an: ngbe_read_phy_reg_ext_yt(hw, YT_CHIP, 0, &value); value &= ~YT_SMI_PHY_SW_RST; ngbe_write_phy_reg_ext_yt(hw, YT_CHIP, 0, value); @@ -52151,7 +67660,7 @@ index c88946f7c3..754faadd6a 100644 hw->phy.set_phy_power(hw, true); } else if ((value & YT_CHIP_MODE_MASK) == YT_CHIP_MODE_SEL(5)) { -@@ -320,7 +340,9 @@ skip_an: +@@ -320,7 +344,9 @@ skip_an: } /* duplex full */ value |= YT_BCR_DUPLEX | YT_BCR_RESET; @@ -52161,7 +67670,7 @@ index c88946f7c3..754faadd6a 100644 goto skip_an_sr; } -@@ -339,19 +361,23 @@ skip_an: +@@ -339,19 +365,23 @@ skip_an: /* duplex full */ value |= YT_BCR_DUPLEX | YT_BCR_RESET; @@ -52185,7 +67694,7 @@ index c88946f7c3..754faadd6a 100644 return 0; } -@@ -366,6 +392,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw) +@@ -366,6 +396,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw) hw->phy.type != ngbe_phy_yt8521s_sfi) return NGBE_ERR_PHY_TYPE; @@ -52193,7 +67702,7 @@ index c88946f7c3..754faadd6a 100644 /* check chip_mode first */ ngbe_read_phy_reg_ext_yt(hw, YT_CHIP, 0, &ctrl); if (ctrl & YT_CHIP_MODE_MASK) { -@@ -395,6 +422,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw) +@@ -395,6 +426,7 @@ s32 ngbe_reset_phy_yt(struct ngbe_hw *hw) msleep(1); } } @@ -52201,7 +67710,7 @@ index c88946f7c3..754faadd6a 100644 if (i == YT_PHY_RST_WAIT_PERIOD) { DEBUGOUT("PHY reset polling failed to complete."); -@@ -409,7 +437,9 @@ s32 ngbe_get_phy_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) +@@ -409,7 +441,9 @@ s32 ngbe_get_phy_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) u16 value; s32 status = 0; @@ -52211,7 +67720,7 @@ index c88946f7c3..754faadd6a 100644 value &= YT_FANA_PAUSE_MASK; *pause_bit = (u8)(value >> 7); -@@ -421,7 +451,9 @@ s32 ngbe_get_phy_lp_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) +@@ -421,7 +455,9 @@ s32 ngbe_get_phy_lp_advertised_pause_yt(struct ngbe_hw *hw, u8 *pause_bit) u16 value; s32 status = 0; @@ -52221,7 +67730,7 @@ index c88946f7c3..754faadd6a 100644 value &= YT_FLPAR_PAUSE_MASK; *pause_bit = (u8)(value >> 7); -@@ -433,10 +465,12 @@ s32 ngbe_set_phy_pause_adv_yt(struct ngbe_hw *hw, u16 pause_bit) +@@ -433,10 +469,12 @@ s32 ngbe_set_phy_pause_adv_yt(struct ngbe_hw *hw, u16 pause_bit) u16 value; s32 status = 0; @@ -52234,7 +67743,7 @@ index c88946f7c3..754faadd6a 100644 return status; } -@@ -453,6 +487,7 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw, +@@ -453,6 +491,7 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw, /* Initialize speed and link to default case */ *link_up = false; *speed = NGBE_LINK_SPEED_UNKNOWN; @@ -52242,7 +67751,7 @@ index c88946f7c3..754faadd6a 100644 ngbe_write_phy_reg_ext_yt(hw, YT_SMI_PHY, 0, 0); ngbe_read_phy_reg_mdi(hw, YT_INTR_STATUS, 0, &insr); -@@ -472,6 +507,7 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw, +@@ -472,6 +511,7 @@ s32 ngbe_check_phy_link_yt(struct ngbe_hw *hw, *link_up = true; } @@ -52250,7 +67759,7 @@ index c88946f7c3..754faadd6a 100644 if (*link_up) { if (phy_speed == YT_SPST_SPEED_1000M) *speed = NGBE_LINK_SPEED_1GB_FULL; -@@ -488,6 +524,7 @@ s32 ngbe_set_phy_power_yt(struct ngbe_hw *hw, bool on) +@@ -488,6 +528,7 @@ s32 ngbe_set_phy_power_yt(struct ngbe_hw *hw, bool on) { u16 value = 0; @@ -52258,7 +67767,7 @@ index c88946f7c3..754faadd6a 100644 /* power 
down/up in fiber mode */ hw->phy.read_reg(hw, YT_BCR, 0, &value); if (on) -@@ -504,6 +541,7 @@ s32 ngbe_set_phy_power_yt(struct ngbe_hw *hw, bool on) +@@ -504,6 +545,7 @@ s32 ngbe_set_phy_power_yt(struct ngbe_hw *hw, bool on) else value |= YT_BCR_PWDN; ngbe_write_phy_reg_mdi(hw, YT_BCR, 0, value); @@ -52266,6 +67775,19 @@ index c88946f7c3..754faadd6a 100644 return 0; } +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h +index ddf992e79a..c45bec7ce7 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h +@@ -32,6 +32,8 @@ + #define YT_MISC 0xA006 + #define YT_MISC_FIBER_PRIO MS16(8, 0x1) /* 0 for UTP */ + #define YT_MISC_RESV MS16(0, 0x1) ++#define YT_SPEC_CONF 0xA023 ++#define YT_SPEC_CONF_8531SH_CA 0x4031 + + /* SDS EXT */ + #define YT_AUTO 0xA5 diff --git a/dpdk/drivers/net/ngbe/base/ngbe_type.h b/dpdk/drivers/net/ngbe/base/ngbe_type.h index aa5c41146c..8a7d2cd331 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_type.h @@ -52329,7 +67851,7 @@ index aa5c41146c..8a7d2cd331 100644 u64 rx_qp_packets; u64 tx_qp_packets; diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -index afdb3ad41f..08e14a05c9 100644 +index afdb3ad41f..443bd9fef9 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c @@ -90,6 +90,7 @@ static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev); @@ -52350,6 +67872,15 @@ index afdb3ad41f..08e14a05c9 100644 HW_XSTAT(rx_management_packets), HW_XSTAT(tx_management_packets), HW_XSTAT(rx_management_dropped), +@@ -543,7 +546,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev) + if (ethdev == NULL) + return 0; + +- return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit); ++ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit); + } + + static struct rte_pci_driver rte_ngbe_pmd = { @@ -972,9 +975,6 @@ ngbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -52421,7 +67952,18 @@ index afdb3ad41f..08e14a05c9 100644 ngbe_pf_reset_hw(hw); ngbe_dev_stop(dev); -@@ -1869,24 +1875,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -1805,7 +1811,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; +- dev_info->max_rx_pktlen = 15872; ++ dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD; ++ dev_info->min_mtu = RTE_ETHER_MIN_MTU; ++ dev_info->max_mtu = NGBE_MAX_MTU; + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; +@@ -1869,24 +1877,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } @@ -52446,7 +67988,7 @@ index afdb3ad41f..08e14a05c9 100644 /* return 0 means link status changed, -1 means not changed */ int ngbe_dev_link_update_share(struct rte_eth_dev *dev, -@@ -1896,7 +1884,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1896,7 +1886,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, struct rte_eth_link link; u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; u32 lan_speed = 0; @@ -52454,7 +67996,7 @@ index afdb3ad41f..08e14a05c9 100644 bool link_up; int err; int wait = 1; -@@ -1910,9 +1897,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1910,9 +1899,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, hw->mac.get_link_status = 
true; @@ -52464,7 +68006,7 @@ index afdb3ad41f..08e14a05c9 100644 /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) wait = 0; -@@ -1927,7 +1911,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1927,7 +1913,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, if (!link_up) return rte_eth_linkstatus_set(dev, &link); @@ -52472,7 +68014,7 @@ index afdb3ad41f..08e14a05c9 100644 link.link_status = RTE_ETH_LINK_UP; link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; -@@ -1961,6 +1944,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1961,6 +1946,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); } @@ -52481,7 +68023,7 @@ index afdb3ad41f..08e14a05c9 100644 } return rte_eth_linkstatus_set(dev, &link); -@@ -2380,6 +2365,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +@@ -2380,6 +2367,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return -EIO; } @@ -52576,10 +68118,18 @@ index afdb3ad41f..08e14a05c9 100644 ngbe_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h -index 8d500fd38c..bb96f6a5e7 100644 +index 8d500fd38c..d6c56dcce4 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h -@@ -341,7 +341,6 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, +@@ -31,6 +31,7 @@ + + #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ + ++#define NGBE_MAX_MTU 9414 + /* The overhead from MTU to max frame size. */ + #define NGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + +@@ -341,7 +342,6 @@ void ngbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); void ngbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask); @@ -52588,7 +68138,7 @@ index 8d500fd38c..bb96f6a5e7 100644 struct ngbe_hw_stats *hw_stats); diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c -index 9fd24fa444..54a6f6a887 100644 +index 9fd24fa444..8490b08318 100644 --- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c +++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c @@ -24,15 +24,11 @@ @@ -52788,7 +68338,15 @@ index 9fd24fa444..54a6f6a887 100644 rxd = *rxdp; PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " -@@ -1939,12 +1914,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) +@@ -1816,6 +1791,7 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq) + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); ++ rte_memzone_free(txq->mz); + } + rte_free(txq); + } +@@ -1939,12 +1915,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | @@ -52801,7 +68359,31 @@ index 9fd24fa444..54a6f6a887 100644 RTE_ETH_TX_OFFLOAD_MULTI_SEGS; if (hw->is_pf) -@@ -2237,6 +2208,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) +@@ -2024,6 +1996,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ txq->mz = tz; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; +@@ -2126,6 +2099,7 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq) + ngbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); ++ 
rte_memzone_free(rxq->mz); + rte_free(rxq); + } + } +@@ -2216,6 +2190,7 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; ++ rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + } +@@ -2237,6 +2212,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev) RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_KEEP_CRC | RTE_ETH_RX_OFFLOAD_VLAN_FILTER | @@ -52809,7 +68391,15 @@ index 9fd24fa444..54a6f6a887 100644 RTE_ETH_RX_OFFLOAD_SCATTER; if (hw->is_pf) -@@ -2460,6 +2432,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2305,6 +2281,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ rxq->mz = rz; + /* + * Zero init all the descriptors in the ring. + */ +@@ -2460,6 +2437,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) if (txq != NULL) { txq->ops->release_mbufs(txq); txq->ops->reset(txq); @@ -52817,7 +68407,7 @@ index 9fd24fa444..54a6f6a887 100644 } } -@@ -2469,6 +2442,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2469,6 +2447,7 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev) if (rxq != NULL) { ngbe_rx_queue_release_mbufs(rxq); ngbe_reset_rx_queue(adapter, rxq); @@ -52825,6 +68415,26 @@ index 9fd24fa444..54a6f6a887 100644 } } } +diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.h b/dpdk/drivers/net/ngbe/ngbe_rxtx.h +index 9130f9d0df..2914b9a756 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.h ++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.h +@@ -276,6 +276,7 @@ struct ngbe_rx_queue { + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2]; ++ const struct rte_memzone *mz; + }; + + /** +@@ -353,6 +354,7 @@ struct ngbe_tx_queue { + uint8_t tx_deferred_start; /**< not in global dev start */ + + const struct ngbe_txq_ops *ops; /**< txq ops */ ++ const struct rte_memzone *mz; + }; + + struct ngbe_txq_ops { diff --git a/dpdk/drivers/net/null/rte_eth_null.c b/dpdk/drivers/net/null/rte_eth_null.c index 47d9554ec5..1fbe572bd1 100644 --- a/dpdk/drivers/net/null/rte_eth_null.c @@ -52935,7 +68545,7 @@ index d52a3e73d5..2b97f0163e 100644 } diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c -index 0352a57950..551f3cf193 100644 +index 0352a57950..0073dd7405 100644 --- a/dpdk/drivers/net/pfe/pfe_ethdev.c +++ b/dpdk/drivers/net/pfe/pfe_ethdev.c @@ -241,6 +241,7 @@ pfe_eth_open(struct rte_eth_dev *dev) @@ -52977,6 +68587,16 @@ index 0352a57950..551f3cf193 100644 return 0; } +@@ -509,7 +520,8 @@ pfe_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, +- RTE_PTYPE_L4_SCTP ++ RTE_PTYPE_L4_SCTP, ++ RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == pfe_recv_pkts || diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c index a4923670d6..22cd470646 100644 --- a/dpdk/drivers/net/qede/qede_ethdev.c @@ -53371,7 +68991,7 @@ index bcf6664460..1b90cf7a21 100644 } diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index f2a6c33a19..2fd46c6b0b 100644 +index f2a6c33a19..a6bf6eec55 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -559,7 +559,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len, @@ -53420,7 +69040,15 @@ index f2a6c33a19..2fd46c6b0b 100644 &l4_cksum, &l4_phdr_cksum, &l4_raw_cksum); iovecs[k].iov_base = 
m_copy; -@@ -2267,29 +2276,6 @@ set_remote_iface(const char *key __rte_unused, +@@ -1853,6 +1862,7 @@ tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_SCTP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; +@@ -2267,29 +2277,6 @@ set_remote_iface(const char *key __rte_unused, return 0; } @@ -53450,7 +69078,7 @@ index f2a6c33a19..2fd46c6b0b 100644 static int set_mac_type(const char *key __rte_unused, const char *value, -@@ -2303,15 +2289,15 @@ set_mac_type(const char *key __rte_unused, +@@ -2303,15 +2290,15 @@ set_mac_type(const char *key __rte_unused, if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) { static int iface_idx; @@ -56730,11 +72358,365 @@ index 20c310e5e7..d9bb65831a 100644 __u8 input_len = sizeof(v6_tuple) / sizeof(__u32); if (rsskey->hash_fields & (1 << HASH_FIELD_IPV6_L3)) +diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c +index efe66fe059..7468c3f0ea 100644 +--- a/dpdk/drivers/net/tap/tap_flow.c ++++ b/dpdk/drivers/net/tap/tap_flow.c +@@ -11,6 +11,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -1082,8 +1083,11 @@ priv_flow_process(struct pmd_internals *pmd, + } + /* use flower filter type */ + tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower"); +- if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) +- goto exit_item_not_supported; ++ if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) { ++ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, ++ actions, "could not allocated netlink msg"); ++ goto exit_return_error; ++ } + } + for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { + const struct tap_flow_items *token = NULL; +@@ -1199,9 +1203,12 @@ actions: + if (action) + goto exit_action_not_supported; + action = 1; +- if (!queue || +- (queue->index > pmd->dev->data->nb_rx_queues - 1)) +- goto exit_action_not_supported; ++ if (queue->index >= pmd->dev->data->nb_rx_queues) { ++ rte_flow_error_set(error, ERANGE, ++ RTE_FLOW_ERROR_TYPE_ACTION, actions, ++ "queue index out of range"); ++ goto exit_return_error; ++ } + if (flow) { + struct action_data adata = { + .id = "skbedit", +@@ -1227,7 +1234,7 @@ actions: + if (!pmd->rss_enabled) { + err = rss_enable(pmd, attr, error); + if (err) +- goto exit_action_not_supported; ++ goto exit_return_error; + } + if (flow) + err = rss_add_actions(flow, pmd, rss, error); +@@ -1235,7 +1242,7 @@ actions: + goto exit_action_not_supported; + } + if (err) +- goto exit_action_not_supported; ++ goto exit_return_error; + } + /* When fate is unknown, drop traffic. */ + if (!action) { +@@ -1258,6 +1265,7 @@ exit_item_not_supported: + exit_action_not_supported: + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, + actions, "action not supported"); ++exit_return_error: + return -rte_errno; + } + +@@ -1290,9 +1298,7 @@ tap_flow_validate(struct rte_eth_dev *dev, + * In those rules, the handle (uint32_t) is the part that would identify + * specifically each rule. + * +- * On 32-bit architectures, the handle can simply be the flow's pointer address. +- * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently) +- * unique handle. ++ * Use jhash of the flow pointer to make a unique handle. + * + * @param[in, out] flow + * The flow that needs its handle set. 
+@@ -1302,16 +1308,18 @@ tap_flow_set_handle(struct rte_flow *flow) + { + union { + struct rte_flow *flow; +- const void *key; +- } tmp; +- uint32_t handle = 0; ++ uint32_t words[sizeof(flow) / sizeof(uint32_t)]; ++ } tmp = { ++ .flow = flow, ++ }; ++ uint32_t handle; ++ static uint64_t hash_seed; + +- tmp.flow = flow; ++ if (hash_seed == 0) ++ hash_seed = rte_rand(); ++ ++ handle = rte_jhash_32b(tmp.words, sizeof(flow) / sizeof(uint32_t), hash_seed); + +- if (sizeof(flow) > 4) +- handle = rte_jhash(tmp.key, sizeof(flow), 1); +- else +- handle = (uintptr_t)flow; + /* must be at least 1 to avoid letting the kernel choose one for us */ + if (!handle) + handle = 1; +@@ -1587,7 +1595,7 @@ tap_flow_isolate(struct rte_eth_dev *dev, + * If netdevice is there, setup appropriate flow rules immediately. + * Otherwise it will be set when bringing up the netdevice (tun_alloc). + */ +- if (!process_private->rxq_fds[0]) ++ if (process_private->rxq_fds[0] == -1) + return 0; + if (set) { + struct rte_flow *remote_flow; +diff --git a/dpdk/drivers/net/tap/tap_netlink.c b/dpdk/drivers/net/tap/tap_netlink.c +index 75af3404b0..d9c260127d 100644 +--- a/dpdk/drivers/net/tap/tap_netlink.c ++++ b/dpdk/drivers/net/tap/tap_netlink.c +@@ -72,7 +72,8 @@ tap_nl_init(uint32_t nl_groups) + + #ifdef NETLINK_EXT_ACK + /* Ask for extended ACK response. on older kernel will ignore request. */ +- setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)); ++ if (setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) ++ TAP_LOG(NOTICE, "Unable to request netlink error information"); + #endif + + if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) { +diff --git a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c b/dpdk/drivers/net/thunderx/base/nicvf_mbox.c +index 5993eec4e6..0e0176974d 100644 +--- a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c ++++ b/dpdk/drivers/net/thunderx/base/nicvf_mbox.c +@@ -485,3 +485,15 @@ nicvf_mbox_reset_xcast(struct nicvf *nic) + mbx.msg.msg = NIC_MBOX_MSG_RESET_XCAST; + nicvf_mbox_send_msg_to_pf(nic, &mbx); + } ++ ++int ++nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac) ++{ ++ struct nic_mbx mbx = { .msg = { 0 } }; ++ ++ mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; ++ mbx.xcast.mode = mode; ++ mbx.xcast.mac = mac; ++ ++ return nicvf_mbox_send_msg_to_pf(nic, &mbx); ++} +diff --git a/dpdk/drivers/net/thunderx/base/nicvf_mbox.h b/dpdk/drivers/net/thunderx/base/nicvf_mbox.h +index 322c8159cb..47f3d13755 100644 +--- a/dpdk/drivers/net/thunderx/base/nicvf_mbox.h ++++ b/dpdk/drivers/net/thunderx/base/nicvf_mbox.h +@@ -45,6 +45,8 @@ + #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ + #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ + #define NIC_MBOX_MSG_RESET_XCAST 0xF2 /* Reset DCAM filtering mode */ ++#define NIC_MBOX_MSG_ADD_MCAST 0xF3 /* ADD MAC to DCAM filters */ ++#define NIC_MBOX_MSG_SET_XCAST 0xF4 /* Set MCAST/BCAST Rx mode */ + #define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */ + + /* Get vNIC VF configuration */ +@@ -190,6 +192,12 @@ struct change_link_mode_msg { + + }; + ++struct xcast { ++ uint8_t msg; ++ uint8_t mode; ++ uint64_t mac:48; ++}; ++ + struct nic_mbx { + /* 128 bit shared memory between PF and each VF */ + union { +@@ -209,6 +217,7 @@ union { + struct reset_stat_cfg reset_stat; + struct set_link_state set_link; + struct change_link_mode_msg mode; ++ struct xcast xcast; + }; + }; + +@@ -239,5 +248,6 @@ void nicvf_mbox_cfg_done(struct nicvf *nic); + void nicvf_mbox_link_change(struct nicvf *nic); + void 
nicvf_mbox_reset_xcast(struct nicvf *nic); + int nicvf_mbox_change_mode(struct nicvf *nic, struct change_link_mode *cfg); ++int nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac); + + #endif /* __THUNDERX_NICVF_MBOX__ */ +diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +index ab1e714d97..7045a71dca 100644 +--- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c ++++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c +@@ -58,6 +58,10 @@ RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE); + #define NICVF_QLM_MODE_SGMII 7 + #define NICVF_QLM_MODE_XFI 12 + ++#define BCAST_ACCEPT 0x01 ++#define CAM_ACCEPT (1 << 3) ++#define BGX_MCAST_MODE(x) ((x) << 1) ++ + enum nicvf_link_speed { + NICVF_LINK_SPEED_SGMII, + NICVF_LINK_SPEED_XAUI, +@@ -392,12 +396,14 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, ++ RTE_PTYPE_UNKNOWN + }; + static const uint32_t ptypes_tunnel[] = { + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, ++ RTE_PTYPE_UNKNOWN + }; + static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; + +@@ -2183,9 +2189,22 @@ nicvf_eth_dev_uninit(struct rte_eth_dev *dev) + nicvf_dev_close(dev); + return 0; + } ++ ++static inline uint64_t ether_addr_to_u64(uint8_t *addr) ++{ ++ uint64_t u = 0; ++ int i; ++ ++ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) ++ u = u << 8 | addr[i]; ++ ++ return u; ++} ++ + static int + nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) + { ++ uint8_t dmac_ctrl_reg = 0; + int ret; + struct rte_pci_device *pci_dev; + struct nicvf *nic = nicvf_pmd_priv(eth_dev); +@@ -2309,6 +2328,15 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) + goto malloc_fail; + } + ++ /* set DMAC CTRL reg to allow MAC */ ++ dmac_ctrl_reg = BCAST_ACCEPT | BGX_MCAST_MODE(2) | CAM_ACCEPT; ++ ret = nicvf_mbox_set_xcast(nic, dmac_ctrl_reg, ++ ether_addr_to_u64(nic->mac_addr)); ++ if (ret) { ++ PMD_INIT_LOG(ERR, "Failed to set mac addr"); ++ goto malloc_fail; ++ } ++ + ret = nicvf_set_first_skip(eth_dev); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure first skip"); +diff --git a/dpdk/drivers/net/txgbe/base/meson.build b/dpdk/drivers/net/txgbe/base/meson.build +index a81d6890fe..4cf90a394a 100644 +--- a/dpdk/drivers/net/txgbe/base/meson.build ++++ b/dpdk/drivers/net/txgbe/base/meson.build +@@ -22,6 +22,6 @@ foreach flag: error_cflags + endforeach + + base_lib = static_library('txgbe_base', sources, +- dependencies: [static_rte_eal, static_rte_net], ++ dependencies: [static_rte_eal, static_rte_net, static_rte_bus_pci], + c_args: c_args) + base_objs = base_lib.extract_all_objects(recursive: true) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.c b/dpdk/drivers/net/txgbe/base/txgbe_hw.c -index 8966453a03..de96549ae8 100644 +index 8966453a03..b59667f1d7 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_hw.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.c -@@ -2273,10 +2273,24 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, +@@ -462,7 +462,7 @@ void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw) + **/ + s32 txgbe_stop_hw(struct txgbe_hw *hw) + { +- u32 reg_val; ++ s32 status = 0; + u16 i; + + /* +@@ -484,16 +484,26 @@ s32 txgbe_stop_hw(struct txgbe_hw *hw) + wr32(hw, TXGBE_ICR(0), TXGBE_ICR_MASK); + wr32(hw, TXGBE_ICR(1), TXGBE_ICR_MASK); + +- /* Disable the transmit unit. Each queue must be disabled. 
*/ +- for (i = 0; i < hw->mac.max_tx_queues; i++) +- wr32(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_FLUSH); ++ wr32(hw, TXGBE_BMECTL, 0x3); + + /* Disable the receive unit by stopping each queue */ +- for (i = 0; i < hw->mac.max_rx_queues; i++) { +- reg_val = rd32(hw, TXGBE_RXCFG(i)); +- reg_val &= ~TXGBE_RXCFG_ENA; +- wr32(hw, TXGBE_RXCFG(i), reg_val); +- } ++ for (i = 0; i < hw->mac.max_rx_queues; i++) ++ wr32(hw, TXGBE_RXCFG(i), 0); ++ ++ /* flush all queues disables */ ++ txgbe_flush(hw); ++ msec_delay(2); ++ ++ /* Prevent the PCI-E bus from hanging by disabling PCI-E master ++ * access and verify no pending requests ++ */ ++ status = txgbe_set_pcie_master(hw, false); ++ if (status) ++ return status; ++ ++ /* Disable the transmit unit. Each queue must be disabled. */ ++ for (i = 0; i < hw->mac.max_tx_queues; i++) ++ wr32(hw, TXGBE_TXCFG(i), 0); + + /* flush all queues disables */ + txgbe_flush(hw); +@@ -1174,6 +1184,38 @@ out: + } + } + ++s32 txgbe_set_pcie_master(struct txgbe_hw *hw, bool enable) ++{ ++ struct rte_pci_device *pci_dev = (struct rte_pci_device *)hw->back; ++ s32 status = 0; ++ u32 i; ++ ++ if (rte_pci_set_bus_master(pci_dev, enable) < 0) { ++ DEBUGOUT("Cannot configure PCI bus master."); ++ return -1; ++ } ++ ++ if (enable) ++ goto out; ++ ++ /* Exit if master requests are blocked */ ++ if (!(rd32(hw, TXGBE_BMEPEND))) ++ goto out; ++ ++ /* Poll for master request bit to clear */ ++ for (i = 0; i < TXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { ++ usec_delay(100); ++ if (!(rd32(hw, TXGBE_BMEPEND))) ++ goto out; ++ } ++ ++ DEBUGOUT("PCIe transaction pending bit also did not clear."); ++ status = TXGBE_ERR_MASTER_REQUESTS_PENDING; ++ ++out: ++ return status; ++} ++ + /** + * txgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure +@@ -2273,10 +2315,24 @@ s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw, } if (speed & TXGBE_LINK_SPEED_1GB_FULL) { @@ -56759,7 +72741,7 @@ index 8966453a03..de96549ae8 100644 /* Set the module link speed */ switch (hw->phy.media_type) { case txgbe_media_type_fiber: -@@ -2987,10 +3001,6 @@ void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +@@ -2987,10 +3043,6 @@ void txgbe_disable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { u32 esdp_reg = rd32(hw, TXGBE_GPIODATA); @@ -56770,7 +72752,7 @@ index 8966453a03..de96549ae8 100644 if (txgbe_close_notify(hw)) txgbe_led_off(hw, TXGBE_LEDCTL_UP | TXGBE_LEDCTL_10G | TXGBE_LEDCTL_1G | TXGBE_LEDCTL_ACTIVE); -@@ -3038,10 +3048,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) +@@ -3038,10 +3090,6 @@ void txgbe_enable_tx_laser_multispeed_fiber(struct txgbe_hw *hw) **/ void txgbe_flap_tx_laser_multispeed_fiber(struct txgbe_hw *hw) { @@ -56781,7 +72763,7 @@ index 8966453a03..de96549ae8 100644 if (hw->mac.autotry_restart) { txgbe_disable_tx_laser_multispeed_fiber(hw); txgbe_enable_tx_laser_multispeed_fiber(hw); -@@ -3432,18 +3438,9 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) +@@ -3432,18 +3480,9 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw) autoc = hw->mac.autoc_read(hw); mac_reset_top: @@ -56803,6 +72785,18 @@ index 8966453a03..de96549ae8 100644 usec_delay(10); txgbe_reset_misc(hw); +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_hw.h b/dpdk/drivers/net/txgbe/base/txgbe_hw.h +index 7031589f7c..4bf9da2d4c 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_hw.h ++++ b/dpdk/drivers/net/txgbe/base/txgbe_hw.h +@@ -40,6 +40,7 @@ s32 txgbe_setup_fc(struct txgbe_hw *hw); + s32 txgbe_validate_mac_addr(u8 *mac_addr); + s32 txgbe_acquire_swfw_sync(struct 
txgbe_hw *hw, u32 mask); + void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask); ++s32 txgbe_set_pcie_master(struct txgbe_hw *hw, bool enable); + + s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); + s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mng.c b/dpdk/drivers/net/txgbe/base/txgbe_mng.c index df7145094f..029a0a1fe1 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_mng.c @@ -56830,6 +72824,18 @@ index df7145094f..029a0a1fe1 100644 if (!buf_len) goto rel_out; +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h +index b62c0b0824..0d9492c3cb 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h ++++ b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + + #include "../txgbe_logs.h" + diff --git a/dpdk/drivers/net/txgbe/base/txgbe_phy.c b/dpdk/drivers/net/txgbe/base/txgbe_phy.c index 9f46d5bdb0..a7c11c50df 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_phy.c @@ -56924,10 +72930,29 @@ index 9f46d5bdb0..a7c11c50df 100644 void txgbe_bp_down_event(struct txgbe_hw *hw) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h -index 911bb6e04e..79290a7afe 100644 +index 911bb6e04e..a2984f1106 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h -@@ -1579,6 +1579,7 @@ enum txgbe_5tuple_protocol { +@@ -1022,6 +1022,8 @@ enum txgbe_5tuple_protocol { + #define TXGBE_MACRXFLT_CTL_PASS LS(3, 6, 0x3) + #define TXGBE_MACRXFLT_RXALL MS(31, 0x1) + ++#define TXGBE_MAC_WDG_TIMEOUT 0x01100C ++ + /****************************************************************************** + * Statistic Registers + ******************************************************************************/ +@@ -1236,6 +1238,9 @@ enum txgbe_5tuple_protocol { + #define TXGBE_TCPTMR 0x000170 + #define TXGBE_ITRSEL 0x000180 + ++#define TXGBE_BMECTL 0x012020 ++#define TXGBE_BMEPEND 0x000168 ++ + /* P2V Mailbox */ + #define TXGBE_MBMEM(i) (0x005000 + 0x40 * (i)) /* 0-63 */ + #define TXGBE_MBCTL(i) (0x000600 + 4 * (i)) /* 0-63 */ +@@ -1579,6 +1584,7 @@ enum txgbe_5tuple_protocol { #define TXGBE_GPIOINTMASK 0x014834 #define TXGBE_GPIOINTTYPE 0x014838 #define TXGBE_GPIOINTSTAT 0x014840 @@ -56935,7 +72960,7 @@ index 911bb6e04e..79290a7afe 100644 #define TXGBE_GPIOEOI 0x01484C -@@ -1884,7 +1885,19 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, +@@ -1884,7 +1890,19 @@ po32m(struct txgbe_hw *hw, u32 reg, u32 mask, u32 expect, u32 *actual, } /* flush all write operations */ @@ -56957,10 +72982,18 @@ index 911bb6e04e..79290a7afe 100644 #define rd32a(hw, reg, idx) ( \ rd32((hw), (reg) + ((idx) << 2))) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_type.h b/dpdk/drivers/net/txgbe/base/txgbe_type.h -index c3486b472f..75e839b7de 100644 +index c3486b472f..f52736cae9 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_type.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_type.h -@@ -783,6 +783,7 @@ struct txgbe_hw { +@@ -29,6 +29,7 @@ + #define TXGBE_FDIRCMD_CMD_POLL 10 + #define TXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + #define TXGBE_SPI_TIMEOUT 10000 ++#define TXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + + #define TXGBE_ALIGN 128 /* as intel did */ + +@@ -783,6 +784,7 @@ struct txgbe_hw { bool allow_unsupported_sfp; bool need_crosstalk_fix; bool dev_start; @@ -56969,7 +73002,7 @@ index c3486b472f..75e839b7de 100644 uint64_t isb_dma; diff --git 
a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -index 86ef979b29..001f8c6473 100644 +index 86ef979b29..2ed5ee683f 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c @@ -179,7 +179,9 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { @@ -56990,7 +73023,7 @@ index 86ef979b29..001f8c6473 100644 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev); struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev); -@@ -591,6 +594,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -591,11 +594,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) return 0; } @@ -56998,7 +73031,126 @@ index 86ef979b29..001f8c6473 100644 rte_eth_copy_pci_info(eth_dev, pci_dev); hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; -@@ -1494,6 +1498,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) + + /* Vendor and Device ID need to be set before init of shared code */ ++ hw->back = pci_dev; + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) { +@@ -729,6 +734,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC); ++ rte_free(eth_dev->data->mac_addrs); ++ eth_dev->data->mac_addrs = NULL; + return -ENOMEM; + } + +@@ -896,6 +903,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); ++ rte_hash_free(fdir_info->hash_handle); + return -ENOMEM; + } + fdir_info->mask_added = FALSE; +@@ -931,6 +939,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) + if (!l2_tn_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for L2 TN hash map!"); ++ rte_hash_free(l2_tn_info->hash_handle); + return -ENOMEM; + } + l2_tn_info->e_tag_en = FALSE; +@@ -958,7 +967,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) + if (!ethdev) + return 0; + +- return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit); ++ return rte_eth_dev_pci_generic_remove(pci_dev, eth_txgbe_dev_uninit); + } + + static struct rte_pci_driver rte_txgbe_pmd = { +@@ -994,41 +1003,25 @@ txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + } + + static void +-txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++txgbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on) + { +- struct txgbe_hw *hw = TXGBE_DEV_HW(dev); +- struct txgbe_rx_queue *rxq; +- bool restart; +- uint32_t rxcfg, rxbal, rxbah; +- + if (on) + txgbe_vlan_hw_strip_enable(dev, queue); + else + txgbe_vlan_hw_strip_disable(dev, queue); ++} + +- rxq = dev->data->rx_queues[queue]; +- rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx)); +- rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx)); +- rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx)); +- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { +- restart = (rxcfg & TXGBE_RXCFG_ENA) && +- !(rxcfg & TXGBE_RXCFG_VLAN); +- rxcfg |= TXGBE_RXCFG_VLAN; +- } else { +- restart = (rxcfg & TXGBE_RXCFG_ENA) && +- (rxcfg & TXGBE_RXCFG_VLAN); +- rxcfg &= ~TXGBE_RXCFG_VLAN; +- } +- rxcfg &= ~TXGBE_RXCFG_ENA; ++static void ++txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++{ ++ 
struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + +- if (restart) { +- /* set vlan strip for ring */ +- txgbe_dev_rx_queue_stop(dev, queue); +- wr32(hw, TXGBE_RXBAL(rxq->reg_idx), rxbal); +- wr32(hw, TXGBE_RXBAH(rxq->reg_idx), rxbah); +- wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxcfg); +- txgbe_dev_rx_queue_start(dev, queue); ++ if (!hw->adapter_stopped) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return; + } ++ ++ txgbe_vlan_strip_q_set(dev, queue, on); + } + + static int +@@ -1253,9 +1246,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) + rxq = dev->data->rx_queues[i]; + + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) +- txgbe_vlan_strip_queue_set(dev, i, 1); ++ txgbe_vlan_strip_q_set(dev, i, 1); + else +- txgbe_vlan_strip_queue_set(dev, i, 0); ++ txgbe_vlan_strip_q_set(dev, i, 0); + } + } + +@@ -1317,6 +1310,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) + static int + txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) + { ++ struct txgbe_hw *hw = TXGBE_DEV_HW(dev); ++ ++ if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return -EPERM; ++ } ++ + txgbe_config_vlan_strip_on_all_queues(dev, mask); + + txgbe_vlan_offload_config(dev, mask); +@@ -1494,6 +1494,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } @@ -57018,7 +73170,7 @@ index 86ef979b29..001f8c6473 100644 } return 0; } -@@ -1530,6 +1547,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) +@@ -1530,6 +1543,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) return 0; } @@ -57044,7 +73196,7 @@ index 86ef979b29..001f8c6473 100644 static void txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) { -@@ -1647,7 +1683,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1647,7 +1679,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Stop the link setup handler before resetting the HW. 
*/ @@ -57053,10 +73205,12 @@ index 86ef979b29..001f8c6473 100644 /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); -@@ -1668,6 +1704,10 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1668,6 +1700,12 @@ txgbe_dev_start(struct rte_eth_dev *dev) hw->mac.get_link_status = true; hw->dev_start = true; ++ txgbe_set_pcie_master(hw, true); ++ + /* workaround for GPIO intr lost when mng_veto bit is set */ + if (txgbe_check_reset_blocked(hw)) + txgbe_reinit_gpio_intr(hw); @@ -57064,7 +73218,7 @@ index 86ef979b29..001f8c6473 100644 /* configure PF module if SRIOV enabled */ txgbe_pf_host_configure(dev); -@@ -1786,6 +1826,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1786,6 +1824,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed = (TXGBE_LINK_SPEED_100M_FULL | TXGBE_LINK_SPEED_1GB_FULL | TXGBE_LINK_SPEED_10GB_FULL); @@ -57072,7 +73226,7 @@ index 86ef979b29..001f8c6473 100644 } else { if (*link_speeds & RTE_ETH_LINK_SPEED_10G) speed |= TXGBE_LINK_SPEED_10GB_FULL; -@@ -1797,6 +1838,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1797,6 +1836,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed |= TXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & RTE_ETH_LINK_SPEED_100M) speed |= TXGBE_LINK_SPEED_100M_FULL; @@ -57080,7 +73234,7 @@ index 86ef979b29..001f8c6473 100644 } err = hw->mac.setup_link(hw, speed, link_up); -@@ -1875,15 +1917,19 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1875,15 +1915,19 @@ txgbe_dev_stop(struct rte_eth_dev *dev) struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); if (hw->adapter_stopped) @@ -57102,7 +73256,7 @@ index 86ef979b29..001f8c6473 100644 /* reset the NIC */ txgbe_pf_reset_hw(hw); hw->adapter_stopped = 0; -@@ -1894,14 +1940,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1894,14 +1938,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; @@ -57117,7 +73271,13 @@ index 86ef979b29..001f8c6473 100644 txgbe_dev_clear_queues(dev); /* Clear stored conf */ -@@ -1932,6 +1970,16 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1928,10 +1964,22 @@ txgbe_dev_stop(struct rte_eth_dev *dev) + adapter->rss_reta_updated = 0; + wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK); + ++ txgbe_set_pcie_master(hw, true); ++ + hw->adapter_stopped = true; dev->data->dev_started = 0; hw->dev_start = false; @@ -57134,7 +73294,7 @@ index 86ef979b29..001f8c6473 100644 return 0; } -@@ -1991,6 +2039,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -1991,12 +2039,17 @@ txgbe_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -57144,7 +73304,15 @@ index 86ef979b29..001f8c6473 100644 txgbe_pf_reset_hw(hw); ret = txgbe_dev_stop(dev); -@@ -2019,8 +2070,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) + + txgbe_dev_free_queues(dev); + ++ txgbe_set_pcie_master(hw, false); ++ + /* reprogram the RAR[0] in case user changed it. 
*/ + txgbe_set_rar(hw, 0, hw->mac.addr, 0, true); + +@@ -2019,8 +2072,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_delay_ms(100); } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); @@ -57155,7 +73323,18 @@ index 86ef979b29..001f8c6473 100644 /* uninitialize PF if max_vfs not zero */ txgbe_pf_host_uninit(dev); -@@ -2690,11 +2742,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) +@@ -2605,7 +2659,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; +- dev_info->max_rx_pktlen = 15872; ++ dev_info->max_rx_pktlen = TXGBE_MAX_MTU + TXGBE_ETH_OVERHEAD; ++ dev_info->min_mtu = RTE_ETHER_MIN_MTU; ++ dev_info->max_mtu = TXGBE_MAX_MTU; + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; +@@ -2690,11 +2746,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; } @@ -57208,7 +73387,15 @@ index 86ef979b29..001f8c6473 100644 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); struct rte_eth_link link; u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; -@@ -2731,10 +2824,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2702,6 +2799,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, + bool link_up; + int err; + int wait = 1; ++ u32 reg; + + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; +@@ -2731,10 +2829,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, if ((hw->subsystem_device_id & 0xFF) == TXGBE_DEV_ID_KR_KX_KX4) { hw->mac.bp_down_event(hw); @@ -57237,19 +73424,24 @@ index 86ef979b29..001f8c6473 100644 } return rte_eth_linkstatus_set(dev, &link); } else if (!hw->dev_start) { -@@ -2773,6 +2880,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2773,6 +2885,16 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, break; } + /* Re configure MAC RX */ -+ if (hw->mac.type == txgbe_mac_raptor) ++ if (hw->mac.type == txgbe_mac_raptor) { ++ reg = rd32(hw, TXGBE_MACRXCFG); ++ wr32(hw, TXGBE_MACRXCFG, reg); + wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_PROMISC, + TXGBE_MACRXFLT_PROMISC); ++ reg = rd32(hw, TXGBE_MAC_WDG_TIMEOUT); ++ wr32(hw, TXGBE_MAC_WDG_TIMEOUT, reg); ++ } + return rte_eth_linkstatus_set(dev, &link); } -@@ -2949,9 +3061,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2949,9 +3071,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX) wr32(hw, TXGBE_PX_INTA, 1); @@ -57259,7 +73451,7 @@ index 86ef979b29..001f8c6473 100644 /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); -@@ -2974,6 +3083,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2974,6 +3093,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, if (eicr & TXGBE_ICRMISC_GPIO) intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; @@ -57268,7 +73460,7 @@ index 86ef979b29..001f8c6473 100644 return 0; } -@@ -3143,7 +3254,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) +@@ -3143,7 +3264,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) } /* restore original mask */ @@ -57278,8 +73470,118 @@ index 86ef979b29..001f8c6473 100644 intr->mask = intr->mask_orig; intr->mask_orig = 0; +@@ -3481,12 +3603,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + return -EINVAL; + } + +- if 
(hw->mode) +- wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, +- TXGBE_FRAME_SIZE_MAX); +- else +- wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, +- TXGBE_FRMSZ_MAX(frame_size)); ++ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK, ++ TXGBE_FRMSZ_MAX(frame_size)); + + return 0; + } +@@ -3637,13 +3755,13 @@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + + if (queue_id < 32) { +- mask = rd32(hw, TXGBE_IMS(0)); +- mask &= (1 << queue_id); +- wr32(hw, TXGBE_IMS(0), mask); ++ mask = rd32(hw, TXGBE_IMC(0)); ++ mask |= (1 << queue_id); ++ wr32(hw, TXGBE_IMC(0), mask); + } else if (queue_id < 64) { +- mask = rd32(hw, TXGBE_IMS(1)); +- mask &= (1 << (queue_id - 32)); +- wr32(hw, TXGBE_IMS(1), mask); ++ mask = rd32(hw, TXGBE_IMC(1)); ++ mask |= (1 << (queue_id - 32)); ++ wr32(hw, TXGBE_IMC(1), mask); + } + rte_intr_enable(intr_handle); + +@@ -3658,11 +3776,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) + + if (queue_id < 32) { + mask = rd32(hw, TXGBE_IMS(0)); +- mask &= ~(1 << queue_id); ++ mask |= (1 << queue_id); + wr32(hw, TXGBE_IMS(0), mask); + } else if (queue_id < 64) { + mask = rd32(hw, TXGBE_IMS(1)); +- mask &= ~(1 << (queue_id - 32)); ++ mask |= (1 << (queue_id - 32)); + wr32(hw, TXGBE_IMS(1), mask); + } + +@@ -3696,7 +3814,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, + wr32(hw, TXGBE_IVARMISC, tmp); + } else { + /* rx or tx causes */ +- /* Workaround for ICR lost */ ++ msix_vector |= TXGBE_IVAR_VLD; /* Workaround for ICR lost */ + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); + tmp &= ~(0xFF << idx); +@@ -3802,6 +3920,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, + struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); + uint32_t syn_info; + uint32_t synqf; ++ uint16_t queue; + + if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; +@@ -3811,7 +3930,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, + if (add) { + if (syn_info & TXGBE_SYNCLS_ENA) + return -EINVAL; +- synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue); ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue; ++ else ++ queue = filter->queue; ++ synqf = (uint32_t)TXGBE_SYNCLS_QPID(queue); + synqf |= TXGBE_SYNCLS_ENA; + + if (filter->hig_pri) +@@ -3880,7 +4003,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + wr32(hw, TXGBE_5TFPORT(i), sdpqf); + wr32(hw, TXGBE_5TFCTL0(i), ftqf); + +- l34timir |= TXGBE_5TFCTL1_QP(filter->queue); ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ l34timir |= TXGBE_5TFCTL1_QP(RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue); ++ else ++ l34timir |= TXGBE_5TFCTL1_QP(filter->queue); + wr32(hw, TXGBE_5TFCTL1(i), l34timir); + } + +@@ -4164,7 +4290,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + if (add) { + etqf = TXGBE_ETFLT_ENA; + etqf |= TXGBE_ETFLT_ETID(filter->ether_type); +- etqs |= TXGBE_ETCLS_QPID(filter->queue); ++ if (RTE_ETH_DEV_SRIOV(dev).active) { ++ int pool, queue; ++ ++ pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue; ++ etqf |= TXGBE_ETFLT_POOLENA; ++ etqf |= TXGBE_ETFLT_POOL(pool); ++ etqs |= TXGBE_ETCLS_QPID(queue); ++ } else { ++ etqs |= TXGBE_ETCLS_QPID(filter->queue); ++ } + etqs |= TXGBE_ETCLS_QENA; + + ethertype_filter.ethertype = filter->ether_type; diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.h b/dpdk/drivers/net/txgbe/txgbe_ethdev.h -index 6a18865e23..545ce4c9e1 
100644 +index 6a18865e23..4625236cb7 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.h +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.h @@ -40,6 +40,7 @@ @@ -57290,6 +73592,15 @@ index 6a18865e23..545ce4c9e1 100644 #ifndef NBBY #define NBBY 8 /* number of bits in a byte */ +@@ -54,7 +55,7 @@ + #define TXGBE_5TUPLE_MAX_PRI 7 + #define TXGBE_5TUPLE_MIN_PRI 1 + +- ++#define TXGBE_MAX_MTU 9414 + /* The overhead from MTU to max frame size. */ + #define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + @@ -369,6 +370,9 @@ struct txgbe_adapter { /* For RSS reta table update */ @@ -57311,7 +73622,7 @@ index 6a18865e23..545ce4c9e1 100644 void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev); diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c -index 3b1f7c913b..f1341fbf7e 100644 +index 3b1f7c913b..92603fccc2 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev_vf.c @@ -165,6 +165,7 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev) @@ -57330,7 +73641,16 @@ index 3b1f7c913b..f1341fbf7e 100644 rte_eth_copy_pci_info(eth_dev, pci_dev); hw->device_id = pci_dev->id.device_id; -@@ -618,7 +620,7 @@ txgbevf_dev_start(struct rte_eth_dev *dev) +@@ -293,6 +295,8 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev) + err = hw->mac.start_hw(hw); + if (err) { + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", err); ++ rte_free(eth_dev->data->mac_addrs); ++ eth_dev->data->mac_addrs = NULL; + return -EIO; + } + +@@ -618,7 +622,7 @@ txgbevf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Stop the link setup handler before resetting the HW. */ @@ -57339,7 +73659,27 @@ index 3b1f7c913b..f1341fbf7e 100644 err = hw->mac.reset_hw(hw); if (err) { -@@ -720,7 +722,7 @@ txgbevf_dev_stop(struct rte_eth_dev *dev) +@@ -668,8 +672,10 @@ txgbevf_dev_start(struct rte_eth_dev *dev) + * now only one vector is used for Rx queue + */ + intr_vector = 1; +- if (rte_intr_efd_enable(intr_handle, intr_vector)) ++ if (rte_intr_efd_enable(intr_handle, intr_vector)) { ++ txgbe_dev_clear_queues(dev); + return -1; ++ } + } + + if (rte_intr_dp_is_en(intr_handle)) { +@@ -677,6 +683,7 @@ txgbevf_dev_start(struct rte_eth_dev *dev) + dev->data->nb_rx_queues)) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); ++ txgbe_dev_clear_queues(dev); + return -ENOMEM; + } + } +@@ -720,7 +727,7 @@ txgbevf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -57348,6 +73688,64 @@ index 3b1f7c913b..f1341fbf7e 100644 txgbevf_intr_disable(dev); +@@ -963,7 +970,7 @@ txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction, + wr32(hw, TXGBE_VFIVARMISC, tmp); + } else { + /* rx or tx cause */ +- /* Workaround for ICR lost */ ++ msix_vector |= TXGBE_VFIVAR_VLD; /* Workaround for ICR lost */ + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1)); + tmp &= ~(0xFF << idx); +@@ -1199,9 +1206,13 @@ static int + txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) + { + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); ++ int mode = TXGBEVF_XCAST_MODE_NONE; + int ret; + +- switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) { ++ if (dev->data->all_multicast) ++ mode = TXGBEVF_XCAST_MODE_ALLMULTI; ++ ++ switch (hw->mac.update_xcast_mode(hw, mode)) { + case 0: + ret = 0; + break; +@@ -1222,6 +1233,9 @@ txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int ret; + ++ if (dev->data->promiscuous) ++ return 0; ++ + switch 
(hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_ALLMULTI)) { + case 0: + ret = 0; +@@ -1243,6 +1257,9 @@ txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) + struct txgbe_hw *hw = TXGBE_DEV_HW(dev); + int ret; + ++ if (dev->data->promiscuous) ++ return 0; ++ + switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) { + case 0: + ret = 0; +diff --git a/dpdk/drivers/net/txgbe/txgbe_fdir.c b/dpdk/drivers/net/txgbe/txgbe_fdir.c +index a198b6781b..f627ab681d 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_fdir.c ++++ b/dpdk/drivers/net/txgbe/txgbe_fdir.c +@@ -844,6 +844,9 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev, + return -EINVAL; + } + ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue; ++ + node = txgbe_fdir_filter_lookup(info, &rule->input); + if (node) { + if (!update) { diff --git a/dpdk/drivers/net/txgbe/txgbe_ptypes.c b/dpdk/drivers/net/txgbe/txgbe_ptypes.c index e1299d7363..c444d5d3f1 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ptypes.c @@ -57397,7 +73795,7 @@ index fa6c347d53..6fa8147f05 100644 + #endif /* _TXGBE_PTYPE_H_ */ diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c -index ac1bba08a3..24fc34d3c4 100644 +index ac1bba08a3..b0ec1c96d7 100644 --- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c +++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c @@ -516,20 +516,21 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) @@ -57427,15 +73825,35 @@ index ac1bba08a3..24fc34d3c4 100644 ptype |= RTE_PTYPE_L2_ETHER_VLAN; /* L3 level */ -@@ -571,7 +572,6 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) - ptype |= RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | - RTE_PTYPE_TUNNEL_GRE; +@@ -563,30 +564,30 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) + switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) { + case RTE_MBUF_F_TX_TUNNEL_VXLAN: + case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_GRENAT; ++ ptype |= RTE_PTYPE_TUNNEL_GRENAT; + break; + case RTE_MBUF_F_TX_TUNNEL_GRE: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_GRE; - ptype |= RTE_PTYPE_INNER_L2_ETHER; ++ ptype |= RTE_PTYPE_TUNNEL_GRE; break; case RTE_MBUF_F_TX_TUNNEL_GENEVE: - ptype |= RTE_PTYPE_L2_ETHER | -@@ -587,6 +587,16 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_GENEVE; +- ptype |= RTE_PTYPE_INNER_L2_ETHER; ++ ptype |= RTE_PTYPE_TUNNEL_GENEVE; + break; + case RTE_MBUF_F_TX_TUNNEL_IPIP: + case RTE_MBUF_F_TX_TUNNEL_IP: +- ptype |= RTE_PTYPE_L2_ETHER | +- RTE_PTYPE_L3_IPV4 | +- RTE_PTYPE_TUNNEL_IP; ++ ptype |= RTE_PTYPE_TUNNEL_IP; break; } @@ -57452,52 +73870,109 @@ index ac1bba08a3..24fc34d3c4 100644 return txgbe_encode_ptype(ptype); } -@@ -694,22 +704,24 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) +@@ -657,11 +658,20 @@ txgbe_xmit_cleanup(struct txgbe_tx_queue *txq) + return 0; + } + ++#define GRE_CHECKSUM_PRESENT 0x8000 ++#define GRE_KEY_PRESENT 0x2000 ++#define GRE_SEQUENCE_PRESENT 0x1000 ++#define GRE_EXT_LEN 4 ++#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\ ++ GRE_SEQUENCE_PRESENT) ++ + static inline uint8_t + txgbe_get_tun_len(struct rte_mbuf *mbuf) + { + struct txgbe_genevehdr genevehdr; + const struct txgbe_genevehdr *gh; ++ const struct txgbe_grehdr *grh; ++ struct txgbe_grehdr grehdr; + uint8_t tun_len; + + switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { +@@ -674,11 +684,16 @@ txgbe_get_tun_len(struct rte_mbuf 
*mbuf) + + sizeof(struct txgbe_vxlanhdr); + break; + case RTE_MBUF_F_TX_TUNNEL_GRE: +- tun_len = sizeof(struct txgbe_nvgrehdr); ++ tun_len = sizeof(struct txgbe_grehdr); ++ grh = rte_pktmbuf_read(mbuf, ++ mbuf->outer_l2_len + mbuf->outer_l3_len, ++ sizeof(grehdr), &grehdr); ++ if (grh->flags & rte_cpu_to_be_16(GRE_SUPPORTED_FIELDS)) ++ tun_len += GRE_EXT_LEN; + break; + case RTE_MBUF_F_TX_TUNNEL_GENEVE: +- gh = rte_pktmbuf_read(mbuf, +- mbuf->outer_l2_len + mbuf->outer_l3_len, ++ gh = rte_pktmbuf_read(mbuf, mbuf->outer_l2_len + ++ mbuf->outer_l3_len + sizeof(struct txgbe_udphdr), + sizeof(genevehdr), &genevehdr); + tun_len = sizeof(struct txgbe_udphdr) + + sizeof(struct txgbe_genevehdr) +@@ -692,25 +707,26 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf) + } + static inline uint8_t - txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) +-txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt) ++txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt, uint8_t tun_len) { - uint64_t l2_none, l2_mac, l2_mac_vlan; -+ uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan; -+ uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan; ++ uint64_t inner_l2_len; uint8_t ptid = 0; - if ((tx_pkt->ol_flags & (RTE_MBUF_F_TX_TUNNEL_VXLAN | - RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) == 0) - return ptid; -+ l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); -+ l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr); -+ l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr); ++ inner_l2_len = tx_pkt->l2_len - tun_len; - l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr); - l2_mac = l2_none + sizeof(struct rte_ether_hdr); - l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr); -+ l2_gre = sizeof(struct txgbe_grehdr); -+ l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr); -+ l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr); - +- - if (tx_pkt->l2_len == l2_none) -+ if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre) ++ switch (inner_l2_len) { ++ case 0: ptid = TXGBE_PTID_TUN_EIG; - else if (tx_pkt->l2_len == l2_mac) -+ else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac) ++ break; ++ case sizeof(struct rte_ether_hdr): ptid = TXGBE_PTID_TUN_EIGM; - else if (tx_pkt->l2_len == l2_mac_vlan) -+ else if (tx_pkt->l2_len == l2_vxlan_mac_vlan || -+ tx_pkt->l2_len == l2_gre_mac_vlan) ++ break; ++ case sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr): ptid = TXGBE_PTID_TUN_EIGMV; ++ break; ++ default: ++ ptid = TXGBE_PTID_TUN_EI; ++ } return ptid; -@@ -776,8 +788,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + } +@@ -776,10 +792,7 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* If hardware offload required */ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK; if (tx_ol_req) { - tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, - tx_pkt->packet_type); +- if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) +- tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); + tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req); - if (tx_offload.ptid & TXGBE_PTID_PKT_TUN) - tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt); tx_offload.l2_len = tx_pkt->l2_len; -@@ -1465,11 +1476,22 @@ txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; +@@ -788,6 +801,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt); ++ if (tx_offload.ptid & 
TXGBE_PTID_PKT_TUN) ++ tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt, ++ tx_offload.outer_tun_len); + + #ifdef RTE_LIB_SECURITY + if (use_ipsec) { +@@ -1465,11 +1481,22 @@ txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, * of accesses cannot be reordered by the compiler. If they were * not volatile, they could be reordered which could lead to * using invalid descriptor fields when read from rxd. @@ -57520,7 +73995,7 @@ index ac1bba08a3..24fc34d3c4 100644 rxd = *rxdp; /* -@@ -1715,32 +1737,10 @@ txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, +@@ -1715,32 +1742,10 @@ txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, next_desc: /* @@ -57557,7 +74032,7 @@ index ac1bba08a3..24fc34d3c4 100644 */ rxdp = &rx_ring[rx_id]; staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status); -@@ -1748,6 +1748,12 @@ next_desc: +@@ -1748,6 +1753,12 @@ next_desc: if (!(staterr & TXGBE_RXD_STAT_DD)) break; @@ -57570,7 +74045,47 @@ index ac1bba08a3..24fc34d3c4 100644 rxd = *rxdp; PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " -@@ -2795,6 +2801,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2124,6 +2135,7 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq) + if (txq != NULL && txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); ++ rte_memzone_free(txq->mz); + rte_free(txq); + } + } +@@ -2335,6 +2347,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ txq->mz = tz; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; +@@ -2452,6 +2465,7 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq) + txgbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); ++ rte_memzone_free(rxq->mz); + rte_free(rxq); + } + } +@@ -2545,6 +2559,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; ++ rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + } +@@ -2625,6 +2640,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ rxq->mz = rz; + /* + * Zero init all the descriptors in the ring. 
+ */ +@@ -2795,6 +2811,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txq->ops->release_mbufs(txq); txq->ops->reset(txq); } @@ -57579,7 +74094,7 @@ index ac1bba08a3..24fc34d3c4 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { -@@ -2804,6 +2812,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2804,6 +2822,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txgbe_rx_queue_release_mbufs(rxq); txgbe_reset_rx_queue(adapter, rxq); } @@ -57588,7 +74103,7 @@ index ac1bba08a3..24fc34d3c4 100644 } } -@@ -4382,7 +4392,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) +@@ -4382,7 +4402,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) */ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM); @@ -57597,7 +74112,7 @@ index ac1bba08a3..24fc34d3c4 100644 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size); wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl); -@@ -4994,6 +5004,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -4994,6 +5014,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); @@ -57606,7 +74121,7 @@ index ac1bba08a3..24fc34d3c4 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; -@@ -5008,6 +5020,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5008,6 +5030,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); @@ -57615,6 +74130,48 @@ index ac1bba08a3..24fc34d3c4 100644 rte_wmb(); wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); } +@@ -5055,6 +5079,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, + uint32_t reta; + uint16_t i; + uint16_t j; ++ uint16_t queue; + struct rte_eth_rss_conf rss_conf = { + .rss_key = conf->conf.key_len ? 
+ (void *)(uintptr_t)conf->conf.key : NULL, +@@ -5087,7 +5112,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, + for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) { + if (j == conf->conf.queue_num) + j = 0; +- reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF); ++ if (RTE_ETH_DEV_SRIOV(dev).active) ++ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + ++ conf->conf.queue[j]; ++ else ++ queue = conf->conf.queue[j]; ++ reta = (reta >> 8) | LS32(queue, 24, 0xFF); + if ((i & 3) == 3) + wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta); + } +diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.h b/dpdk/drivers/net/txgbe/txgbe_rxtx.h +index 27d4c842c0..c579e1a9f2 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_rxtx.h ++++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.h +@@ -314,6 +314,7 @@ struct txgbe_rx_queue { + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2]; ++ const struct rte_memzone *mz; + }; + + /** +@@ -402,6 +403,7 @@ struct txgbe_tx_queue { + uint8_t using_ipsec; + /**< indicates that IPsec TX feature is in use */ + #endif ++ const struct rte_memzone *mz; + }; + + struct txgbe_txq_ops { diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c index b152279fac..acba97dba3 100644 --- a/dpdk/drivers/net/vhost/rte_eth_vhost.c @@ -58152,10 +74709,19 @@ index b152279fac..acba97dba3 100644 return 0; diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c -index 760ba4e368..5e9ed47551 100644 +index 760ba4e368..1d9a168f92 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_ethdev.c -@@ -1797,22 +1797,25 @@ static int +@@ -1320,6 +1320,8 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, + struct virtio_net_ctrl_mac *tbl + = rte_is_multicast_ether_addr(addr) ? mc : uc; + ++ if (rte_is_zero_ether_addr(addr)) ++ break; + memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN); + } + +@@ -1797,22 +1799,25 @@ static int virtio_configure_intr(struct rte_eth_dev *dev) { struct virtio_hw *hw = dev->data->dev_private; @@ -58186,7 +74752,7 @@ index 760ba4e368..5e9ed47551 100644 } if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) { -@@ -1833,12 +1836,13 @@ virtio_configure_intr(struct rte_eth_dev *dev) +@@ -1833,12 +1838,13 @@ virtio_configure_intr(struct rte_eth_dev *dev) */ if (virtio_intr_enable(dev) < 0) { PMD_DRV_LOG(ERR, "interrupt enable failed"); @@ -58203,7 +74769,7 @@ index 760ba4e368..5e9ed47551 100644 } return 0; -@@ -2161,7 +2165,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) +@@ -2161,7 +2167,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) eth_dev->device->numa_node); if (!hw->rss_key) { PMD_INIT_LOG(ERR, "Failed to allocate RSS key"); @@ -58212,7 +74778,7 @@ index 760ba4e368..5e9ed47551 100644 } } -@@ -2183,7 +2187,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) +@@ -2183,7 +2189,7 @@ virtio_dev_rss_init(struct rte_eth_dev *eth_dev) eth_dev->device->numa_node); if (!hw->rss_reta) { PMD_INIT_LOG(ERR, "Failed to allocate RSS reta"); @@ -58221,7 +74787,7 @@ index 760ba4e368..5e9ed47551 100644 } hw->rss_rx_queues = 0; -@@ -2223,7 +2227,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) +@@ -2223,7 +2229,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) /* Tell the host we've known how to drive the device. 
*/ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); if (virtio_ethdev_negotiate_features(hw, req_features) < 0) @@ -58230,6 +74796,15 @@ index 760ba4e368..5e9ed47551 100644 hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM); +@@ -2233,8 +2239,6 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + else + eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; + +- eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; +- + /* Setting up rx_header size for the device */ + if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) || + virtio_with_feature(hw, VIRTIO_F_VERSION_1) || @@ -2305,7 +2309,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) if (config->mtu < RTE_ETHER_MIN_MTU) { PMD_INIT_LOG(ERR, "invalid max MTU value (%u)", @@ -58454,6 +75029,20 @@ index d9d40832e0..c3e686cf0c 100644 tmp = (tmp & 0xffff) + (tmp >> 16); new_cksum = tmp; +diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c +index 3c05ac9cc0..c10252506b 100644 +--- a/dpdk/drivers/net/virtio/virtio_user/vhost_user.c ++++ b/dpdk/drivers/net/virtio/virtio_user/vhost_user.c +@@ -128,7 +128,8 @@ vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num) + cmsg->cmsg_len = CMSG_LEN(fd_size); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; +- memcpy(CMSG_DATA(cmsg), fds, fd_size); ++ if (fd_size > 0) ++ memcpy(CMSG_DATA(cmsg), fds, fd_size); + + do { + r = sendmsg(fd, &msgh, 0); diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c index 19599aa3f6..697a8dcd6b 100644 --- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -58515,10 +75104,26 @@ index f5d8b40cad..5c9230cfe1 100644 idx++; if (idx >= vq->vq_nentries) { diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -index fd946dec5c..a0959b0c80 100644 +index fd946dec5c..c1c7539fff 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -@@ -957,6 +957,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -257,6 +257,7 @@ vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw) + vmxnet3_disable_intr(hw, i); + } + ++#ifndef RTE_EXEC_ENV_FREEBSD + /* + * Enable all intrs used by the device + */ +@@ -280,6 +281,7 @@ vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw) + vmxnet3_enable_intr(hw, i); + } + } ++#endif + + /* + * Gets tx data ring descriptor size. 
+@@ -957,6 +959,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) { int ret; struct vmxnet3_hw *hw = dev->data->dev_private; @@ -58526,7 +75131,23 @@ index fd946dec5c..a0959b0c80 100644 PMD_INIT_FUNC_TRACE(); -@@ -1058,6 +1059,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -1035,6 +1038,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + /* Setting proper Rx Mode and issue Rx Mode Update command */ + vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); + ++#ifndef RTE_EXEC_ENV_FREEBSD + /* Setup interrupt callback */ + rte_intr_callback_register(dev->intr_handle, + vmxnet3_interrupt_handler, dev); +@@ -1046,6 +1050,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + + /* enable all intrs */ + vmxnet3_enable_all_intrs(hw); ++#endif + + vmxnet3_process_events(dev); + +@@ -1058,6 +1063,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) */ __vmxnet3_dev_link_update(dev, 0); @@ -58538,7 +75159,7 @@ index fd946dec5c..a0959b0c80 100644 return VMXNET3_SUCCESS; } -@@ -1070,6 +1076,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) +@@ -1070,6 +1080,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) struct rte_eth_link link; struct vmxnet3_hw *hw = dev->data->dev_private; struct rte_intr_handle *intr_handle = dev->intr_handle; @@ -58546,7 +75167,7 @@ index fd946dec5c..a0959b0c80 100644 int ret; PMD_INIT_FUNC_TRACE(); -@@ -1125,6 +1132,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) +@@ -1125,6 +1136,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) hw->adapter_stopped = 1; dev->data->dev_started = 0; @@ -58558,6 +75179,33 @@ index fd946dec5c..a0959b0c80 100644 return 0; } +@@ -1810,11 +1826,13 @@ done: + static int + vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) + { ++#ifndef RTE_EXEC_ENV_FREEBSD + struct vmxnet3_hw *hw = dev->data->dev_private; + + vmxnet3_enable_intr(hw, + rte_intr_vec_list_index_get(dev->intr_handle, + queue_id)); ++#endif + + return 0; + } +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h +index 74154e3a1a..ae8542811a 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h +@@ -7,7 +7,7 @@ + + extern int vmxnet3_logtype_init; + #define PMD_INIT_LOG(level, fmt, args...) 
\ +- rte_log(RTE_LOG_ ## level, vmxnet3_logtype_driver, \ ++ rte_log(RTE_LOG_ ## level, vmxnet3_logtype_init, \ + "%s(): " fmt "\n", __func__, ## args) + #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c index a875ffec07..14c6504505 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -58939,6 +75587,72 @@ index 6e6624e5a3..1d84e422d4 100644 pthread_mutex_lock(&priv->steer_update_lock); mlx5_vdpa_steer_unset(priv); pthread_mutex_unlock(&priv->steer_update_lock); +diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c +index 4d819763d8..b14174e02d 100644 +--- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c ++++ b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c +@@ -244,22 +244,30 @@ mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv) + return max; + } + ++static void ++mlx5_vdpa_drain_cq_one(struct mlx5_vdpa_priv *priv, ++ struct mlx5_vdpa_virtq *virtq) ++{ ++ struct mlx5_vdpa_cq *cq = &virtq->eqp.cq; ++ ++ mlx5_vdpa_queue_complete(cq); ++ if (cq->cq_obj.cq) { ++ cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX); ++ virtq->eqp.qp_pi = 0; ++ if (!cq->armed) ++ mlx5_vdpa_cq_arm(priv, cq); ++ } ++} ++ + void + mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv) + { ++ struct mlx5_vdpa_virtq *virtq; + unsigned int i; + + for (i = 0; i < priv->caps.max_num_virtio_queues; i++) { +- struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq; +- +- mlx5_vdpa_queue_complete(cq); +- if (cq->cq_obj.cq) { +- cq->cq_obj.cqes[0].wqe_counter = +- rte_cpu_to_be_16(UINT16_MAX); +- priv->virtqs[i].eqp.qp_pi = 0; +- if (!cq->armed) +- mlx5_vdpa_cq_arm(priv, cq); +- } ++ virtq = &priv->virtqs[i]; ++ mlx5_vdpa_drain_cq_one(priv, virtq); + } + } + +@@ -658,6 +666,7 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n, + if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) { + /* Reuse existing resources. */ + eqp->cq.callfd = callfd; ++ mlx5_vdpa_drain_cq_one(priv, virtq); + /* FW will set event qp to error state in q destroy. 
*/ + if (reset && !mlx5_vdpa_qps2rst2rts(eqp)) + rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)), +diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c +index edb7e35c2c..7e43719f53 100644 +--- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c ++++ b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c +@@ -13,8 +13,6 @@ + #include "sfc_vdpa.h" + #include "sfc_vdpa_ops.h" + +-extern uint32_t sfc_logtype_driver; +- + #ifndef PAGE_SIZE + #define PAGE_SIZE (sysconf(_SC_PAGESIZE)) + #endif diff --git a/dpdk/examples/cmdline/parse_obj_list.h b/dpdk/examples/cmdline/parse_obj_list.h index 6516d3e2c2..1223ac1e8b 100644 --- a/dpdk/examples/cmdline/parse_obj_list.h @@ -59191,6 +75905,38 @@ index a80d8b3e4d..bf08d1b995 100644 } } +diff --git a/dpdk/examples/fips_validation/fips_validation_rsa.c b/dpdk/examples/fips_validation/fips_validation_rsa.c +index f675b51051..55f81860a0 100644 +--- a/dpdk/examples/fips_validation/fips_validation_rsa.c ++++ b/dpdk/examples/fips_validation/fips_validation_rsa.c +@@ -328,6 +328,9 @@ parse_test_rsa_json_interim_writeback(struct fips_val *val) + if (prepare_vec_rsa() < 0) + return -1; + ++ if (!vec.rsa.e.val) ++ return -1; ++ + writeback_hex_str("", info.one_line_text, &vec.rsa.n); + obj = json_string(info.one_line_text); + json_object_set_new(json_info.json_write_group, "n", obj); +@@ -474,7 +477,7 @@ fips_test_randomize_message(struct fips_val *msg, struct fips_val *rand) + uint16_t rv_len; + + if (!msg->val || !rand->val || rand->len > RV_BUF_LEN +- || msg->len > FIPS_TEST_JSON_BUF_LEN) ++ || msg->len > (FIPS_TEST_JSON_BUF_LEN - 1)) + return -EINVAL; + + memset(rv, 0, sizeof(rv)); +@@ -503,7 +506,7 @@ fips_test_randomize_message(struct fips_val *msg, struct fips_val *rand) + m[i + j] ^= rv[j]; + + m[i + j] = ((uint8_t *)&rv_bitlen)[0]; +- m[i + j + 1] = (((uint8_t *)&rv_bitlen)[1] >> 8) & 0xFF; ++ m[i + j + 1] = ((uint8_t *)&rv_bitlen)[1]; + + rte_free(msg->val); + msg->len = (rv_bitlen + m_bitlen + 16) / 8; diff --git a/dpdk/examples/fips_validation/fips_validation_sha.c b/dpdk/examples/fips_validation/fips_validation_sha.c index c5da2cc623..178ea492d3 100644 --- a/dpdk/examples/fips_validation/fips_validation_sha.c @@ -59350,8 +76096,21 @@ index 82d5f87c38..9817657ca9 100644 int status = rte_ring_sc_dequeue(msgq_req, (void **) &req); +diff --git a/dpdk/examples/ipsec-secgw/event_helper.h b/dpdk/examples/ipsec-secgw/event_helper.h +index af5cfcf794..5c4e260e2c 100644 +--- a/dpdk/examples/ipsec-secgw/event_helper.h ++++ b/dpdk/examples/ipsec-secgw/event_helper.h +@@ -102,7 +102,7 @@ struct eh_event_link_info { + /**< Event port ID */ + uint8_t eventq_id; + /**< Event queue to be linked to the port */ +- uint8_t lcore_id; ++ uint32_t lcore_id; + /**< Lcore to be polling on this port */ + }; + diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c -index a64a26c992..82a4916fb2 100644 +index a64a26c992..9620d73fc8 100644 --- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c +++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -99,10 +99,10 @@ uint32_t qp_desc_nb = 2048; @@ -59369,7 +76128,144 @@ index a64a26c992..82a4916fb2 100644 }; struct offloads tx_offloads; -@@ -1427,9 +1427,8 @@ add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) +@@ -220,8 +220,8 @@ static const char *cfgfile; + + struct lcore_params { + uint16_t port_id; +- uint8_t queue_id; +- uint8_t lcore_id; ++ uint16_t queue_id; ++ uint32_t lcore_id; + } __rte_cache_aligned; + + static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; +@@ 
-568,7 +568,7 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, + + static inline void + process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, +- uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx) ++ uint16_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx) + { + struct ipsec_traffic traffic; + +@@ -695,9 +695,7 @@ ipsec_poll_mode_worker(void) + struct rte_mbuf *pkts[MAX_PKT_BURST]; + uint32_t lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; +- int32_t i, nb_rx; +- uint16_t portid; +- uint8_t queueid; ++ uint16_t i, nb_rx, portid, queueid; + struct lcore_conf *qconf; + int32_t rc, socket_id; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) +@@ -744,7 +742,7 @@ ipsec_poll_mode_worker(void) + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + +@@ -789,8 +787,7 @@ int + check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) + { + uint16_t i; +- uint16_t portid; +- uint8_t queueid; ++ uint16_t portid, queueid; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params_array[i].port_id; +@@ -810,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) + static int32_t + check_poll_mode_params(struct eh_conf *eh_conf) + { +- uint8_t lcore; ++ uint32_t lcore; + uint16_t portid; + uint16_t i; + int32_t socket_id; +@@ -829,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf) + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { +- printf("error: lcore %hhu is not enabled in " ++ printf("error: lcore %u is not enabled in " + "lcore mask\n", lcore); + return -1; + } + socket_id = rte_lcore_to_socket_id(lcore); + if (socket_id != 0 && numa_on == 0) { +- printf("warning: lcore %hhu is on socket %d " ++ printf("warning: lcore %u is on socket %d " + "with numa off\n", + lcore, socket_id); + } +@@ -852,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf) + return 0; + } + +-static uint8_t ++static uint16_t + get_port_nb_rx_queues(const uint16_t port) + { + int32_t queue = -1; +@@ -863,14 +860,14 @@ get_port_nb_rx_queues(const uint16_t port) + lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } +- return (uint8_t)(++queue); ++ return (uint16_t)(++queue); + } + + static int32_t + init_lcore_rx_queues(void) + { + uint16_t i, nb_rx_queue; +- uint8_t lcore; ++ uint32_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; +@@ -1051,6 +1048,11 @@ parse_config(const char *q_arg) + char *str_fld[_NUM_FLD]; + int32_t i; + uint32_t size; ++ uint32_t max_fld[_NUM_FLD] = { ++ RTE_MAX_ETHPORTS, ++ RTE_MAX_QUEUES_PER_PORT, ++ RTE_MAX_LCORE ++ }; + + nb_lcore_params = 0; + +@@ -1071,7 +1073,7 @@ parse_config(const char *q_arg) + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); +- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) ++ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i]) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { +@@ -1080,11 +1082,11 @@ parse_config(const char *q_arg) + return -1; + } + lcore_params_array[nb_lcore_params].port_id = +- (uint8_t)int_fld[FLD_PORT]; ++ (uint16_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; ++ (uint16_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = +- 
(uint8_t)int_fld[FLD_LCORE]; ++ (uint32_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; +@@ -1427,9 +1429,8 @@ add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) if (port >= RTE_DIM(ethaddr_tbl)) return -EINVAL; @@ -59381,7 +76277,7 @@ index a64a26c992..82a4916fb2 100644 return 0; } -@@ -1700,6 +1699,9 @@ cryptodevs_init(enum eh_pkt_transfer_mode mode) +@@ -1700,6 +1701,9 @@ cryptodevs_init(enum eh_pkt_transfer_mode mode) total_nb_qps += qp; dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); @@ -59391,7 +76287,17 @@ index a64a26c992..82a4916fb2 100644 dev_conf.nb_queue_pairs = qp; dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; -@@ -1907,11 +1909,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, +@@ -1881,7 +1885,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf *txconf; + uint16_t nb_tx_queue, nb_rx_queue; +- uint16_t tx_queueid, rx_queueid, queue, lcore_id; ++ uint16_t tx_queueid, rx_queueid, queue; ++ uint32_t lcore_id; + int32_t ret, socket_id; + struct lcore_conf *qconf; + struct rte_ether_addr ethaddr; +@@ -1907,11 +1912,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, "Error getting MAC address (port %u): %s\n", portid, rte_strerror(-ret)); @@ -59407,6 +76313,19 @@ index a64a26c992..82a4916fb2 100644 (struct rte_ether_addr *)(val_eth + portid) + 1); print_ethaddr("Address: ", ðaddr); +@@ -2054,10 +2060,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, + + /* Register Rx callback if ptypes are not supported */ + if (!ptype_supported && +- !rte_eth_add_rx_callback(portid, queue, ++ !rte_eth_add_rx_callback(portid, rx_queueid, + parse_ptype_cb, NULL)) { + printf("Failed to add rx callback: port=%d, " +- "queue=%d\n", portid, queue); ++ "rx_queueid=%d\n", portid, rx_queueid); + } + + diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.h b/dpdk/examples/ipsec-secgw/ipsec-secgw.h index 0e0012d058..53665adf03 100644 --- a/dpdk/examples/ipsec-secgw/ipsec-secgw.h @@ -59420,10 +76339,66 @@ index 0e0012d058..53665adf03 100644 }; struct ipsec_spd_stats { +diff --git a/dpdk/examples/ipsec-secgw/ipsec.c b/dpdk/examples/ipsec-secgw/ipsec.c +index 9b52d37b81..1df5065ed7 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec.c ++++ b/dpdk/examples/ipsec-secgw/ipsec.c +@@ -81,7 +81,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[], + continue; + + /* Looking for cryptodev, which can handle this SA */ +- key.lcore_id = (uint8_t)lcore_id; ++ key.lcore_id = lcore_id; + key.cipher_algo = (uint8_t)sa->cipher_algo; + key.auth_algo = (uint8_t)sa->auth_algo; + key.aead_algo = (uint8_t)sa->aead_algo; +@@ -110,10 +110,21 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[], + if (cdev_id == RTE_CRYPTO_MAX_DEVS) + cdev_id = ipsec_ctx->tbl[cdev_id_qp].id; + else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) { +- RTE_LOG(ERR, IPSEC, +- "SA mapping to multiple cryptodevs is " +- "not supported!"); +- return -EINVAL; ++ struct rte_cryptodev_info dev_info_1, dev_info_2; ++ rte_cryptodev_info_get(cdev_id, &dev_info_1); ++ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, ++ &dev_info_2); ++ if (dev_info_1.driver_id == dev_info_2.driver_id) { ++ RTE_LOG(WARNING, IPSEC, ++ "SA mapped to multiple cryptodevs for SPI %d\n", ++ sa->spi); ++ ++ } else { ++ RTE_LOG(WARNING, IPSEC, ++ "SA mapped to multiple cryptodevs of different types for SPI 
%d\n", ++ sa->spi); ++ ++ } + } + + /* Store per core queue pair information */ +@@ -758,6 +769,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx, + continue; + } + ++ RTE_ASSERT(sa->cqp[ipsec_ctx->lcore_id] != NULL); + enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop); + } + } diff --git a/dpdk/examples/ipsec-secgw/ipsec.h b/dpdk/examples/ipsec-secgw/ipsec.h -index 6bef2a7285..2890e6e267 100644 +index 6bef2a7285..13694ee1e0 100644 --- a/dpdk/examples/ipsec-secgw/ipsec.h +++ b/dpdk/examples/ipsec-secgw/ipsec.h +@@ -117,7 +117,7 @@ struct ipsec_sa { + uint32_t spi; + struct cdev_qp *cqp[RTE_MAX_LCORE]; + uint64_t seq; +- uint32_t salt; ++ rte_be32_t salt; + uint32_t fallback_sessions; + enum rte_crypto_cipher_algorithm cipher_algo; + enum rte_crypto_auth_algorithm auth_algo; @@ -249,11 +249,18 @@ struct offloads { extern struct offloads tx_offloads; @@ -59435,14 +76410,101 @@ index 6bef2a7285..2890e6e267 100644 + * (hash key calculation reads 8 bytes if this struct is size 5 bytes). + */ struct cdev_key { - uint16_t lcore_id; +- uint16_t lcore_id; ++ uint32_t lcore_id; uint8_t cipher_algo; uint8_t auth_algo; uint8_t aead_algo; -+ uint8_t padding[3]; /* padding to 8-byte size should be zeroed */ ++ uint8_t padding; /* padding to 8-byte size should be zeroed */ }; struct socket_ctx { +@@ -278,7 +285,7 @@ struct cnt_blk { + + struct lcore_rx_queue { + uint16_t port_id; +- uint8_t queue_id; ++ uint16_t queue_id; + struct rte_security_ctx *sec_ctx; + } __rte_cache_aligned; + +diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.c b/dpdk/examples/ipsec-secgw/ipsec_worker.c +index 2f02946f86..7e4db87caf 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec_worker.c ++++ b/dpdk/examples/ipsec-secgw/ipsec_worker.c +@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void) + int32_t socket_id; + uint32_t lcore_id; + int32_t i, nb_rx; +- uint16_t portid; +- uint8_t queueid; ++ uint16_t portid, queueid; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); +@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void) + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + +@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void) + uint32_t i, nb_rx, j; + int32_t socket_id; + uint32_t lcore_id; +- uint16_t portid; +- uint8_t queueid; ++ uint16_t portid, queueid; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); +@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void) + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + +diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.h b/dpdk/examples/ipsec-secgw/ipsec_worker.h +index d5a68d91fa..93e5470962 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec_worker.h ++++ b/dpdk/examples/ipsec-secgw/ipsec_worker.h +@@ -472,7 +472,7 @@ fail: + + static __rte_always_inline void + route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], +- uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum) ++ uint32_t nb_pkts, uint64_t tx_offloads, bool ip_cksum) + { + uint32_t hop[MAX_PKT_BURST * 2]; + uint32_t dst_ip[MAX_PKT_BURST * 2]; +@@ -560,7 +560,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], + } + + static __rte_always_inline void +-route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) 
++route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts) + { + int32_t hop[MAX_PKT_BURST * 2]; + uint8_t dst_ip[MAX_PKT_BURST * 2][16]; +diff --git a/dpdk/examples/ipsec-secgw/parser.c b/dpdk/examples/ipsec-secgw/parser.c +index 98f8176651..2bd6df335b 100644 +--- a/dpdk/examples/ipsec-secgw/parser.c ++++ b/dpdk/examples/ipsec-secgw/parser.c +@@ -388,7 +388,7 @@ cfg_parse_neigh(void *parsed_result, __rte_unused struct cmdline *cl, + rc = parse_mac(res->mac, &mac); + APP_CHECK(rc == 0, st, "invalid ether addr:%s", res->mac); + rc = add_dst_ethaddr(res->port, &mac); +- APP_CHECK(rc == 0, st, "invalid port numer:%hu", res->port); ++ APP_CHECK(rc == 0, st, "invalid port number:%hu", res->port); + if (st->status < 0) + return; + } diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c index 7da9444a7b..45cd29f18b 100644 --- a/dpdk/examples/ipsec-secgw/sa.c @@ -59537,8 +76599,427 @@ index 63450537fe..4b5a032e35 100644 } static void __rte_noinline +diff --git a/dpdk/examples/l3fwd-graph/main.c b/dpdk/examples/l3fwd-graph/main.c +index 6dcb6ee92b..ec4f156881 100644 +--- a/dpdk/examples/l3fwd-graph/main.c ++++ b/dpdk/examples/l3fwd-graph/main.c +@@ -78,7 +78,7 @@ static uint32_t enabled_port_mask; + + struct lcore_rx_queue { + uint16_t port_id; +- uint8_t queue_id; ++ uint16_t queue_id; + char node_name[RTE_NODE_NAMESIZE]; + }; + +@@ -96,8 +96,8 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + + struct lcore_params { + uint16_t port_id; +- uint8_t queue_id; +- uint8_t lcore_id; ++ uint16_t queue_id; ++ uint32_t lcore_id; + } __rte_cache_aligned; + + static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; +@@ -150,19 +150,19 @@ static struct ipv4_l3fwd_lpm_route ipv4_l3fwd_lpm_route_array[] = { + static int + check_lcore_params(void) + { +- uint8_t queue, lcore; ++ uint16_t queue, i; + int socketid; +- uint16_t i; ++ uint32_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + queue = lcore_params[i].queue_id; + if (queue >= MAX_RX_QUEUE_PER_PORT) { +- printf("Invalid queue number: %hhu\n", queue); ++ printf("Invalid queue number: %" PRIu16 "\n", queue); + return -1; + } + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { +- printf("Error: lcore %hhu is not enabled in lcore mask\n", ++ printf("Error: lcore %u is not enabled in lcore mask\n", + lcore); + return -1; + } +@@ -173,7 +173,7 @@ check_lcore_params(void) + } + socketid = rte_lcore_to_socket_id(lcore); + if ((socketid != 0) && (numa_on == 0)) { +- printf("Warning: lcore %hhu is on socket %d with numa off\n", ++ printf("Warning: lcore %u is on socket %d with numa off\n", + lcore, socketid); + } + } +@@ -202,7 +202,7 @@ check_port_config(void) + return 0; + } + +-static uint8_t ++static uint16_t + get_port_n_rx_queues(const uint16_t port) + { + int queue = -1; +@@ -220,14 +220,14 @@ get_port_n_rx_queues(const uint16_t port) + } + } + +- return (uint8_t)(++queue); ++ return (uint16_t)(++queue); + } + + static int + init_lcore_rx_queues(void) + { + uint16_t i, nb_rx_queue; +- uint8_t lcore; ++ uint32_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; +@@ -235,7 +235,7 @@ init_lcore_rx_queues(void) + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("Error: too many queues (%u) for lcore: %u\n", + (unsigned int)nb_rx_queue + 1, +- (unsigned int)lcore); ++ lcore); + return -1; + } + +@@ -354,11 +354,11 @@ parse_config(const char *q_arg) + } + + lcore_params_array[nb_lcore_params].port_id = +- (uint8_t)int_fld[FLD_PORT]; ++ 
(uint16_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; ++ (uint16_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = +- (uint8_t)int_fld[FLD_LCORE]; ++ (uint32_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; +@@ -746,7 +746,8 @@ main(int argc, char **argv) + "ethdev_tx-*", + "pkt_drop", + }; +- uint8_t nb_rx_queue, queue, socketid; ++ uint8_t socketid; ++ uint16_t nb_rx_queue, queue; + struct rte_graph_param graph_conf; + struct rte_eth_dev_info dev_info; + uint32_t nb_ports, nb_conf = 0; +diff --git a/dpdk/examples/l3fwd-power/main.c b/dpdk/examples/l3fwd-power/main.c +index fd3ade330f..1bcc61e777 100644 +--- a/dpdk/examples/l3fwd-power/main.c ++++ b/dpdk/examples/l3fwd-power/main.c +@@ -228,7 +228,7 @@ enum freq_scale_hint_t + + struct lcore_rx_queue { + uint16_t port_id; +- uint8_t queue_id; ++ uint16_t queue_id; + enum freq_scale_hint_t freq_up_hint; + uint32_t zero_rx_packet_count; + uint32_t idle_hint; +@@ -860,7 +860,7 @@ sleep_until_rx_interrupt(int num, int lcore) + struct rte_epoll_event event[num]; + int n, i; + uint16_t port_id; +- uint8_t queue_id; ++ uint16_t queue_id; + void *data; + + if (status[lcore].wakeup) { +@@ -872,9 +872,9 @@ sleep_until_rx_interrupt(int num, int lcore) + n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10); + for (i = 0; i < n; i++) { + data = event[i].epdata.data; +- port_id = ((uintptr_t)data) >> CHAR_BIT; ++ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT); + queue_id = ((uintptr_t)data) & +- RTE_LEN2MASK(CHAR_BIT, uint8_t); ++ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t); + RTE_LOG(INFO, L3FWD_POWER, + "lcore %u is waked up from rx interrupt on" + " port %d queue %d\n", +@@ -889,7 +889,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on) + { + int i; + struct lcore_rx_queue *rx_queue; +- uint8_t queue_id; ++ uint16_t queue_id; + uint16_t port_id; + + for (i = 0; i < qconf->n_rx_queue; ++i) { +@@ -909,7 +909,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on) + static int event_register(struct lcore_conf *qconf) + { + struct lcore_rx_queue *rx_queue; +- uint8_t queueid; ++ uint16_t queueid; + uint16_t portid; + uint32_t data; + int ret; +@@ -919,7 +919,7 @@ static int event_register(struct lcore_conf *qconf) + rx_queue = &(qconf->rx_queue_list[i]); + portid = rx_queue->port_id; + queueid = rx_queue->queue_id; +- data = portid << CHAR_BIT | queueid; ++ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid; + + ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid, + RTE_EPOLL_PER_THREAD, +@@ -939,8 +939,7 @@ static int main_intr_loop(__rte_unused void *dummy) + unsigned int lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; + int i, j, nb_rx; +- uint8_t queueid; +- uint16_t portid; ++ uint16_t portid, queueid; + struct lcore_conf *qconf; + struct lcore_rx_queue *rx_queue; + uint32_t lcore_rx_idle_count = 0; +@@ -968,7 +967,7 @@ static int main_intr_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD_POWER, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + +@@ -1105,8 +1104,7 @@ main_telemetry_loop(__rte_unused void *dummy) + unsigned int lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc; + int i, j, nb_rx; +- uint8_t queueid; +- uint16_t portid; ++ uint16_t portid, queueid; + struct lcore_conf 
*qconf; + struct lcore_rx_queue *rx_queue; + uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0}; +@@ -1136,7 +1134,7 @@ main_telemetry_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u " +- "rxqueueid=%hhu\n", lcore_id, portid, queueid); ++ "rxqueueid=%" PRIu16 "\n", lcore_id, portid, queueid); + } + + while (!is_done()) { +@@ -1330,8 +1328,7 @@ main_legacy_loop(__rte_unused void *dummy) + uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz; + uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power; + int i, j, nb_rx; +- uint8_t queueid; +- uint16_t portid; ++ uint16_t portid, queueid; + struct lcore_conf *qconf; + struct lcore_rx_queue *rx_queue; + enum freq_scale_hint_t lcore_scaleup_hint; +@@ -1359,7 +1356,7 @@ main_legacy_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u " +- "rxqueueid=%hhu\n", lcore_id, portid, queueid); ++ "rxqueueid=%" PRIu16 "\n", lcore_id, portid, queueid); + } + + /* add into event wait list */ +@@ -1524,25 +1521,25 @@ start_rx: + static int + check_lcore_params(void) + { +- uint8_t queue, lcore; +- uint16_t i; ++ uint16_t queue, i; ++ uint32_t lcore; + int socketid; + + for (i = 0; i < nb_lcore_params; ++i) { + queue = lcore_params[i].queue_id; + if (queue >= MAX_RX_QUEUE_PER_PORT) { +- printf("invalid queue number: %hhu\n", queue); ++ printf("invalid queue number: %" PRIu16 "\n", queue); + return -1; + } + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { +- printf("error: lcore %hhu is not enabled in lcore " ++ printf("error: lcore %u is not enabled in lcore " + "mask\n", lcore); + return -1; + } + if ((socketid = rte_lcore_to_socket_id(lcore) != 0) && + (numa_on == 0)) { +- printf("warning: lcore %hhu is on socket %d with numa " ++ printf("warning: lcore %u is on socket %d with numa " + "off\n", lcore, socketid); + } + if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) { +@@ -1576,7 +1573,7 @@ check_port_config(void) + return 0; + } + +-static uint8_t ++static uint16_t + get_port_n_rx_queues(const uint16_t port) + { + int queue = -1; +@@ -1587,21 +1584,21 @@ get_port_n_rx_queues(const uint16_t port) + lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } +- return (uint8_t)(++queue); ++ return (uint16_t)(++queue); + } + + static int + init_lcore_rx_queues(void) + { + uint16_t i, nb_rx_queue; +- uint8_t lcore; ++ uint32_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].n_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", +- (unsigned)nb_rx_queue + 1, (unsigned)lcore); ++ (unsigned int)nb_rx_queue + 1, lcore); + return -1; + } else { + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = +@@ -1782,6 +1779,11 @@ parse_config(const char *q_arg) + char *str_fld[_NUM_FLD]; + int i; + unsigned size; ++ unsigned int max_fld[_NUM_FLD] = { ++ RTE_MAX_ETHPORTS, ++ RTE_MAX_QUEUES_PER_PORT, ++ RTE_MAX_LCORE ++ }; + + nb_lcore_params = 0; + +@@ -1801,8 +1803,7 @@ parse_config(const char *q_arg) + for (i = 0; i < _NUM_FLD; i++){ + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); +- if (errno != 0 || end == str_fld[i] || int_fld[i] > +- 255) ++ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i]) + return -1; + } + if 
(nb_lcore_params >= MAX_LCORE_PARAMS) { +@@ -1811,11 +1812,11 @@ parse_config(const char *q_arg) + return -1; + } + lcore_params_array[nb_lcore_params].port_id = +- (uint8_t)int_fld[FLD_PORT]; ++ (uint16_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; ++ (uint16_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = +- (uint8_t)int_fld[FLD_LCORE]; ++ (uint32_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; +@@ -2719,8 +2720,8 @@ main(int argc, char **argv) + uint64_t hz; + uint32_t n_tx_queue, nb_lcores; + uint32_t dev_rxq_num, dev_txq_num; +- uint8_t nb_rx_queue, queue, socketid; +- uint16_t portid; ++ uint8_t socketid; ++ uint16_t portid, nb_rx_queue, queue; + const char *ptr_strings[NUM_TELSTATS]; + + /* init EAL */ +diff --git a/dpdk/examples/l3fwd-power/main.h b/dpdk/examples/l3fwd-power/main.h +index 258de98f5b..194bd82102 100644 +--- a/dpdk/examples/l3fwd-power/main.h ++++ b/dpdk/examples/l3fwd-power/main.h +@@ -9,8 +9,8 @@ + #define MAX_LCORE_PARAMS 1024 + struct lcore_params { + uint16_t port_id; +- uint8_t queue_id; +- uint8_t lcore_id; ++ uint16_t queue_id; ++ uint32_t lcore_id; + } __rte_cache_aligned; + + extern struct lcore_params *lcore_params; +diff --git a/dpdk/examples/l3fwd-power/perf_core.c b/dpdk/examples/l3fwd-power/perf_core.c +index 41ef6d0c9a..e4bdb62121 100644 +--- a/dpdk/examples/l3fwd-power/perf_core.c ++++ b/dpdk/examples/l3fwd-power/perf_core.c +@@ -22,9 +22,9 @@ static uint16_t nb_hp_lcores; + + struct perf_lcore_params { + uint16_t port_id; +- uint8_t queue_id; ++ uint16_t queue_id; + uint8_t high_perf; +- uint8_t lcore_idx; ++ uint32_t lcore_idx; + } __rte_cache_aligned; + + static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS]; +@@ -132,6 +132,12 @@ parse_perf_config(const char *q_arg) + char *str_fld[_NUM_FLD]; + int i; + unsigned int size; ++ unsigned int max_fld[_NUM_FLD] = { ++ RTE_MAX_ETHPORTS, ++ RTE_MAX_QUEUES_PER_PORT, ++ 255, ++ RTE_MAX_LCORE ++ }; + + nb_prf_lc_prms = 0; + +@@ -152,7 +158,8 @@ parse_perf_config(const char *q_arg) + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); +- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) ++ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i]) ++ + return -1; + } + if (nb_prf_lc_prms >= MAX_LCORE_PARAMS) { +@@ -161,13 +168,13 @@ parse_perf_config(const char *q_arg) + return -1; + } + prf_lc_prms[nb_prf_lc_prms].port_id = +- (uint8_t)int_fld[FLD_PORT]; ++ (uint16_t)int_fld[FLD_PORT]; + prf_lc_prms[nb_prf_lc_prms].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; ++ (uint16_t)int_fld[FLD_QUEUE]; + prf_lc_prms[nb_prf_lc_prms].high_perf = + !!(uint8_t)int_fld[FLD_LCORE_HP]; + prf_lc_prms[nb_prf_lc_prms].lcore_idx = +- (uint8_t)int_fld[FLD_LCORE_IDX]; ++ (uint32_t)int_fld[FLD_LCORE_IDX]; + ++nb_prf_lc_prms; + } + diff --git a/dpdk/examples/l3fwd/l3fwd.h b/dpdk/examples/l3fwd/l3fwd.h -index ca1426a687..b55855c932 100644 +index ca1426a687..e774623b57 100644 --- a/dpdk/examples/l3fwd/l3fwd.h +++ b/dpdk/examples/l3fwd/l3fwd.h @@ -55,7 +55,6 @@ @@ -59549,10 +77030,138 @@ index ca1426a687..b55855c932 100644 struct parm_cfg { const char *rule_ipv4_name; +@@ -75,7 +74,7 @@ struct mbuf_table { + + struct lcore_rx_queue { + uint16_t port_id; +- uint8_t queue_id; ++ uint16_t queue_id; + } __rte_cache_aligned; + + struct lcore_conf { +diff --git a/dpdk/examples/l3fwd/l3fwd_acl.c b/dpdk/examples/l3fwd/l3fwd_acl.c +index 401692bcec..31798ccb10 100644 +--- 
a/dpdk/examples/l3fwd/l3fwd_acl.c ++++ b/dpdk/examples/l3fwd/l3fwd_acl.c +@@ -962,8 +962,6 @@ setup_acl(const int socket_id) + acl_log("IPv6 ACL entries %u:\n", acl_num_ipv6); + dump_ipv6_rules((struct acl6_rule *)acl_base_ipv6, acl_num_ipv6, 1); + +- memset(&acl_config, 0, sizeof(acl_config)); +- + /* Check sockets a context should be created on */ + if (socket_id >= NB_SOCKETS) { + acl_log("Socket %d is out " +@@ -973,6 +971,9 @@ setup_acl(const int socket_id) + return; + } + ++ rte_acl_free(acl_config.acx_ipv4[socket_id]); ++ rte_acl_free(acl_config.acx_ipv6[socket_id]); ++ + acl_config.acx_ipv4[socket_id] = app_acl_init(route_base_ipv4, + acl_base_ipv4, route_num_ipv4, acl_num_ipv4, + 0, socket_id); +@@ -997,7 +998,7 @@ acl_main_loop(__rte_unused void *dummy) + uint64_t prev_tsc, diff_tsc, cur_tsc; + int i, nb_rx; + uint16_t portid; +- uint8_t queueid; ++ uint16_t queueid; + struct lcore_conf *qconf; + int socketid; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) +@@ -1020,7 +1021,7 @@ acl_main_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + +@@ -1073,9 +1074,9 @@ acl_main_loop(__rte_unused void *dummy) + + l3fwd_acl_send_packets( + qconf, +- pkts_burst, ++ acl_search.m_ipv4, + acl_search.res_ipv4, +- nb_rx); ++ acl_search.num_ipv4); + } + + if (acl_search.num_ipv6) { +@@ -1088,9 +1089,9 @@ acl_main_loop(__rte_unused void *dummy) + + l3fwd_acl_send_packets( + qconf, +- pkts_burst, ++ acl_search.m_ipv6, + acl_search.res_ipv6, +- nb_rx); ++ acl_search.num_ipv6); + } + } + } +diff --git a/dpdk/examples/l3fwd/l3fwd_em.c b/dpdk/examples/l3fwd/l3fwd_em.c +index 35de31157e..e298fef523 100644 +--- a/dpdk/examples/l3fwd/l3fwd_em.c ++++ b/dpdk/examples/l3fwd/l3fwd_em.c +@@ -584,7 +584,7 @@ em_main_loop(__rte_unused void *dummy) + unsigned lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; + int i, nb_rx; +- uint8_t queueid; ++ uint16_t queueid; + uint16_t portid; + struct lcore_conf *qconf; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / +@@ -607,7 +607,7 @@ em_main_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + +diff --git a/dpdk/examples/l3fwd/l3fwd_event.h b/dpdk/examples/l3fwd/l3fwd_event.h +index e21817c36b..a7af23b8a0 100644 +--- a/dpdk/examples/l3fwd/l3fwd_event.h ++++ b/dpdk/examples/l3fwd/l3fwd_event.h +@@ -76,8 +76,8 @@ struct l3fwd_event_resources { + uint8_t deq_depth; + uint8_t has_burst; + uint8_t enabled; +- uint8_t eth_rx_queues; + uint8_t vector_enabled; ++ uint16_t eth_rx_queues; + uint16_t vector_size; + uint64_t vector_tmo_ns; + }; diff --git a/dpdk/examples/l3fwd/l3fwd_fib.c b/dpdk/examples/l3fwd/l3fwd_fib.c -index edc0dd69b9..18398492ae 100644 +index edc0dd69b9..10fa121942 100644 --- a/dpdk/examples/l3fwd/l3fwd_fib.c +++ b/dpdk/examples/l3fwd/l3fwd_fib.c +@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy) + uint64_t prev_tsc, diff_tsc, cur_tsc; + int i, nb_rx; + uint16_t portid; +- uint8_t queueid; ++ uint16_t queueid; + struct lcore_conf *qconf; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / + US_PER_S * BURST_TX_DRAIN_US; +@@ -208,7 +208,7 @@ 
fib_main_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + @@ -359,10 +359,10 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc, nh = (uint16_t)hopsv4[ipv4_arr_assem++]; else @@ -59568,8 +77177,31 @@ index edc0dd69b9..18398492ae 100644 process_packet(events[i].mbuf, &hops[i]); events[i].mbuf->port = hops[i] != BAD_PORT ? hops[i] : +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm.c b/dpdk/examples/l3fwd/l3fwd_lpm.c +index 5172979c72..54b059fe2a 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm.c ++++ b/dpdk/examples/l3fwd/l3fwd_lpm.c +@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy) + unsigned lcore_id; + uint64_t prev_tsc, diff_tsc, cur_tsc; + int i, nb_rx; +- uint16_t portid; +- uint8_t queueid; ++ uint16_t portid, queueid; + struct lcore_conf *qconf; + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / + US_PER_S * BURST_TX_DRAIN_US; +@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy) + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD, +- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", ++ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n", + lcore_id, portid, queueid); + } + diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c -index 5198ff30dd..a4f061537e 100644 +index 5198ff30dd..9201019711 100644 --- a/dpdk/examples/l3fwd/main.c +++ b/dpdk/examples/l3fwd/main.c @@ -89,7 +89,6 @@ uint32_t enabled_port_mask; @@ -59580,6 +77212,81 @@ index 5198ff30dd..a4f061537e 100644 struct lcore_conf lcore_conf[RTE_MAX_LCORE]; +@@ -97,8 +96,8 @@ struct parm_cfg parm_config; + + struct lcore_params { + uint16_t port_id; +- uint8_t queue_id; +- uint8_t lcore_id; ++ uint16_t queue_id; ++ uint32_t lcore_id; + } __rte_cache_aligned; + + static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; +@@ -289,24 +288,24 @@ setup_l3fwd_lookup_tables(void) + static int + check_lcore_params(void) + { +- uint8_t queue, lcore; +- uint16_t i; ++ uint16_t queue, i; ++ uint32_t lcore; + int socketid; + + for (i = 0; i < nb_lcore_params; ++i) { + queue = lcore_params[i].queue_id; + if (queue >= MAX_RX_QUEUE_PER_PORT) { +- printf("invalid queue number: %hhu\n", queue); ++ printf("invalid queue number: %" PRIu16 "\n", queue); + return -1; + } + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { +- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore); ++ printf("error: lcore %u is not enabled in lcore mask\n", lcore); + return -1; + } + if ((socketid = rte_lcore_to_socket_id(lcore) != 0) && + (numa_on == 0)) { +- printf("warning: lcore %hhu is on socket %d with numa off \n", ++ printf("warning: lcore %u is on socket %d with numa off\n", + lcore, socketid); + } + } +@@ -333,7 +332,7 @@ check_port_config(void) + return 0; + } + +-static uint8_t ++static uint16_t + get_port_n_rx_queues(const uint16_t port) + { + int queue = -1; +@@ -349,21 +348,21 @@ get_port_n_rx_queues(const uint16_t port) + lcore_params[i].port_id); + } + } +- return (uint8_t)(++queue); ++ return (uint16_t)(++queue); + } + + static int + init_lcore_rx_queues(void) + { + uint16_t i, nb_rx_queue; +- uint8_t lcore; ++ uint32_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].n_rx_queue; + if (nb_rx_queue >= 
MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", +- (unsigned)nb_rx_queue + 1, (unsigned)lcore); ++ (unsigned int)nb_rx_queue + 1, lcore); + return -1; + } else { + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = @@ -395,7 +394,6 @@ print_usage(const char *prgname) " [--eth-dest=X,MM:MM:MM:MM:MM:MM]" " [--max-pkt-len PKTLEN]" @@ -59619,7 +77326,52 @@ index 5198ff30dd..a4f061537e 100644 static int parse_config(const char *q_arg) { -@@ -852,14 +833,7 @@ parse_args(int argc, char **argv) +@@ -511,6 +492,11 @@ parse_config(const char *q_arg) + char *str_fld[_NUM_FLD]; + int i; + unsigned size; ++ uint16_t max_fld[_NUM_FLD] = { ++ RTE_MAX_ETHPORTS, ++ RTE_MAX_QUEUES_PER_PORT, ++ RTE_MAX_LCORE ++ }; + + nb_lcore_params = 0; + +@@ -529,7 +515,7 @@ parse_config(const char *q_arg) + for (i = 0; i < _NUM_FLD; i++){ + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); +- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) ++ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i]) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { +@@ -538,11 +524,11 @@ parse_config(const char *q_arg) + return -1; + } + lcore_params_array[nb_lcore_params].port_id = +- (uint8_t)int_fld[FLD_PORT]; ++ (uint16_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = +- (uint8_t)int_fld[FLD_QUEUE]; ++ (uint16_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = +- (uint8_t)int_fld[FLD_LCORE]; ++ (uint32_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; +@@ -638,7 +624,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues) + { + struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc(); + char *end = NULL; +- uint8_t num_eth_rx_queues; ++ uint16_t num_eth_rx_queues; + + /* parse decimal string */ + num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10); +@@ -852,14 +838,7 @@ parse_args(int argc, char **argv) break; case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM: @@ -59635,7 +77387,7 @@ index 5198ff30dd..a4f061537e 100644 break; case CMD_LINE_OPT_PARSE_PTYPE_NUM: -@@ -963,16 +937,6 @@ parse_args(int argc, char **argv) +@@ -963,16 +942,6 @@ parse_args(int argc, char **argv) lookup_mode = L3FWD_LOOKUP_LPM; } @@ -59652,6 +77404,45 @@ index 5198ff30dd..a4f061537e 100644 /* For ACL, update port config rss hash filter */ if (lookup_mode == L3FWD_LOOKUP_ACL) { port_conf.rx_adv_conf.rss_conf.rss_hf |= +@@ -1205,7 +1174,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf, + static void + l3fwd_poll_resource_setup(void) + { +- uint8_t nb_rx_queue, queue, socketid; ++ uint8_t socketid; ++ uint16_t nb_rx_queue, queue; + struct rte_eth_dev_info dev_info; + uint32_t n_tx_queue, nb_lcores; + struct rte_eth_txconf *txconf; +@@ -1502,7 +1472,7 @@ main(int argc, char **argv) + struct lcore_conf *qconf; + uint16_t queueid, portid; + unsigned int lcore_id; +- uint8_t queue; ++ uint16_t queue; + int i, ret; + + /* init EAL */ +@@ -1548,7 +1518,6 @@ main(int argc, char **argv) + l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop; + else + l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop; +- l3fwd_event_service_setup(); + } else + l3fwd_poll_resource_setup(); + +@@ -1579,6 +1548,11 @@ main(int argc, char **argv) + } + } + ++#ifdef RTE_LIB_EVENTDEV ++ if (evt_rsrc->enabled) ++ l3fwd_event_service_setup(); ++#endif ++ + printf("\n"); + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c index f9abed28e4..585aad9d70 100644 --- 
a/dpdk/examples/ntb/ntb_fwd.c @@ -59674,6 +77465,107 @@ index f9abed28e4..585aad9d70 100644 printf("Error: Cannot get count of xstats\n"); return; } +diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c +index d2fd6f77e4..f839db9102 100644 +--- a/dpdk/examples/packet_ordering/main.c ++++ b/dpdk/examples/packet_ordering/main.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -427,8 +428,8 @@ int_handler(int sig_num) + * The mbufs are then passed to the worker threads via the rx_to_workers + * ring. + */ +-static int +-rx_thread(struct rte_ring *ring_out) ++static __rte_always_inline int ++rx_thread(struct rte_ring *ring_out, bool disable_reorder_flag) + { + uint32_t seqn = 0; + uint16_t i, ret = 0; +@@ -454,9 +455,11 @@ rx_thread(struct rte_ring *ring_out) + } + app_stats.rx.rx_pkts += nb_rx_pkts; + +- /* mark sequence number */ +- for (i = 0; i < nb_rx_pkts; ) +- *rte_reorder_seqn(pkts[i++]) = seqn++; ++ /* mark sequence number if reorder is enabled */ ++ if (!disable_reorder_flag) { ++ for (i = 0; i < nb_rx_pkts;) ++ *rte_reorder_seqn(pkts[i++]) = seqn++; ++ } + + /* enqueue to rx_to_workers ring */ + ret = rte_ring_enqueue_burst(ring_out, +@@ -473,6 +476,18 @@ rx_thread(struct rte_ring *ring_out) + return 0; + } + ++static __rte_noinline int ++rx_thread_reorder(struct rte_ring *ring_out) ++{ ++ return rx_thread(ring_out, false); ++} ++ ++static __rte_noinline int ++rx_thread_reorder_disabled(struct rte_ring *ring_out) ++{ ++ return rx_thread(ring_out, true); ++} ++ + /** + * This thread takes bursts of packets from the rx_to_workers ring and + * Changes the input port value to output port value. And feds it to +@@ -772,8 +787,11 @@ main(int argc, char **argv) + (void *)&send_args, last_lcore_id); + } + +- /* Start rx_thread() on the main core */ +- rx_thread(rx_to_workers); ++ /* Start rx_thread_xxx() on the main core */ ++ if (disable_reorder) ++ rx_thread_reorder_disabled(rx_to_workers); ++ else ++ rx_thread_reorder(rx_to_workers); + + RTE_LCORE_FOREACH_WORKER(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) +diff --git a/dpdk/examples/qos_sched/args.c b/dpdk/examples/qos_sched/args.c +index b2959499ae..fbdc738d77 100644 +--- a/dpdk/examples/qos_sched/args.c ++++ b/dpdk/examples/qos_sched/args.c +@@ -141,8 +141,10 @@ app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32 + + n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator); + +- if (n_tokens > MAX_OPT_VALUES) ++ if (n_tokens > MAX_OPT_VALUES) { ++ free(string); + return -1; ++ } + + for (i = 0; i < n_tokens; i++) + opt_vals[i] = (uint32_t)atol(tokens[i]); +@@ -220,10 +222,10 @@ app_parse_flow_conf(const char *conf_str) + + pconf->rx_port = vals[0]; + pconf->tx_port = vals[1]; +- pconf->rx_core = (uint8_t)vals[2]; +- pconf->wt_core = (uint8_t)vals[3]; ++ pconf->rx_core = vals[2]; ++ pconf->wt_core = vals[3]; + if (ret == 5) +- pconf->tx_core = (uint8_t)vals[4]; ++ pconf->tx_core = vals[4]; + else + pconf->tx_core = pconf->wt_core; + diff --git a/dpdk/examples/qos_sched/init.c b/dpdk/examples/qos_sched/init.c index 0709aec10c..7a27c03b64 100644 --- a/dpdk/examples/qos_sched/init.c @@ -59834,6 +77726,42 @@ index 4486d2799e..cee1470fd7 100644 ; Pipe configuration [pipe profile 0] tb rate = 305175 ; Bytes per second +diff --git a/dpdk/examples/vhost/main.c b/dpdk/examples/vhost/main.c +index 42e53a0f9a..31c7471236 100644 +--- a/dpdk/examples/vhost/main.c ++++ b/dpdk/examples/vhost/main.c +@@ -259,6 +259,9 
@@ open_dma(const char *value) + char *dma_arg[RTE_MAX_VHOST_DEVICE]; + int args_nr; + ++ if (input == NULL) ++ return -1; ++ + while (isblank(*addrs)) + addrs++; + if (*addrs == '\0') { +diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +index 94bfbbaf78..5eddb47847 100644 +--- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c ++++ b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c +@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result) + + struct cmd_set_cpu_freq_result { + cmdline_fixed_string_t set_cpu_freq; +- uint8_t lcore_id; ++ uint32_t lcore_id; + cmdline_fixed_string_t cmd; + }; + +@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq = + set_cpu_freq, "set_cpu_freq"); + cmdline_parse_token_num_t cmd_set_cpu_freq_core_num = + TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result, +- lcore_id, RTE_UINT8); ++ lcore_id, RTE_UINT32); + cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result, + cmd, "up#down#min#max#enable_turbo#disable_turbo"); diff --git a/dpdk/kernel/freebsd/contigmem/contigmem.c b/dpdk/kernel/freebsd/contigmem/contigmem.c index bd72f4d620..7dd87599d9 100644 --- a/dpdk/kernel/freebsd/contigmem/contigmem.c @@ -59856,6 +77784,27 @@ index bd72f4d620..7dd87599d9 100644 { int i; +diff --git a/dpdk/kernel/freebsd/nic_uio/nic_uio.c b/dpdk/kernel/freebsd/nic_uio/nic_uio.c +index 7a81694c92..0043892870 100644 +--- a/dpdk/kernel/freebsd/nic_uio/nic_uio.c ++++ b/dpdk/kernel/freebsd/nic_uio/nic_uio.c +@@ -78,10 +78,14 @@ struct pci_bdf { + uint32_t function; + }; + +-static devclass_t nic_uio_devclass; +- + DEFINE_CLASS_0(nic_uio, nic_uio_driver, nic_uio_methods, sizeof(struct nic_uio_softc)); ++ ++#if __FreeBSD_version < 1400000 ++static devclass_t nic_uio_devclass; + DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_devclass, nic_uio_modevent, 0); ++#else ++DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_modevent, 0); ++#endif + + static int + nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr, diff --git a/dpdk/kernel/linux/kni/compat.h b/dpdk/kernel/linux/kni/compat.h index 3a86d12bbc..8beb670465 100644 --- a/dpdk/kernel/linux/kni/compat.h @@ -59896,6 +77845,21 @@ index a2c6d9fc1a..975379825b 100644 #endif if (ret < 0) return 0; +diff --git a/dpdk/kernel/linux/kni/kni_net.c b/dpdk/kernel/linux/kni/kni_net.c +index 779ee3451a..c115a728f0 100644 +--- a/dpdk/kernel/linux/kni/kni_net.c ++++ b/dpdk/kernel/linux/kni/kni_net.c +@@ -832,8 +832,8 @@ static const struct net_device_ops kni_net_netdev_ops = { + static void kni_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) + { +- strlcpy(info->version, KNI_VERSION, sizeof(info->version)); +- strlcpy(info->driver, "kni", sizeof(info->driver)); ++ strscpy(info->version, KNI_VERSION, sizeof(info->version)); ++ strscpy(info->driver, "kni", sizeof(info->driver)); + } + + static const struct ethtool_ops kni_net_ethtool_ops = { diff --git a/dpdk/lib/acl/acl_run_altivec.h b/dpdk/lib/acl/acl_run_altivec.h index 4dfe7a14b4..4556e1503b 100644 --- a/dpdk/lib/acl/acl_run_altivec.h @@ -59909,6 +77873,632 @@ index 4dfe7a14b4..4556e1503b 100644 transition4(xmm_t next_input, const uint64_t *trans, xmm_t *indices1, xmm_t *indices2) { +diff --git a/dpdk/lib/bbdev/rte_bbdev.c b/dpdk/lib/bbdev/rte_bbdev.c +index 1521cdbc53..71f1957bf7 100644 +--- a/dpdk/lib/bbdev/rte_bbdev.c ++++ b/dpdk/lib/bbdev/rte_bbdev.c 
+@@ -1102,12 +1102,12 @@ rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, + + intr_handle = dev->intr_handle; + if (intr_handle == NULL) { +- rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id); ++ rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id); + return -ENOTSUP; + } + + if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) { +- rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n", ++ rte_bbdev_log(ERR, "Device %u queue_id %u is too big", + dev_id, queue_id); + return -ENOTSUP; + } +@@ -1116,7 +1116,7 @@ rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, + ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); + if (ret && (ret != -EEXIST)) { + rte_bbdev_log(ERR, +- "dev %u q %u int ctl error op %d epfd %d vec %u\n", ++ "dev %u q %u int ctl error op %d epfd %d vec %u", + dev_id, queue_id, op, epfd, vec); + return ret; + } +diff --git a/dpdk/lib/bpf/bpf_validate.c b/dpdk/lib/bpf/bpf_validate.c +index 61cbb42216..ae2dad46bb 100644 +--- a/dpdk/lib/bpf/bpf_validate.c ++++ b/dpdk/lib/bpf/bpf_validate.c +@@ -29,10 +29,13 @@ struct bpf_reg_val { + }; + + struct bpf_eval_state { ++ SLIST_ENTRY(bpf_eval_state) next; /* for @safe list traversal */ + struct bpf_reg_val rv[EBPF_REG_NUM]; + struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)]; + }; + ++SLIST_HEAD(bpf_evst_head, bpf_eval_state); ++ + /* possible instruction node colour */ + enum { + WHITE, +@@ -52,6 +55,9 @@ enum { + + #define MAX_EDGES 2 + ++/* max number of 'safe' evaluated states to track per node */ ++#define NODE_EVST_MAX 32 ++ + struct inst_node { + uint8_t colour; + uint8_t nb_edge:4; +@@ -59,7 +65,18 @@ struct inst_node { + uint8_t edge_type[MAX_EDGES]; + uint32_t edge_dest[MAX_EDGES]; + uint32_t prev_node; +- struct bpf_eval_state *evst; ++ struct { ++ struct bpf_eval_state *cur; /* save/restore for jcc targets */ ++ struct bpf_eval_state *start; ++ struct bpf_evst_head safe; /* safe states for track/prune */ ++ uint32_t nb_safe; ++ } evst; ++}; ++ ++struct evst_pool { ++ uint32_t num; ++ uint32_t cur; ++ struct bpf_eval_state *ent; + }; + + struct bpf_verifier { +@@ -73,11 +90,8 @@ struct bpf_verifier { + uint32_t edge_type[MAX_EDGE_TYPE]; + struct bpf_eval_state *evst; + struct inst_node *evin; +- struct { +- uint32_t num; +- uint32_t cur; +- struct bpf_eval_state *ent; +- } evst_pool; ++ struct evst_pool evst_sr_pool; /* for evst save/restore */ ++ struct evst_pool evst_tp_pool; /* for evst track/prune */ + }; + + struct bpf_ins_check { +@@ -636,14 +650,14 @@ eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins) + { + uint64_t msk; + uint32_t op; +- size_t opsz; ++ size_t opsz, sz; + const char *err; + struct bpf_eval_state *st; + struct bpf_reg_val *rd, rs; + +- opsz = (BPF_CLASS(ins->code) == BPF_ALU) ? ++ sz = (BPF_CLASS(ins->code) == BPF_ALU) ? 
+ sizeof(uint32_t) : sizeof(uint64_t); +- opsz = opsz * CHAR_BIT; ++ opsz = sz * CHAR_BIT; + msk = RTE_LEN2MASK(opsz, uint64_t); + + st = bvf->evst; +@@ -652,8 +666,10 @@ eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins) + if (BPF_SRC(ins->code) == BPF_X) { + rs = st->rv[ins->src_reg]; + eval_apply_mask(&rs, msk); +- } else ++ } else { ++ rs = (struct bpf_reg_val){.v = {.size = sz,},}; + eval_fill_imm(&rs, msk, ins->imm); ++ } + + eval_apply_mask(rd, msk); + +@@ -1083,7 +1099,7 @@ eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins) + struct bpf_reg_val rvf, rvt; + + tst = bvf->evst; +- fst = bvf->evin->evst; ++ fst = bvf->evin->evst.cur; + + frd = fst->rv + ins->dst_reg; + trd = tst->rv + ins->dst_reg; +@@ -1812,8 +1828,8 @@ add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx) + uint32_t ne; + + if (nidx > bvf->prm->nb_ins) { +- RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, " +- "next pc: %u\n", ++ RTE_BPF_LOG(ERR, ++ "%s: program boundary violation at pc: %u, next pc: %u\n", + __func__, get_node_idx(bvf, node), nidx); + return -EINVAL; + } +@@ -2089,60 +2105,114 @@ validate(struct bpf_verifier *bvf) + * helper functions get/free eval states. + */ + static struct bpf_eval_state * +-pull_eval_state(struct bpf_verifier *bvf) ++pull_eval_state(struct evst_pool *pool) + { + uint32_t n; + +- n = bvf->evst_pool.cur; +- if (n == bvf->evst_pool.num) ++ n = pool->cur; ++ if (n == pool->num) + return NULL; + +- bvf->evst_pool.cur = n + 1; +- return bvf->evst_pool.ent + n; ++ pool->cur = n + 1; ++ return pool->ent + n; + } + + static void +-push_eval_state(struct bpf_verifier *bvf) ++push_eval_state(struct evst_pool *pool) + { +- bvf->evst_pool.cur--; ++ RTE_ASSERT(pool->cur != 0); ++ pool->cur--; + } + + static void + evst_pool_fini(struct bpf_verifier *bvf) + { + bvf->evst = NULL; +- free(bvf->evst_pool.ent); +- memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool)); ++ free(bvf->evst_sr_pool.ent); ++ memset(&bvf->evst_sr_pool, 0, sizeof(bvf->evst_sr_pool)); ++ memset(&bvf->evst_tp_pool, 0, sizeof(bvf->evst_tp_pool)); + } + + static int + evst_pool_init(struct bpf_verifier *bvf) + { +- uint32_t n; ++ uint32_t k, n; + +- n = bvf->nb_jcc_nodes + 1; ++ /* ++ * We need nb_jcc_nodes + 1 for save_cur/restore_cur ++ * remaining ones will be used for state tracking/pruning. ++ */ ++ k = bvf->nb_jcc_nodes + 1; ++ n = k * 3; + +- bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0])); +- if (bvf->evst_pool.ent == NULL) ++ bvf->evst_sr_pool.ent = calloc(n, sizeof(bvf->evst_sr_pool.ent[0])); ++ if (bvf->evst_sr_pool.ent == NULL) + return -ENOMEM; + +- bvf->evst_pool.num = n; +- bvf->evst_pool.cur = 0; ++ bvf->evst_sr_pool.num = k; ++ bvf->evst_sr_pool.cur = 0; + +- bvf->evst = pull_eval_state(bvf); ++ bvf->evst_tp_pool.ent = bvf->evst_sr_pool.ent + k; ++ bvf->evst_tp_pool.num = n - k; ++ bvf->evst_tp_pool.cur = 0; ++ ++ bvf->evst = pull_eval_state(&bvf->evst_sr_pool); + return 0; + } + ++/* ++ * try to allocate and initialise new eval state for given node. ++ * later if no errors will be encountered, this state will be accepted as ++ * one of the possible 'safe' states for that node. 
++ */ ++static void ++save_start_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++{ ++ RTE_ASSERT(node->evst.start == NULL); ++ ++ /* limit number of states for one node with some reasonable value */ ++ if (node->evst.nb_safe >= NODE_EVST_MAX) ++ return; ++ ++ /* try to get new eval_state */ ++ node->evst.start = pull_eval_state(&bvf->evst_tp_pool); ++ ++ /* make a copy of current state */ ++ if (node->evst.start != NULL) { ++ memcpy(node->evst.start, bvf->evst, sizeof(*node->evst.start)); ++ SLIST_NEXT(node->evst.start, next) = NULL; ++ } ++} ++ ++/* ++ * add @start state to the list of @safe states. ++ */ ++static void ++save_safe_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++{ ++ if (node->evst.start == NULL) ++ return; ++ ++ SLIST_INSERT_HEAD(&node->evst.safe, node->evst.start, next); ++ node->evst.nb_safe++; ++ ++ RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u,state=%p): nb_safe=%u;\n", ++ __func__, bvf, get_node_idx(bvf, node), node->evst.start, ++ node->evst.nb_safe); ++ ++ node->evst.start = NULL; ++} ++ + /* + * Save current eval state. + */ + static int +-save_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++save_cur_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + { + struct bpf_eval_state *st; + + /* get new eval_state for this node */ +- st = pull_eval_state(bvf); ++ st = pull_eval_state(&bvf->evst_sr_pool); + if (st == NULL) { + RTE_BPF_LOG(ERR, + "%s: internal error (out of space) at pc: %u\n", +@@ -2154,11 +2224,13 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + memcpy(st, bvf->evst, sizeof(*st)); + + /* swap current state with new one */ +- node->evst = bvf->evst; ++ RTE_ASSERT(node->evst.cur == NULL); ++ node->evst.cur = bvf->evst; + bvf->evst = st; + + RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n", +- __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst); ++ __func__, bvf, get_node_idx(bvf, node), node->evst.cur, ++ bvf->evst); + + return 0; + } +@@ -2167,14 +2239,15 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + * Restore previous eval state and mark current eval state as free. + */ + static void +-restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node) ++restore_cur_eval_state(struct bpf_verifier *bvf, struct inst_node *node) + { + RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n", +- __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst); ++ __func__, bvf, get_node_idx(bvf, node), bvf->evst, ++ node->evst.cur); + +- bvf->evst = node->evst; +- node->evst = NULL; +- push_eval_state(bvf); ++ bvf->evst = node->evst.cur; ++ node->evst.cur = NULL; ++ push_eval_state(&bvf->evst_sr_pool); + } + + static void +@@ -2191,26 +2264,124 @@ log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins, + + rte_log(loglvl, rte_bpf_logtype, + "r%u={\n" +- "\tv={type=%u, size=%zu},\n" ++ "\tv={type=%u, size=%zu, buf_size=%zu},\n" + "\tmask=0x%" PRIx64 ",\n" + "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n" + "\ts={min=%" PRId64 ", max=%" PRId64 "},\n" + "};\n", + ins->dst_reg, +- rv->v.type, rv->v.size, ++ rv->v.type, rv->v.size, rv->v.buf_size, + rv->mask, + rv->u.min, rv->u.max, + rv->s.min, rv->s.max); + } + + /* +- * Do second pass through CFG and try to evaluate instructions +- * via each possible path. +- * Right now evaluation functionality is quite limited. +- * Still need to add extra checks for: +- * - use/return uninitialized registers. +- * - use uninitialized data from the stack. 
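The evst_pool_init() hunk above sizes one calloc'd block at 3 * (nb_jcc_nodes + 1) states: the first third backs the save/restore pool and the remaining two thirds back the track/prune pool, so a program with 8 JCC nodes gets 9 + 18 = 27 states from a single allocation and a single free. A minimal standalone sketch of that carve-out; the names here are illustrative, not the patch's own:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct state { uint64_t reg[16]; };	/* stand-in for struct bpf_eval_state */
struct pool { uint32_t num, cur; struct state *ent; };

/* one backing allocation, two pools: first k entries for save/restore,
 * remaining 2*k for track/prune; free(sr->ent) releases both */
static int
pools_init(struct pool *sr, struct pool *tp, uint32_t nb_jcc_nodes)
{
	uint32_t k = nb_jcc_nodes + 1;

	sr->ent = calloc(k * 3, sizeof(sr->ent[0]));
	if (sr->ent == NULL)
		return -ENOMEM;
	sr->num = k;
	sr->cur = 0;
	tp->ent = sr->ent + k;	/* second pool aliases the tail of the block */
	tp->num = 2 * k;
	tp->cur = 0;
	return 0;
}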
+- * - memory boundaries violation.
++ * compare two evaluation states.
++ * returns zero if @lv is more conservative (safer) than @rv.
++ * returns non-zero value otherwise.
++ */
++static int
++cmp_reg_val_within(const struct bpf_reg_val *lv, const struct bpf_reg_val *rv)
++{
++	/* expect @v and @mask to be identical */
++	if (memcmp(&lv->v, &rv->v, sizeof(lv->v)) != 0 || lv->mask != rv->mask)
++		return -1;
++
++	/* exact match only for mbuf and stack pointers */
++	if (lv->v.type == RTE_BPF_ARG_PTR_MBUF ||
++			lv->v.type == BPF_ARG_PTR_STACK)
++		return -1;
++
++	if (lv->u.min <= rv->u.min && lv->u.max >= rv->u.max &&
++			lv->s.min <= rv->s.min && lv->s.max >= rv->s.max)
++		return 0;
++
++	return -1;
++}
++
++/*
++ * compare two evaluation states.
++ * returns zero if they are identical.
++ * returns positive value if @lv is more conservative (safer) than @rv.
++ * returns negative value otherwise.
++ */
++static int
++cmp_eval_state(const struct bpf_eval_state *lv, const struct bpf_eval_state *rv)
++{
++	int32_t rc;
++	uint32_t i, k;
++
++	/* for stack expect identical values */
++	rc = memcmp(lv->sv, rv->sv, sizeof(lv->sv));
++	if (rc != 0)
++		return -(2 * EBPF_REG_NUM);
++
++	k = 0;
++	/* check register values */
++	for (i = 0; i != RTE_DIM(lv->rv); i++) {
++		rc = memcmp(&lv->rv[i], &rv->rv[i], sizeof(lv->rv[i]));
++		if (rc != 0 && cmp_reg_val_within(&lv->rv[i], &rv->rv[i]) != 0)
++			return -(i + 1);
++		k += (rc != 0);
++	}
++
++	return k;
++}
++
++/*
++ * check whether we already evaluated that path and whether it can be pruned.
++ */
++static int
++prune_eval_state(struct bpf_verifier *bvf, const struct inst_node *node,
++	struct inst_node *next)
++{
++	int32_t rc;
++	struct bpf_eval_state *safe;
++
++	rc = INT32_MIN;
++	SLIST_FOREACH(safe, &next->evst.safe, next) {
++		rc = cmp_eval_state(safe, bvf->evst);
++		if (rc >= 0)
++			break;
++	}
++
++	rc = (rc >= 0) ? 0 : -1;
++
++	/*
++	 * current state doesn't match any safe states,
++	 * so no pruning is possible right now;
++	 * track current state for future reference.
++	 */
++	if (rc != 0)
++		save_start_eval_state(bvf, next);
++
++	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u,next=%u) returns %d, "
++		"next->evst.start=%p, next->evst.nb_safe=%u\n",
++		__func__, bvf, get_node_idx(bvf, node),
++		get_node_idx(bvf, next), rc,
++		next->evst.start, next->evst.nb_safe);
++	return rc;
++}
++
++/* Do second pass through CFG and try to evaluate instructions
++ * via each possible path. The verifier will try all paths, tracking types of
++ * registers used as input to instructions, and updating resulting type via
++ * register state values. Plus for each register and possible stack value it
++ * tries to estimate possible max/min value.
++ * For conditional jumps, a stack is used to save evaluation state, so one
++ * path is explored while the state for the other path is pushed onto the stack.
++ * Then later, we backtrack to the first pushed instruction and repeat the cycle
++ * until the stack is empty and we're done.
++ * For a program with many conditional branches, walking through all possible
++ * paths could be prohibitively expensive. So to minimize evaluations we use
++ * a heuristic similar to what the Linux kernel does - state pruning:
++ * If, from a given instruction and program state, we explore all possible
++ * paths and for each of them reach _exit() without any complaints and a valid
++ * R0 value, then for that instruction, that program state can be marked as
++ * 'safe'.
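The cmp_reg_val_within() test above is plain interval containment: an already-accepted 'safe' state whose unsigned and signed min/max ranges enclose the current ones cannot admit any value the current state could, so re-walking the path can reveal nothing new. The core of that check as a hypothetical standalone helper, not the patch's own types:

#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t min, max; };

/* true when @safe encloses @cur, i.e. @safe is the more conservative
 * (wider) estimate and re-evaluating @cur can be pruned */
static bool
range_subsumes(const struct range *safe, const struct range *cur)
{
	return safe->min <= cur->min && safe->max >= cur->max;
}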
When we later arrive at the same instruction with a state
++ * equivalent to an earlier instruction's 'safe' state, we can prune the search.
++ * For now, only states for JCC targets are saved/examined.
+ */
+ static int
+ evaluate(struct bpf_verifier *bvf)
+@@ -2221,6 +2392,13 @@ evaluate(struct bpf_verifier *bvf)
+ 	const struct ebpf_insn *ins;
+ 	struct inst_node *next, *node;
+ 
++	struct {
++		uint32_t nb_eval;
++		uint32_t nb_prune;
++		uint32_t nb_save;
++		uint32_t nb_restore;
++	} stats;
++
+ 	/* initial state of frame pointer */
+ 	static const struct bpf_reg_val rvfp = {
+ 		.v = {
+@@ -2244,6 +2422,8 @@ evaluate(struct bpf_verifier *bvf)
+ 	next = node;
+ 	rc = 0;
+ 
++	memset(&stats, 0, sizeof(stats));
++
+ 	while (node != NULL && rc == 0) {
+ 
+ 		/*
+@@ -2257,11 +2437,14 @@ evaluate(struct bpf_verifier *bvf)
+ 		op = ins[idx].code;
+ 
+ 		/* for jcc node make a copy of evaluation state */
+-		if (node->nb_edge > 1)
+-			rc |= save_eval_state(bvf, node);
++		if (node->nb_edge > 1) {
++			rc |= save_cur_eval_state(bvf, node);
++			stats.nb_save++;
++		}
+ 
+ 		if (ins_chk[op].eval != NULL && rc == 0) {
+ 			err = ins_chk[op].eval(bvf, ins + idx);
++			stats.nb_eval++;
+ 			if (err != NULL) {
+ 				RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+ 					__func__, err, idx);
+@@ -2275,21 +2458,37 @@ evaluate(struct bpf_verifier *bvf)
+ 
+ 		/* proceed through CFG */
+ 		next = get_next_node(bvf, node);
++
+ 		if (next != NULL) {
+ 
+ 			/* proceed with next child */
+ 			if (node->cur_edge == node->nb_edge &&
+-					node->evst != NULL)
+-				restore_eval_state(bvf, node);
++					node->evst.cur != NULL) {
++				restore_cur_eval_state(bvf, node);
++				stats.nb_restore++;
++			}
+ 
+-			next->prev_node = get_node_idx(bvf, node);
+-			node = next;
++			/*
++			 * for jcc targets: check whether we have already
++			 * evaluated that path and whether its evaluation can
++			 * be skipped this time.
++			 */
++			if (node->nb_edge > 1 && prune_eval_state(bvf, node,
++					next) == 0) {
++				next = NULL;
++				stats.nb_prune++;
++			} else {
++				next->prev_node = get_node_idx(bvf, node);
++				node = next;
++			}
+ 		} else {
+ 			/*
+ 			 * finished with current node and all it's kids,
+-			 * proceed with parent
++			 * mark its @start state as safe for future reference,
++			 * and proceed with parent.
+ */ + node->cur_edge = 0; ++ save_safe_eval_state(bvf, node); + node = get_prev_node(bvf, node); + + /* finished */ +@@ -2298,6 +2497,14 @@ evaluate(struct bpf_verifier *bvf) + } + } + ++ RTE_BPF_LOG(DEBUG, "%s(%p) returns %d, stats:\n" ++ "node evaluations=%u;\n" ++ "state pruned=%u;\n" ++ "state saves=%u;\n" ++ "state restores=%u;\n", ++ __func__, bvf, rc, ++ stats.nb_eval, stats.nb_prune, stats.nb_save, stats.nb_restore); ++ + return rc; + } + +diff --git a/dpdk/lib/bpf/meson.build b/dpdk/lib/bpf/meson.build +index cd739bb827..aa258a9061 100644 +--- a/dpdk/lib/bpf/meson.build ++++ b/dpdk/lib/bpf/meson.build +@@ -7,6 +7,12 @@ if is_windows + subdir_done() + endif + ++if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_32') ++ build = false ++ reason = 'not supported on 32-bit x86' ++ subdir_done() ++endif ++ + sources = files('bpf.c', + 'bpf_dump.c', + 'bpf_exec.c', +diff --git a/dpdk/lib/cfgfile/rte_cfgfile.c b/dpdk/lib/cfgfile/rte_cfgfile.c +index 9fa7d010ef..e2f77d2b64 100644 +--- a/dpdk/lib/cfgfile/rte_cfgfile.c ++++ b/dpdk/lib/cfgfile/rte_cfgfile.c +@@ -135,7 +135,7 @@ rte_cfgfile_check_params(const struct rte_cfgfile_parameters *params) + unsigned int i; + + if (!params) { +- CFG_LOG(ERR, "missing cfgfile parameters\n"); ++ CFG_LOG(ERR, "missing cfgfile parameters"); + return -EINVAL; + } + +@@ -148,7 +148,7 @@ rte_cfgfile_check_params(const struct rte_cfgfile_parameters *params) + } + + if (valid_comment == 0) { +- CFG_LOG(ERR, "invalid comment characters %c\n", ++ CFG_LOG(ERR, "invalid comment characters %c", + params->comment_character); + return -ENOTSUP; + } +@@ -186,7 +186,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags, + lineno++; + if ((len >= sizeof(buffer) - 1) && (buffer[len-1] != '\n')) { + CFG_LOG(ERR, " line %d - no \\n found on string. 
" +- "Check if line too long\n", lineno); ++ "Check if line too long", lineno); + goto error1; + } + /* skip parsing if comment character found */ +@@ -207,7 +207,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags, + char *end = memchr(buffer, ']', len); + if (end == NULL) { + CFG_LOG(ERR, +- "line %d - no terminating ']' character found\n", ++ "line %d - no terminating ']' character found", + lineno); + goto error1; + } +@@ -223,7 +223,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags, + split[1] = memchr(buffer, '=', len); + if (split[1] == NULL) { + CFG_LOG(ERR, +- "line %d - no '=' character found\n", ++ "line %d - no '=' character found", + lineno); + goto error1; + } +@@ -247,7 +247,7 @@ rte_cfgfile_load_with_params(const char *filename, int flags, + if (!(flags & CFG_FLAG_EMPTY_VALUES) && + (*split[1] == '\0')) { + CFG_LOG(ERR, +- "line %d - cannot use empty values\n", ++ "line %d - cannot use empty values", + lineno); + goto error1; + } +@@ -412,7 +412,7 @@ int rte_cfgfile_set_entry(struct rte_cfgfile *cfg, const char *sectionname, + return 0; + } + +- CFG_LOG(ERR, "entry name doesn't exist\n"); ++ CFG_LOG(ERR, "entry name doesn't exist"); + return -EINVAL; + } + diff --git a/dpdk/lib/cmdline/cmdline.c b/dpdk/lib/cmdline/cmdline.c index e1009ba4c4..355c7d8ca6 100644 --- a/dpdk/lib/cmdline/cmdline.c @@ -60036,7 +78626,7 @@ index 42bda9fc79..7eb5c58798 100644 __rte_experimental void diff --git a/dpdk/lib/compressdev/rte_compressdev_pmd.c b/dpdk/lib/compressdev/rte_compressdev_pmd.c -index e139bc86e7..156bccd972 100644 +index e139bc86e7..762b44f03e 100644 --- a/dpdk/lib/compressdev/rte_compressdev_pmd.c +++ b/dpdk/lib/compressdev/rte_compressdev_pmd.c @@ -23,6 +23,9 @@ rte_compressdev_pmd_parse_name_arg(const char *key __rte_unused, @@ -60059,6 +78649,21 @@ index e139bc86e7..156bccd972 100644 errno = 0; i = strtol(value, &end, 10); if (*end != 0 || errno != 0 || i < 0) +@@ -94,12 +100,12 @@ rte_compressdev_pmd_create(const char *name, + struct rte_compressdev *compressdev; + + if (params->name[0] != '\0') { +- COMPRESSDEV_LOG(INFO, "User specified device name = %s\n", ++ COMPRESSDEV_LOG(INFO, "User specified device name = %s", + params->name); + name = params->name; + } + +- COMPRESSDEV_LOG(INFO, "Creating compressdev %s\n", name); ++ COMPRESSDEV_LOG(INFO, "Creating compressdev %s", name); + + COMPRESSDEV_LOG(INFO, "Init parameters - name: %s, socket id: %d", + name, params->socket_id); diff --git a/dpdk/lib/cryptodev/cryptodev_pmd.c b/dpdk/lib/cryptodev/cryptodev_pmd.c index 77b269f312..d8073a601d 100644 --- a/dpdk/lib/cryptodev/cryptodev_pmd.c @@ -60150,10 +78755,70 @@ index 33b4966e16..0d625ec103 100644 /** diff --git a/dpdk/lib/cryptodev/rte_cryptodev.c b/dpdk/lib/cryptodev/rte_cryptodev.c -index 2165a0688c..515d0df5ce 100644 +index 2165a0688c..6c11881ab8 100644 --- a/dpdk/lib/cryptodev/rte_cryptodev.c +++ b/dpdk/lib/cryptodev/rte_cryptodev.c -@@ -2692,7 +2692,7 @@ cryptodev_handle_dev_info(const char *cmd __rte_unused, +@@ -1407,6 +1407,10 @@ rte_cryptodev_add_enq_callback(uint8_t dev_id, + rte_cryptodev_callback_fn cb_fn, + void *cb_arg) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ rte_errno = ENOTSUP; ++ return NULL; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb, *tail; +@@ -1472,6 +1476,9 @@ rte_cryptodev_remove_enq_callback(uint8_t dev_id, + uint16_t qp_id, + struct rte_cryptodev_cb *cb) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ return -ENOTSUP; ++#endif + struct rte_cryptodev *dev; + struct 
rte_cryptodev_cb **prev_cb, *curr_cb; + struct rte_cryptodev_cb_rcu *list; +@@ -1545,6 +1552,10 @@ rte_cryptodev_add_deq_callback(uint8_t dev_id, + rte_cryptodev_callback_fn cb_fn, + void *cb_arg) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ rte_errno = ENOTSUP; ++ return NULL; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb, *tail; +@@ -1611,6 +1622,9 @@ rte_cryptodev_remove_deq_callback(uint8_t dev_id, + uint16_t qp_id, + struct rte_cryptodev_cb *cb) + { ++#ifndef RTE_CRYPTO_CALLBACKS ++ return -ENOTSUP; ++#endif + struct rte_cryptodev *dev; + struct rte_cryptodev_cb **prev_cb, *curr_cb; + struct rte_cryptodev_cb_rcu *list; +@@ -1969,7 +1983,7 @@ rte_cryptodev_sym_session_create(uint8_t dev_id, + } + + if (xforms == NULL) { +- CDEV_LOG_ERR("Invalid xform\n"); ++ CDEV_LOG_ERR("Invalid xform"); + rte_errno = EINVAL; + return NULL; + } +@@ -2579,7 +2593,7 @@ rte_cryptodev_driver_id_get(const char *name) + int driver_id = -1; + + if (name == NULL) { +- RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL"); ++ CDEV_LOG_DEBUG("name pointer NULL"); + return -1; + } + +@@ -2692,7 +2706,7 @@ cryptodev_handle_dev_info(const char *cmd __rte_unused, rte_tel_data_start_dict(d); rte_tel_data_add_dict_string(d, "device_name", cryptodev_info.device->name); @@ -60163,10 +78828,19 @@ index 2165a0688c..515d0df5ce 100644 return 0; diff --git a/dpdk/lib/cryptodev/rte_cryptodev.h b/dpdk/lib/cryptodev/rte_cryptodev.h -index 86d792e2e7..0c9464649a 100644 +index 86d792e2e7..cef9f2b3cb 100644 --- a/dpdk/lib/cryptodev/rte_cryptodev.h +++ b/dpdk/lib/cryptodev/rte_cryptodev.h -@@ -501,6 +501,7 @@ extern const char * +@@ -26,8 +26,6 @@ extern "C" { + + #include "rte_cryptodev_trace_fp.h" + +-extern const char **rte_cyptodev_names; +- + /* Logging Macros */ + + #define CDEV_LOG_ERR(...) \ +@@ -501,6 +499,7 @@ extern const char * rte_cryptodev_get_feature_name(uint64_t flag); /** Crypto device information */ @@ -60174,7 +78848,7 @@ index 86d792e2e7..0c9464649a 100644 struct rte_cryptodev_info { const char *driver_name; /**< Driver name. */ uint8_t driver_id; /**< Driver identifier */ -@@ -529,6 +530,7 @@ struct rte_cryptodev_info { +@@ -529,6 +528,7 @@ struct rte_cryptodev_info { */ } sym; }; @@ -60182,7 +78856,7 @@ index 86d792e2e7..0c9464649a 100644 #define RTE_CRYPTODEV_DETACHED (0) #define RTE_CRYPTODEV_ATTACHED (1) -@@ -541,11 +543,13 @@ enum rte_cryptodev_event_type { +@@ -541,11 +541,13 @@ enum rte_cryptodev_event_type { }; /** Crypto device queue pair configuration structure. */ @@ -60196,7 +78870,7 @@ index 86d792e2e7..0c9464649a 100644 /** * Function type used for processing crypto ops when enqueue/dequeue burst is -@@ -674,6 +678,7 @@ extern int +@@ -674,6 +676,7 @@ extern int rte_cryptodev_socket_id(uint8_t dev_id); /** Crypto device configuration structure */ @@ -60204,7 +78878,7 @@ index 86d792e2e7..0c9464649a 100644 struct rte_cryptodev_config { int socket_id; /**< Socket to allocate resources on */ uint16_t nb_queue_pairs; -@@ -686,6 +691,7 @@ struct rte_cryptodev_config { +@@ -686,6 +689,7 @@ struct rte_cryptodev_config { * - RTE_CRYTPODEV_FF_SECURITY */ }; @@ -60212,7 +78886,7 @@ index 86d792e2e7..0c9464649a 100644 /** * Configure a device. 
-@@ -900,6 +906,15 @@ struct rte_cryptodev_cb_rcu { +@@ -900,6 +904,15 @@ struct rte_cryptodev_cb_rcu { /**< RCU QSBR variable per queue pair */ }; @@ -60228,7 +78902,7 @@ index 86d792e2e7..0c9464649a 100644 void * rte_cryptodev_get_sec_ctx(uint8_t dev_id); -@@ -911,11 +926,14 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); +@@ -911,11 +924,14 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); * @param nb_elts * The number of elements in the mempool. * @param elt_size @@ -60248,7 +78922,7 @@ index 86d792e2e7..0c9464649a 100644 * @param cache_size * The number of per-lcore cache elements * @param priv_size -@@ -926,8 +944,8 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); +@@ -926,8 +942,8 @@ rte_cryptodev_get_sec_ctx(uint8_t dev_id); * constraint for the reserved zone. * * @return @@ -60259,7 +78933,7 @@ index 86d792e2e7..0c9464649a 100644 */ __rte_experimental struct rte_mempool * -@@ -968,11 +986,14 @@ rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, +@@ -968,11 +984,14 @@ rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, * @param dev_id ID of device that we want the session to be used on * @param xforms Symmetric crypto transform operations to apply on flow * processed with this session @@ -60276,6 +78950,75 @@ index 86d792e2e7..0c9464649a 100644 */ void * rte_cryptodev_sym_session_create(uint8_t dev_id, +@@ -1832,7 +1851,7 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, + nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops); + + #ifdef RTE_CRYPTO_CALLBACKS +- if (unlikely(fp_ops->qp.deq_cb != NULL)) { ++ if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) { + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb; + +@@ -1899,7 +1918,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, + fp_ops = &rte_crypto_fp_ops[dev_id]; + qp = fp_ops->qp.data[qp_id]; + #ifdef RTE_CRYPTO_CALLBACKS +- if (unlikely(fp_ops->qp.enq_cb != NULL)) { ++ if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) { + struct rte_cryptodev_cb_rcu *list; + struct rte_cryptodev_cb *cb; + +diff --git a/dpdk/lib/dmadev/rte_dmadev.c b/dpdk/lib/dmadev/rte_dmadev.c +index 4da653eec7..d94f85ea9a 100644 +--- a/dpdk/lib/dmadev/rte_dmadev.c ++++ b/dpdk/lib/dmadev/rte_dmadev.c +@@ -157,15 +157,24 @@ static int + dma_dev_data_prepare(void) + { + size_t size; ++ void *ptr; + + if (rte_dma_devices != NULL) + return 0; + +- size = dma_devices_max * sizeof(struct rte_dma_dev); +- rte_dma_devices = malloc(size); +- if (rte_dma_devices == NULL) ++ /* The DMA device object is expected to align cacheline, ++ * but the return value of malloc may not be aligned to the cache line. ++ * Therefore, extra memory is applied for realignment. ++ * Note: posix_memalign/aligned_alloc are not used ++ * because not always available, depending on libc. 
++ */ ++ size = dma_devices_max * sizeof(struct rte_dma_dev) + RTE_CACHE_LINE_SIZE; ++ ptr = malloc(size); ++ if (ptr == NULL) + return -ENOMEM; +- memset(rte_dma_devices, 0, size); ++ memset(ptr, 0, size); ++ ++ rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE); + + return 0; + } +@@ -710,7 +719,7 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status * + return -EINVAL; + + if (vchan >= dev->data->dev_conf.nb_vchans) { +- RTE_DMA_LOG(ERR, "Device %u vchan %u out of range\n", dev_id, vchan); ++ RTE_DMA_LOG(ERR, "Device %u vchan %u out of range", dev_id, vchan); + return -EINVAL; + } + +@@ -1011,7 +1020,7 @@ dmadev_handle_dev_dump(const char *cmd __rte_unused, + if (*end_param != '\0') + RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring"); + +- buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); ++ buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); + if (buf == NULL) + return -ENOMEM; + diff --git a/dpdk/lib/eal/common/eal_common_debug.c b/dpdk/lib/eal/common/eal_common_debug.c index dcb554af1e..9cac9c6390 100644 --- a/dpdk/lib/eal/common/eal_common_debug.c @@ -60339,10 +79082,84 @@ index 52e52e5986..95da55d9b0 100644 for (i = 0; i < RTE_DIM(dummy.num_pages); i++) { diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c -index f11f87979f..169e66e04b 100644 +index f11f87979f..4b3b3904c7 100644 --- a/dpdk/lib/eal/common/eal_common_fbarray.c +++ b/dpdk/lib/eal/common/eal_common_fbarray.c -@@ -1482,7 +1482,7 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) +@@ -173,7 +173,7 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + + /* combine current ignore mask with last index ignore mask */ + if (msk_idx == last) +- ignore_msk |= last_msk; ++ ignore_msk &= last_msk; + + /* if we have an ignore mask, ignore once */ + if (ignore_msk) { +@@ -216,6 +216,8 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks; + lookahead_idx++) { + unsigned int s_idx, need; ++ uint64_t first_bit = 1; ++ + lookahead_msk = msk->data[lookahead_idx]; + + /* if we're looking for free space, invert the mask */ +@@ -225,18 +227,24 @@ find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + /* figure out how many consecutive bits we need here */ + need = RTE_MIN(left, MASK_ALIGN); + +- for (s_idx = 0; s_idx < need - 1; s_idx++) ++ /* count number of shifts we performed */ ++ for (s_idx = 0; s_idx < need - 1; s_idx++) { + lookahead_msk &= lookahead_msk >> 1ULL; ++ /* did we lose the run yet? */ ++ if ((lookahead_msk & first_bit) == 0) ++ break; ++ } + + /* if first bit is not set, we've lost the run */ +- if ((lookahead_msk & 1) == 0) { ++ if ((lookahead_msk & first_bit) == 0) { + /* + * we've scanned this far, so we know there are + * no runs in the space we've lookahead-scanned + * as well, so skip that on next iteration. 
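The rte_dmadev.c hunk above uses a classic realignment idiom: over-allocate by one cache line and round the returned pointer up to the next boundary (RTE_PTR_ALIGN does exactly that rounding). A self-contained sketch under the assumption of 64-byte lines; unlike the hunk, it also hands back the raw pointer so the block can still be freed:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_LINE 64	/* assumed line size; DPDK uses RTE_CACHE_LINE_SIZE */

/* returns a zeroed, cache-line-aligned region of @size bytes;
 * *raw receives the pointer that must later be passed to free() */
static void *
zalloc_aligned(size_t size, void **raw)
{
	uintptr_t p;

	*raw = malloc(size + CACHE_LINE);
	if (*raw == NULL)
		return NULL;
	memset(*raw, 0, size + CACHE_LINE);
	/* round up to the next multiple of CACHE_LINE (a power of two) */
	p = ((uintptr_t)*raw + CACHE_LINE - 1) & ~(uintptr_t)(CACHE_LINE - 1);
	return (void *)p;
}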
+ */ +- ignore_msk = ~((1ULL << need) - 1); +- msk_idx = lookahead_idx; ++ ignore_msk = ~((1ULL << (s_idx + 1)) - 1); ++ /* outer loop will increment msk_idx so add 1 */ ++ msk_idx = lookahead_idx - 1; + break; + } + +@@ -500,8 +508,13 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + /* figure out how many consecutive bits we need here */ + need = RTE_MIN(left, MASK_ALIGN); + +- for (s_idx = 0; s_idx < need - 1; s_idx++) ++ /* count number of shifts we performed */ ++ for (s_idx = 0; s_idx < need - 1; s_idx++) { + lookbehind_msk &= lookbehind_msk << 1ULL; ++ /* did we lose the run yet? */ ++ if ((lookbehind_msk & last_bit) == 0) ++ break; ++ } + + /* if last bit is not set, we've lost the run */ + if ((lookbehind_msk & last_bit) == 0) { +@@ -510,8 +523,9 @@ find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n, + * no runs in the space we've lookbehind-scanned + * as well, so skip that on next iteration. + */ +- ignore_msk = UINT64_MAX << need; +- msk_idx = lookbehind_idx; ++ ignore_msk = ~(UINT64_MAX << (MASK_ALIGN - s_idx - 1)); ++ /* outer loop will decrement msk_idx so add 1 */ ++ msk_idx = lookbehind_idx + 1; + break; + } + +@@ -1482,7 +1496,7 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) if (fully_validate(arr->name, arr->elt_sz, arr->len)) { fprintf(f, "Invalid file-backed array\n"); @@ -60351,7 +79168,7 @@ index f11f87979f..169e66e04b 100644 } /* prevent array from changing under us */ -@@ -1496,6 +1496,5 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) +@@ -1496,6 +1510,5 @@ rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f) for (i = 0; i < msk->n_masks; i++) fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]); @@ -60397,6 +79214,93 @@ index 688dc615d7..da6711d129 100644 snprintf(addr, ADDR_STR, "%p", ms->addr); rte_tel_data_add_dict_string(d, "Hugepage_base", addr); +diff --git a/dpdk/lib/eal/common/eal_common_options.c b/dpdk/lib/eal/common/eal_common_options.c +index 2d6535781b..5312d03552 100644 +--- a/dpdk/lib/eal/common/eal_common_options.c ++++ b/dpdk/lib/eal/common/eal_common_options.c +@@ -225,6 +225,8 @@ eal_save_args(int argc, char **argv) + if (strcmp(argv[i], "--") == 0) + break; + eal_args[i] = strdup(argv[i]); ++ if (eal_args[i] == NULL) ++ goto error; + } + eal_args[i++] = NULL; /* always finish with NULL */ + +@@ -234,13 +236,31 @@ eal_save_args(int argc, char **argv) + + eal_app_args = calloc(argc - i + 1, sizeof(*eal_args)); + if (eal_app_args == NULL) +- return -1; ++ goto error; + +- for (j = 0; i < argc; j++, i++) ++ for (j = 0; i < argc; j++, i++) { + eal_app_args[j] = strdup(argv[i]); ++ if (eal_app_args[j] == NULL) ++ goto error; ++ } + eal_app_args[j] = NULL; + + return 0; ++ ++error: ++ if (eal_app_args != NULL) { ++ i = 0; ++ while (eal_app_args[i] != NULL) ++ free(eal_app_args[i++]); ++ free(eal_app_args); ++ eal_app_args = NULL; ++ } ++ i = 0; ++ while (eal_args[i] != NULL) ++ free(eal_args[i++]); ++ free(eal_args); ++ eal_args = NULL; ++ return -1; + } + #endif + +@@ -1666,7 +1686,7 @@ eal_parse_common_option(int opt, const char *optarg, + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : +- (core_parsed == LCORE_OPT_MAP) ? "--lcore" : ++ (core_parsed == LCORE_OPT_MAP) ? 
"--lcores" : + "-c"); + return -1; + } +@@ -1699,7 +1719,7 @@ eal_parse_common_option(int opt, const char *optarg, + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_MSK) ? "-c" : +- (core_parsed == LCORE_OPT_MAP) ? "--lcore" : ++ (core_parsed == LCORE_OPT_MAP) ? "--lcores" : + "-l"); + return -1; + } +@@ -1880,10 +1900,10 @@ eal_parse_common_option(int opt, const char *optarg, + } + + if (core_parsed) { +- RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n", ++ RTE_LOG(ERR, EAL, "Option --lcores is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : + (core_parsed == LCORE_OPT_MSK) ? "-c" : +- "--lcore"); ++ "--lcores"); + return -1; + } + +@@ -2142,7 +2162,7 @@ rte_vect_set_max_simd_bitwidth(uint16_t bitwidth) + struct internal_config *internal_conf = + eal_get_internal_configuration(); + if (internal_conf->max_simd_bitwidth.forced) { +- RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled"); ++ RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled\n"); + return -EPERM; + } + diff --git a/dpdk/lib/eal/common/eal_common_proc.c b/dpdk/lib/eal/common/eal_common_proc.c index 1fc1d6c53b..9676dd73c5 100644 --- a/dpdk/lib/eal/common/eal_common_proc.c @@ -60437,6 +79341,20 @@ index 1fc1d6c53b..9676dd73c5 100644 } else if (action(msg, s->sun_path) < 0) { RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name); } +diff --git a/dpdk/lib/eal/common/malloc_mp.c b/dpdk/lib/eal/common/malloc_mp.c +index 7270c2ec90..c95c4635d1 100644 +--- a/dpdk/lib/eal/common/malloc_mp.c ++++ b/dpdk/lib/eal/common/malloc_mp.c +@@ -755,7 +755,8 @@ request_to_primary(struct malloc_mp_req *user_req) + do { + ret = pthread_cond_timedwait(&entry->cond, + &mp_request_list.lock, &ts); +- } while (ret != 0 && ret != ETIMEDOUT); ++ } while ((ret != 0 && ret != ETIMEDOUT) && ++ entry->state == REQ_STATE_ACTIVE); + + if (entry->state != REQ_STATE_COMPLETE) { + RTE_LOG(ERR, EAL, "Request timed out\n"); diff --git a/dpdk/lib/eal/common/rte_malloc.c b/dpdk/lib/eal/common/rte_malloc.c index 48db264449..dbd8eae5b0 100644 --- a/dpdk/lib/eal/common/rte_malloc.c @@ -60677,8 +79595,21 @@ index 8c118d0d9f..336698379f 100644 eal_cleanup_config(internal_conf); rte_eal_log_cleanup(); return 0; +diff --git a/dpdk/lib/eal/linux/eal_dev.c b/dpdk/lib/eal/linux/eal_dev.c +index ac76f6174d..c0ba98852b 100644 +--- a/dpdk/lib/eal/linux/eal_dev.c ++++ b/dpdk/lib/eal/linux/eal_dev.c +@@ -182,6 +182,8 @@ dev_uev_parse(const char *buf, struct rte_dev_event *event, int length) + i += 14; + strlcpy(pci_slot_name, buf, sizeof(subsystem)); + event->devname = strdup(pci_slot_name); ++ if (event->devname == NULL) ++ return -1; + } + for (; i < length; i++) { + if (*buf == '\0') diff --git a/dpdk/lib/eal/linux/eal_hugepage_info.c b/dpdk/lib/eal/linux/eal_hugepage_info.c -index a1b6cb31ff..581d9dfc91 100644 +index a1b6cb31ff..36a495fb1f 100644 --- a/dpdk/lib/eal/linux/eal_hugepage_info.c +++ b/dpdk/lib/eal/linux/eal_hugepage_info.c @@ -50,7 +50,7 @@ map_shared_memory(const char *filename, const size_t mem_size, int flags) @@ -60736,10 +79667,41 @@ index a1b6cb31ff..581d9dfc91 100644 strlcpy(found, splitstr[MOUNTPT], len); } /* end while fgets */ +@@ -396,7 +403,7 @@ inspect_hugedir_cb(const struct walk_hugedir_data *whd) + struct stat st; + + if (fstat(whd->file_fd, &st) < 0) +- RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s", ++ RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s\n", + 
__func__, whd->file_name, strerror(errno)); + else + (*total_size) += st.st_size; +diff --git a/dpdk/lib/eal/linux/eal_interrupts.c b/dpdk/lib/eal/linux/eal_interrupts.c +index d52ec8eb4c..0b25dffe3b 100644 +--- a/dpdk/lib/eal/linux/eal_interrupts.c ++++ b/dpdk/lib/eal/linux/eal_interrupts.c +@@ -1542,7 +1542,7 @@ rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd) + /* only check, initialization would be done in vdev driver.*/ + if ((uint64_t)rte_intr_efd_counter_size_get(intr_handle) > + sizeof(union rte_intr_read_buffer)) { +- RTE_LOG(ERR, EAL, "the efd_counter_size is oversized"); ++ RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n"); + return -EINVAL; + } + } else { diff --git a/dpdk/lib/eal/linux/eal_memalloc.c b/dpdk/lib/eal/linux/eal_memalloc.c -index f8b1588cae..9853ec78a2 100644 +index f8b1588cae..b9fc83fe6a 100644 --- a/dpdk/lib/eal/linux/eal_memalloc.c +++ b/dpdk/lib/eal/linux/eal_memalloc.c +@@ -1061,7 +1061,7 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz, + /* memalloc is locked, so it's safe to use thread-unsafe version */ + ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa); + if (ret == 0) { +- RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n", ++ RTE_LOG(DEBUG, EAL, "%s(): couldn't find suitable memseg_list\n", + __func__); + ret = -1; + } else if (ret > 0) { @@ -1740,7 +1740,10 @@ eal_memalloc_init(void) eal_get_internal_configuration(); @@ -61091,7 +80053,7 @@ index adb929a014..56fadc7afe 100644 } diff --git a/dpdk/lib/eal/windows/eal_memory.c b/dpdk/lib/eal/windows/eal_memory.c -index 215d768e2c..31410a41fd 100644 +index 215d768e2c..fd39155163 100644 --- a/dpdk/lib/eal/windows/eal_memory.c +++ b/dpdk/lib/eal/windows/eal_memory.c @@ -72,10 +72,18 @@ static VirtualAlloc2_type VirtualAlloc2_ptr; @@ -61113,6 +80075,25 @@ index 215d768e2c..31410a41fd 100644 int eal_mem_win32api_init(void) +@@ -102,7 +110,7 @@ eal_mem_win32api_init(void) + VirtualAlloc2_ptr = (VirtualAlloc2_type)( + (void *)GetProcAddress(library, function)); + if (VirtualAlloc2_ptr == NULL) { +- RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")\n", ++ RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")", + library_name, function); + + /* Contrary to the docs, Server 2016 is not supported. 
*/ +diff --git a/dpdk/lib/eal/windows/include/meson.build b/dpdk/lib/eal/windows/include/meson.build +index 5fb1962ac7..e985a77d58 100644 +--- a/dpdk/lib/eal/windows/include/meson.build ++++ b/dpdk/lib/eal/windows/include/meson.build +@@ -6,4 +6,5 @@ includes += include_directories('.') + headers += files( + 'rte_os.h', + 'rte_windows.h', ++ 'sched.h', + ) diff --git a/dpdk/lib/eal/windows/include/pthread.h b/dpdk/lib/eal/windows/include/pthread.h index 27fd2cca52..f7cf0e9ddf 100644 --- a/dpdk/lib/eal/windows/include/pthread.h @@ -61215,6 +80196,93 @@ index d4d7a5cfc8..fd151be708 100644 rte_mov64((uint8_t *)dst, (const uint8_t *)src); dst = (uint8_t *)dst + 64; src = (const uint8_t *)src + 64; +diff --git a/dpdk/lib/eal/x86/rte_cycles.c b/dpdk/lib/eal/x86/rte_cycles.c +index 0e695caf28..2a601d7035 100644 +--- a/dpdk/lib/eal/x86/rte_cycles.c ++++ b/dpdk/lib/eal/x86/rte_cycles.c +@@ -6,6 +6,10 @@ + #include + #include + ++#define x86_vendor_amd(t1, t2, t3) \ ++ ((t1 == 0x68747541) && /* htuA */ \ ++ (t2 == 0x444d4163) && /* DMAc */ \ ++ (t3 == 0x69746e65)) /* itne */ + + #include "eal_private.h" + +@@ -90,6 +94,18 @@ get_tsc_freq_arch(void) + uint8_t mult, model; + int32_t ret; + ++#ifdef RTE_TOOLCHAIN_MSVC ++ __cpuid(cpuinfo, 0); ++ a = cpuinfo[0]; ++ b = cpuinfo[1]; ++ c = cpuinfo[2]; ++ d = cpuinfo[3]; ++#else ++ __cpuid(0, a, b, c, d); ++#endif ++ if (x86_vendor_amd(b, c, d)) ++ return 0; ++ + /* + * Time Stamp Counter and Nominal Core Crystal Clock + * Information Leaf +diff --git a/dpdk/lib/ethdev/ethdev_driver.c b/dpdk/lib/ethdev/ethdev_driver.c +index 0be1e8ca04..867c5de3bb 100644 +--- a/dpdk/lib/ethdev/ethdev_driver.c ++++ b/dpdk/lib/ethdev/ethdev_driver.c +@@ -275,15 +275,25 @@ rte_eth_dev_create(struct rte_device *device, const char *name, + return -ENODEV; + + if (priv_data_size) { ++ /* try alloc private data on device-local node. 
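The three constants in the rte_cycles.c hunk above are simply the CPUID vendor string "AuthenticAMD" viewed as little-endian 32-bit words (EBX = "Auth", EDX = "enti", ECX = "cAMD"), which is why the comments spell them backwards. A quick standalone demonstration, assuming a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const uint32_t ebx = 0x68747541, edx = 0x69746e65, ecx = 0x444d4163;
	char vendor[13];

	memcpy(vendor + 0, &ebx, 4);	/* "Auth" */
	memcpy(vendor + 4, &edx, 4);	/* "enti" */
	memcpy(vendor + 8, &ecx, 4);	/* "cAMD" */
	vendor[12] = '\0';
	puts(vendor);	/* prints "AuthenticAMD" */
	return 0;
}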
*/ + ethdev->data->dev_private = rte_zmalloc_socket( + name, priv_data_size, RTE_CACHE_LINE_SIZE, + device->numa_node); + +- if (!ethdev->data->dev_private) { +- RTE_ETHDEV_LOG(ERR, +- "failed to allocate private data\n"); +- retval = -ENOMEM; +- goto probe_failed; ++ /* fall back to alloc on any socket on failure */ ++ if (ethdev->data->dev_private == NULL) { ++ ethdev->data->dev_private = rte_zmalloc(name, ++ priv_data_size, RTE_CACHE_LINE_SIZE); ++ ++ if (ethdev->data->dev_private == NULL) { ++ RTE_ETHDEV_LOG(ERR, "failed to allocate private data\n"); ++ retval = -ENOMEM; ++ goto probe_failed; ++ } ++ /* got memory, but not local, so issue warning */ ++ RTE_ETHDEV_LOG(WARNING, ++ "Private data for ethdev '%s' not allocated on local NUMA node %d\n", ++ device->name, device->numa_node); + } + } + } else { +@@ -465,7 +475,7 @@ rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) + pair = &args.pairs[i]; + if (strcmp("representor", pair->key) == 0) { + if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { +- RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", ++ RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n", + dargs); + result = -1; + goto parse_cleanup; +@@ -691,7 +701,7 @@ rte_eth_representor_id_get(uint16_t port_id, + if (info->ranges[i].controller != controller) + continue; + if (info->ranges[i].id_end < info->ranges[i].id_base) { +- RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", ++ RTE_ETHDEV_LOG(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %d\n", + port_id, info->ranges[i].id_base, + info->ranges[i].id_end, i); + continue; diff --git a/dpdk/lib/ethdev/ethdev_driver.h b/dpdk/lib/ethdev/ethdev_driver.h index 6a550cfc83..cd2cd89649 100644 --- a/dpdk/lib/ethdev/ethdev_driver.h @@ -61233,10 +80301,49 @@ index 6a550cfc83..cd2cd89649 100644 /** Bitmap associating MAC addresses to pools */ uint64_t mac_pool_sel[RTE_ETH_NUM_RECEIVE_MAC_ADDR]; diff --git a/dpdk/lib/ethdev/ethdev_pci.h b/dpdk/lib/ethdev/ethdev_pci.h -index 94b8fba5d7..320e3e0093 100644 +index 94b8fba5d7..fbe74fdefe 100644 --- a/dpdk/lib/ethdev/ethdev_pci.h +++ b/dpdk/lib/ethdev/ethdev_pci.h -@@ -126,12 +126,13 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, +@@ -31,7 +31,7 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, + struct rte_pci_device *pci_dev) + { + if ((eth_dev == NULL) || (pci_dev == NULL)) { +- RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p", ++ RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p\n", + (void *)eth_dev, (void *)pci_dev); + return; + } +@@ -93,12 +93,26 @@ rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size) + return NULL; + + if (private_data_size) { ++ /* Try and alloc the private-data structure on socket local to the device */ + eth_dev->data->dev_private = rte_zmalloc_socket(name, + private_data_size, RTE_CACHE_LINE_SIZE, + dev->device.numa_node); +- if (!eth_dev->data->dev_private) { +- rte_eth_dev_release_port(eth_dev); +- return NULL; ++ ++ /* if cannot allocate memory on the socket local to the device ++ * use rte_malloc to allocate memory on some other socket, if available. 
++ */ ++ if (eth_dev->data->dev_private == NULL) { ++ eth_dev->data->dev_private = rte_zmalloc(name, ++ private_data_size, RTE_CACHE_LINE_SIZE); ++ ++ if (eth_dev->data->dev_private == NULL) { ++ rte_eth_dev_release_port(eth_dev); ++ return NULL; ++ } ++ /* got memory, but not local, so issue warning */ ++ RTE_ETHDEV_LOG(WARNING, ++ "Private data for ethdev '%s' not allocated on local NUMA node %d\n", ++ dev->device.name, dev->device.numa_node); + } + } + } else { +@@ -126,12 +140,13 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *eth_dev; int ret; @@ -61252,8 +80359,21 @@ index 94b8fba5d7..320e3e0093 100644 ret = dev_init(eth_dev); if (ret) rte_eth_dev_release_port(eth_dev); +diff --git a/dpdk/lib/ethdev/ethdev_private.c b/dpdk/lib/ethdev/ethdev_private.c +index 48090c879a..17f7933c99 100644 +--- a/dpdk/lib/ethdev/ethdev_private.c ++++ b/dpdk/lib/ethdev/ethdev_private.c +@@ -184,7 +184,7 @@ rte_eth_devargs_parse_representor_ports(char *str, void *data) + RTE_DIM(eth_da->representor_ports)); + done: + if (str == NULL) +- RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str); ++ RTE_ETHDEV_LOG(ERR, "wrong representor format: %s\n", str); + return str == NULL ? -1 : 0; + } + diff --git a/dpdk/lib/ethdev/rte_class_eth.c b/dpdk/lib/ethdev/rte_class_eth.c -index 838b3a8f9f..b61dae849d 100644 +index 838b3a8f9f..311beb17cb 100644 --- a/dpdk/lib/ethdev/rte_class_eth.c +++ b/dpdk/lib/ethdev/rte_class_eth.c @@ -67,7 +67,7 @@ eth_representor_cmp(const char *key __rte_unused, @@ -61265,10 +80385,28 @@ index 838b3a8f9f..b61dae849d 100644 if ((data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) return -1; /* not a representor port */ +@@ -165,7 +165,7 @@ eth_dev_iterate(const void *start, + valid_keys = eth_params_keys; + kvargs = rte_kvargs_parse(str, valid_keys); + if (kvargs == NULL) { +- RTE_LOG(ERR, EAL, "cannot parse argument list\n"); ++ RTE_ETHDEV_LOG(ERR, "cannot parse argument list\n"); + rte_errno = EINVAL; + return NULL; + } diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c -index 5d5e18db1e..4f50e2fa80 100644 +index 5d5e18db1e..e1f18fd8a4 100644 --- a/dpdk/lib/ethdev/rte_ethdev.c +++ b/dpdk/lib/ethdev/rte_ethdev.c +@@ -631,7 +631,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) + uint16_t pid; + + if (name == NULL) { +- RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); ++ RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n"); + return -EINVAL; + } + @@ -1192,7 +1192,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, } @@ -61280,6 +80418,122 @@ index 5d5e18db1e..4f50e2fa80 100644 ret = eth_dev_validate_mtu(port_id, &dev_info, dev->data->dev_conf.rxmode.mtu); +@@ -2095,41 +2097,41 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, + nb_rx_desc = cap.max_nb_desc; + if (nb_rx_desc > cap.max_nb_desc) { + RTE_ETHDEV_LOG(ERR, +- "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", ++ "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n", + nb_rx_desc, cap.max_nb_desc); + return -EINVAL; + } + if (conf->peer_count > cap.max_rx_2_tx) { + RTE_ETHDEV_LOG(ERR, +- "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", ++ "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n", + conf->peer_count, cap.max_rx_2_tx); + return -EINVAL; + } + if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to use locked device memory for Rx queue, which is not supported"); ++ 
"Attempt to use locked device memory for Rx queue, which is not supported\n"); + return -EINVAL; + } + if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to use DPDK memory for Rx queue, which is not supported"); ++ "Attempt to use DPDK memory for Rx queue, which is not supported\n"); + return -EINVAL; + } + if (conf->use_locked_device_memory && conf->use_rte_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to use mutually exclusive memory settings for Rx queue"); ++ "Attempt to use mutually exclusive memory settings for Rx queue\n"); + return -EINVAL; + } + if (conf->force_memory && + !conf->use_locked_device_memory && + !conf->use_rte_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to force Rx queue memory settings, but none is set"); ++ "Attempt to force Rx queue memory settings, but none is set\n"); + return -EINVAL; + } + if (conf->peer_count == 0) { + RTE_ETHDEV_LOG(ERR, +- "Invalid value for number of peers for Rx queue(=%u), should be: > 0", ++ "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n", + conf->peer_count); + return -EINVAL; + } +@@ -2139,7 +2141,7 @@ rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, + count++; + } + if (count > cap.max_nb_queues) { +- RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", ++ RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d\n", + cap.max_nb_queues); + return -EINVAL; + } +@@ -2284,41 +2286,41 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, + nb_tx_desc = cap.max_nb_desc; + if (nb_tx_desc > cap.max_nb_desc) { + RTE_ETHDEV_LOG(ERR, +- "Invalid value for nb_tx_desc(=%hu), should be: <= %hu", ++ "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n", + nb_tx_desc, cap.max_nb_desc); + return -EINVAL; + } + if (conf->peer_count > cap.max_tx_2_rx) { + RTE_ETHDEV_LOG(ERR, +- "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", ++ "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n", + conf->peer_count, cap.max_tx_2_rx); + return -EINVAL; + } + if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to use locked device memory for Tx queue, which is not supported"); ++ "Attempt to use locked device memory for Tx queue, which is not supported\n"); + return -EINVAL; + } + if (conf->use_rte_memory && !cap.tx_cap.rte_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to use DPDK memory for Tx queue, which is not supported"); ++ "Attempt to use DPDK memory for Tx queue, which is not supported\n"); + return -EINVAL; + } + if (conf->use_locked_device_memory && conf->use_rte_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to use mutually exclusive memory settings for Tx queue"); ++ "Attempt to use mutually exclusive memory settings for Tx queue\n"); + return -EINVAL; + } + if (conf->force_memory && + !conf->use_locked_device_memory && + !conf->use_rte_memory) { + RTE_ETHDEV_LOG(ERR, +- "Attempt to force Tx queue memory settings, but none is set"); ++ "Attempt to force Tx queue memory settings, but none is set\n"); + return -EINVAL; + } + if (conf->peer_count == 0) { + RTE_ETHDEV_LOG(ERR, +- "Invalid value for number of peers for Tx queue(=%u), should be: > 0", ++ "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n", + conf->peer_count); + return -EINVAL; + } +@@ -2328,7 +2330,7 @@ rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, + count++; + } + if (count > cap.max_nb_queues) { +- RTE_ETHDEV_LOG(ERR, "To many Tx hairpin 
queues max is %d", ++ RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d\n", + cap.max_nb_queues); + return -EINVAL; + } @@ -4362,6 +4364,11 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -61354,6 +80608,33 @@ index 5d5e18db1e..4f50e2fa80 100644 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); return 0; +@@ -6137,7 +6152,7 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id, + } + + if (reassembly_capa == NULL) { +- RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); ++ RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n"); + return -EINVAL; + } + +@@ -6167,7 +6182,7 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id, + } + + if (conf == NULL) { +- RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); ++ RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n"); + return -EINVAL; + } + +@@ -6190,7 +6205,7 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id, + if (dev->data->dev_configured == 0) { + RTE_ETHDEV_LOG(ERR, + "Device with port_id=%u is not configured.\n" +- "Cannot set IP reassembly configuration", ++ "Cannot set IP reassembly configuration\n", + port_id); + return -EINVAL; + } diff --git a/dpdk/lib/ethdev/rte_ethdev.h b/dpdk/lib/ethdev/rte_ethdev.h index c129ca1eaf..e73244822a 100644 --- a/dpdk/lib/ethdev/rte_ethdev.h @@ -61416,10 +80697,76 @@ index c129ca1eaf..e73244822a 100644 int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr); diff --git a/dpdk/lib/ethdev/rte_flow.c b/dpdk/lib/ethdev/rte_flow.c -index 7d0c24366c..ae22755ee6 100644 +index 7d0c24366c..aa43b8034d 100644 --- a/dpdk/lib/ethdev/rte_flow.c +++ b/dpdk/lib/ethdev/rte_flow.c -@@ -654,7 +654,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, +@@ -206,7 +206,7 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = { + sizeof(struct rte_flow_action_of_push_mpls)), + MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), + MK_FLOW_ACTION(VXLAN_DECAP, 0), +- MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), ++ MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)), + MK_FLOW_ACTION(NVGRE_DECAP, 0), + MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)), + MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)), +@@ -547,6 +547,7 @@ rte_flow_conv_item_spec(void *buf, const size_t size, + switch (item->type) { + union { + const struct rte_flow_item_raw *raw; ++ const struct rte_flow_item_geneve_opt *geneve_opt; + } spec; + union { + const struct rte_flow_item_raw *raw; +@@ -556,10 +557,13 @@ rte_flow_conv_item_spec(void *buf, const size_t size, + } mask; + union { + const struct rte_flow_item_raw *raw; ++ const struct rte_flow_item_geneve_opt *geneve_opt; + } src; + union { + struct rte_flow_item_raw *raw; ++ struct rte_flow_item_geneve_opt *geneve_opt; + } dst; ++ void *deep_src; + size_t tmp; + + case RTE_FLOW_ITEM_TYPE_RAW: +@@ -588,13 +592,30 @@ rte_flow_conv_item_spec(void *buf, const size_t size, + tmp = last.raw->length & mask.raw->length; + if (tmp) { + off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern)); +- if (size >= off + tmp) +- dst.raw->pattern = rte_memcpy +- ((void *)((uintptr_t)dst.raw + off), +- src.raw->pattern, tmp); ++ if (size >= off + tmp) { ++ deep_src = (void *)((uintptr_t)dst.raw + off); ++ dst.raw->pattern = rte_memcpy(deep_src, ++ src.raw->pattern, ++ tmp); ++ } + off += tmp; + } + break; ++ case 
RTE_FLOW_ITEM_TYPE_GENEVE_OPT: ++ off = rte_flow_conv_copy(buf, data, size, ++ rte_flow_desc_item, item->type); ++ spec.geneve_opt = item->spec; ++ src.geneve_opt = data; ++ dst.geneve_opt = buf; ++ tmp = spec.geneve_opt->option_len << 2; ++ if (size > 0 && src.geneve_opt->data) { ++ deep_src = (void *)((uintptr_t)(dst.geneve_opt + 1)); ++ dst.geneve_opt->data = rte_memcpy(deep_src, ++ src.geneve_opt->data, ++ tmp); ++ } ++ off += tmp; ++ break; + default: + off = rte_flow_conv_copy(buf, data, size, + rte_flow_desc_item, item->type); +@@ -654,7 +675,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, if (src.rss->key_len && src.rss->key) { off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key)); tmp = sizeof(*src.rss->key) * src.rss->key_len; @@ -61428,7 +80775,7 @@ index 7d0c24366c..ae22755ee6 100644 dst.rss->key = rte_memcpy ((void *)((uintptr_t)dst.rss + off), src.rss->key, tmp); -@@ -663,7 +663,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, +@@ -663,7 +684,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size, if (src.rss->queue_num) { off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue)); tmp = sizeof(*src.rss->queue) * src.rss->queue_num; @@ -61437,7 +80784,7 @@ index 7d0c24366c..ae22755ee6 100644 dst.rss->queue = rte_memcpy ((void *)((uintptr_t)dst.rss + off), src.rss->queue, tmp); -@@ -855,7 +855,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst, +@@ -855,7 +876,15 @@ rte_flow_conv_actions(struct rte_flow_action *dst, src -= num; dst -= num; do { @@ -61454,7 +80801,7 @@ index 7d0c24366c..ae22755ee6 100644 off = RTE_ALIGN_CEIL(off, sizeof(double)); ret = rte_flow_conv_action_conf ((void *)(data + off), -@@ -1879,6 +1887,8 @@ rte_flow_async_action_handle_query(uint16_t port_id, +@@ -1879,6 +1908,8 @@ rte_flow_async_action_handle_query(uint16_t port_id, const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error); int ret; @@ -61463,6 +80810,49 @@ index 7d0c24366c..ae22755ee6 100644 ret = ops->async_action_handle_query(dev, queue_id, op_attr, action_handle, data, user_data, error); return flow_err(port_id, ret, error); +diff --git a/dpdk/lib/ethdev/rte_flow.h b/dpdk/lib/ethdev/rte_flow.h +index b60987db4b..706ffba596 100644 +--- a/dpdk/lib/ethdev/rte_flow.h ++++ b/dpdk/lib/ethdev/rte_flow.h +@@ -3138,7 +3138,7 @@ struct rte_flow_action_vxlan_encap { + */ + struct rte_flow_action_nvgre_encap { + /** +- * Encapsulating vxlan tunnel definition ++ * Encapsulating nvgre tunnel definition + * (terminated by the END pattern item). 
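The GENEVE-option branch added to rte_flow_conv above follows the converter's two-pass contract: always accumulate the size the full conversion needs, and deep-copy the variable-length payload behind the fixed header only when a buffer was supplied. A stricter variant of that idiom with a hypothetical struct in place of the rte_flow types; calling it once with size 0 yields the space to allocate for the second call:

#include <stddef.h>
#include <string.h>

struct blob { size_t len; const void *data; };

/* convert @src into @buf: shallow-copy the header, deep-copy the payload
 * right behind it, and return the total size the full conversion needs */
static size_t
blob_conv(void *buf, size_t size, const struct blob *src)
{
	struct blob *dst = buf;
	size_t off = sizeof(*dst);

	if (size >= off)
		*dst = *src;
	if (src->data != NULL && src->len != 0) {
		if (size >= off + src->len) {
			memcpy((char *)buf + off, src->data, src->len);
			dst->data = (char *)buf + off;	/* point at the copy */
		}
		off += src->len;
	}
	return off;
}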
+ */ + struct rte_flow_item *definition; +diff --git a/dpdk/lib/eventdev/eventdev_pmd.h b/dpdk/lib/eventdev/eventdev_pmd.h +index aebab26852..0d8e039fa4 100644 +--- a/dpdk/lib/eventdev/eventdev_pmd.h ++++ b/dpdk/lib/eventdev/eventdev_pmd.h +@@ -49,14 +49,14 @@ extern "C" { + /* Macros to check for valid device */ + #define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \ + if (!rte_event_pmd_is_valid_dev((dev_id))) { \ +- RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \ ++ RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \ + return retval; \ + } \ + } while (0) + + #define RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, errno, retval) do { \ + if (!rte_event_pmd_is_valid_dev((dev_id))) { \ +- RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \ ++ RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \ + rte_errno = errno; \ + return retval; \ + } \ +@@ -64,7 +64,7 @@ extern "C" { + + #define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \ + if (!rte_event_pmd_is_valid_dev((dev_id))) { \ +- RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \ ++ RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \ + return; \ + } \ + } while (0) diff --git a/dpdk/lib/eventdev/eventdev_pmd_vdev.h b/dpdk/lib/eventdev/eventdev_pmd_vdev.h index 5fa9d699ac..bb433ba955 100644 --- a/dpdk/lib/eventdev/eventdev_pmd_vdev.h @@ -61485,12 +80875,55 @@ index 5fa9d699ac..bb433ba955 100644 return eventdev; } diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.c b/dpdk/lib/eventdev/rte_event_crypto_adapter.c -index 3c585d7b0d..ea50e405a8 100644 +index 3c585d7b0d..258be0f339 100644 --- a/dpdk/lib/eventdev/rte_event_crypto_adapter.c +++ b/dpdk/lib/eventdev/rte_event_crypto_adapter.c -@@ -240,9 +240,18 @@ eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, +@@ -126,11 +126,33 @@ static struct event_crypto_adapter **event_crypto_adapter; + /* Macros to check for valid adapter */ + #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ + if (!eca_valid_id(id)) { \ +- RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \ ++ RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \ + return retval; \ + } \ + } while (0) + ++#define ECA_DYNFIELD_NAME "eca_ev_opaque_data" ++/* Device-specific metadata field type */ ++typedef uint8_t eca_dynfield_t; ++ ++/* mbuf dynamic field offset for device-specific metadata */ ++int eca_dynfield_offset = -1; ++ ++static int ++eca_dynfield_register(void) ++{ ++ static const struct rte_mbuf_dynfield eca_dynfield_desc = { ++ .name = ECA_DYNFIELD_NAME, ++ .size = sizeof(eca_dynfield_t), ++ .align = __alignof__(eca_dynfield_t), ++ .flags = 0, ++ }; ++ ++ eca_dynfield_offset = ++ rte_mbuf_dynfield_register(&eca_dynfield_desc); ++ return eca_dynfield_offset; ++} ++ + static inline int + eca_valid_id(uint8_t id) + { +@@ -237,12 +259,29 @@ eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, + struct rte_crypto_op **ops = bufp->op_buffer; + + if (*tailp > *headp) ++ /* Flush ops from head pointer to (tail - head) OPs */ n = *tailp - *headp; else if (*tailp < *headp) ++ /* Circ buffer - Rollover. ++ * Flush OPs from head to max size of buffer. ++ * Rest of the OPs will be flushed in next iteration. ++ */ n = bufp->size - *headp; - else { - *nb_ops_flushed = 0; @@ -61500,17 +80933,73 @@ index 3c585d7b0d..ea50e405a8 100644 + * circ buff is either full(tail pointer roll over) or empty + */ + if (bufp->count != 0) { -+ /* circ buffer is full */ -+ n = bufp->count; ++ /* Circ buffer - FULL. ++ * Flush OPs from head to max size of buffer. 
++ * Rest of the OPS will be flushed in next iteration. ++ */ ++ n = bufp->size - *headp; + } else { -+ /* circ buffer is empty */ ++ /* Circ buffer - Empty */ + *nb_ops_flushed = 0; -+ return 0; /* buffer empty */ ++ return 0; + } } *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id, -@@ -497,6 +506,9 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, +@@ -289,7 +328,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id, + dev_conf.nb_event_ports += 1; + ret = rte_event_dev_configure(dev_id, &dev_conf); + if (ret) { +- RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id); ++ RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id); + if (started) { + if (rte_event_dev_start(dev_id)) + return -EIO; +@@ -299,7 +338,7 @@ eca_default_config_cb(uint8_t id, uint8_t dev_id, + + ret = rte_event_port_setup(dev_id, port_id, port_conf); + if (ret) { +- RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id); ++ RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id); + return ret; + } + +@@ -383,7 +422,7 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id, + sizeof(struct crypto_device_info), 0, + socket_id); + if (adapter->cdevs == NULL) { +- RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n"); ++ RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices"); + eca_circular_buffer_free(&adapter->ebuf); + rte_free(adapter); + return -ENOMEM; +@@ -475,6 +514,25 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, + crypto_op = ev[i].event_ptr; + if (crypto_op == NULL) + continue; ++ ++ /** "struct rte_event::impl_opaque" field passed on from ++ * eventdev PMD could have different value per event. ++ * For session-based crypto operations retain ++ * "struct rte_event::impl_opaque" into mbuf dynamic field and ++ * restore it back after copying event information from ++ * session event metadata. ++ * For session-less, each crypto operation carries event ++ * metadata and retains "struct rte_event:impl_opaque" ++ * information to be passed back to eventdev PMD. 
++ */ ++ if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { ++ struct rte_mbuf *mbuf = crypto_op->sym->m_src; ++ ++ *RTE_MBUF_DYNFIELD(mbuf, ++ eca_dynfield_offset, ++ eca_dynfield_t *) = ev[i].impl_opaque; ++ } ++ + m_data = rte_cryptodev_session_event_mdata_get(crypto_op); + if (m_data == NULL) { + rte_pktmbuf_free(crypto_op->sym->m_src); +@@ -497,6 +555,9 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, cdev_id, qp_id, &nb_enqueued); @@ -61520,7 +81009,7 @@ index 3c585d7b0d..ea50e405a8 100644 /** * If some crypto ops failed to flush to cdev and * space for another batch is not available, stop -@@ -507,9 +519,6 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, +@@ -507,9 +568,6 @@ eca_enq_to_cryptodev(struct event_crypto_adapter *adapter, struct rte_event *ev, &qp_info->cbuf))) adapter->stop_enq_to_cryptodev = true; } @@ -61530,7 +81019,7 @@ index 3c585d7b0d..ea50e405a8 100644 } return n; -@@ -585,14 +594,15 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, +@@ -585,14 +643,15 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) return 0; @@ -61552,7 +81041,7 @@ index 3c585d7b0d..ea50e405a8 100644 stats->event_poll_count++; n = rte_event_dequeue_burst(event_dev_id, event_port_id, ev, BATCH_SIZE, 0); -@@ -603,8 +613,6 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, +@@ -603,8 +662,6 @@ eca_crypto_adapter_enq_run(struct event_crypto_adapter *adapter, nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n); } @@ -61561,7 +81050,29 @@ index 3c585d7b0d..ea50e405a8 100644 if ((++adapter->transmit_loop_count & (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) { nb_enqueued += eca_crypto_enq_flush(adapter); -@@ -681,7 +689,7 @@ eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter, +@@ -642,6 +699,21 @@ eca_ops_enqueue_burst(struct event_crypto_adapter *adapter, + + rte_memcpy(ev, &m_data->response_info, sizeof(*ev)); + ev->event_ptr = ops[i]; ++ ++ /** Restore "struct rte_event::impl_opaque" from mbuf ++ * dynamic field for session based crypto operation. ++ * For session-less, each crypto operations carries event ++ * metadata and retains "struct rte_event::impl_opaque" ++ * information to be passed back to eventdev PMD. 
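The adapter changes above park each event's impl_opaque in an mbuf dynamic field so it survives the round trip through the cryptodev. rte_mbuf_dynfield_register() and RTE_MBUF_DYNFIELD() are the real DPDK APIs; the field name and the wrapper helpers below are illustrative only:

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int ev_meta_offset = -1;

/* reserve a one-byte field in the mbuf; negative return means failure */
static int
ev_meta_register(void)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "example_ev_meta",	/* illustrative name */
		.size = sizeof(uint8_t),
		.align = __alignof__(uint8_t),
	};

	ev_meta_offset = rte_mbuf_dynfield_register(&desc);
	return ev_meta_offset;
}

static void
ev_meta_set(struct rte_mbuf *m, uint8_t v)
{
	*RTE_MBUF_DYNFIELD(m, ev_meta_offset, uint8_t *) = v;
}

static uint8_t
ev_meta_get(struct rte_mbuf *m)
{
	return *RTE_MBUF_DYNFIELD(m, ev_meta_offset, uint8_t *);
}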
++ */ ++ if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { ++ struct rte_mbuf *mbuf = ops[i]->sym->m_src; ++ ++ ev->impl_opaque = *RTE_MBUF_DYNFIELD(mbuf, ++ eca_dynfield_offset, ++ eca_dynfield_t *); ++ } ++ + ev->event_type = RTE_EVENT_TYPE_CRYPTODEV; + if (adapter->implicit_release_disabled) + ev->op = RTE_EVENT_OP_FORWARD; +@@ -681,7 +753,7 @@ eca_circular_buffer_flush_to_evdev(struct event_crypto_adapter *adapter, else return 0; /* buffer empty */ @@ -61570,7 +81081,7 @@ index 3c585d7b0d..ea50e405a8 100644 bufp->count -= nb_ops_flushed; if (!bufp->count) { *headp = 0; -@@ -766,7 +774,7 @@ eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter, +@@ -766,7 +838,7 @@ eca_crypto_adapter_deq_run(struct event_crypto_adapter *adapter, for (i = nb_enqueued; i < n; i++) eca_circular_buffer_add( &adapter->ebuf, @@ -61579,6 +81090,25 @@ index 3c585d7b0d..ea50e405a8 100644 check: nb_deq += n; +@@ -866,6 +938,18 @@ eca_init_service(struct event_crypto_adapter *adapter, uint8_t id) + + adapter->max_nb = adapter_conf.max_nb; + adapter->event_port_id = adapter_conf.event_port_id; ++ ++ /** Register for mbuf dyn field to store/restore ++ * "struct rte_event::impl_opaque" ++ */ ++ eca_dynfield_offset = eca_dynfield_register(); ++ if (eca_dynfield_offset < 0) { ++ RTE_EDEV_LOG_ERR("Failed to register eca mbuf dyn field"); ++ eca_circular_buffer_free(&adapter->ebuf); ++ rte_free(adapter); ++ return -EINVAL; ++ } ++ + adapter->service_inited = 1; + + return ret; diff --git a/dpdk/lib/eventdev/rte_event_crypto_adapter.h b/dpdk/lib/eventdev/rte_event_crypto_adapter.h index 83d154a6ce..2a69290097 100644 --- a/dpdk/lib/eventdev/rte_event_crypto_adapter.h @@ -61602,9 +81132,87 @@ index 83d154a6ce..2a69290097 100644 uint8_t dev_id, uint16_t cdev_id, struct rte_event_crypto_adapter_vector_limits *limits); diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c -index cf7bbd4d69..6636128378 100644 +index cf7bbd4d69..b4f05f250c 100644 --- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +@@ -290,14 +290,14 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + + #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ + if (!rxa_validate_id(id)) { \ +- RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ ++ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \ + return retval; \ + } \ + } while (0) + + #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \ + if (!rxa_validate_id(id)) { \ +- RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ ++ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \ + ret = retval; \ + goto error; \ + } \ +@@ -305,15 +305,15 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + + #define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \ + if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \ +- RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \ ++ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token"); \ + ret = retval; \ + goto error; \ + } \ + } while (0) + +-#define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \ ++#define RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(port_id, retval) do { \ + if (!rte_eth_dev_is_valid_port(port_id)) { \ +- RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \ ++ RTE_EDEV_LOG_ERR("Invalid port_id=%u", port_id); \ + ret = retval; \ + goto error; \ + } \ +@@ 
-1534,7 +1534,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id, + dev_conf.nb_event_ports += 1; + ret = rte_event_dev_configure(dev_id, &dev_conf); + if (ret) { +- RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", ++ RTE_EDEV_LOG_ERR("failed to configure event dev %u", + dev_id); + if (started) { + if (rte_event_dev_start(dev_id)) +@@ -1545,7 +1545,7 @@ rxa_default_conf_cb(uint8_t id, uint8_t dev_id, + + ret = rte_event_port_setup(dev_id, port_id, port_conf); + if (ret) { +- RTE_EDEV_LOG_ERR("failed to setup event port %u\n", ++ RTE_EDEV_LOG_ERR("failed to setup event port %u", + port_id); + return ret; + } +@@ -1622,7 +1622,7 @@ rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter) + if (!err) + return 0; + +- RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err); ++ RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d", err); + rte_free(rx_adapter->epoll_events); + error: + rte_ring_free(rx_adapter->intr_ring); +@@ -1638,12 +1638,12 @@ rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter) + + err = pthread_cancel(rx_adapter->rx_intr_thread); + if (err) +- RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n", ++ RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d", + err); + + err = pthread_join(rx_adapter->rx_intr_thread, NULL); + if (err) +- RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err); ++ RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d", err); + + rte_free(rx_adapter->epoll_events); + rte_ring_free(rx_adapter->intr_ring); @@ -1906,6 +1906,13 @@ rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id) if (rx_adapter->service_inited) return 0; @@ -61612,13 +81220,22 @@ index cf7bbd4d69..6636128378 100644 + if (rte_mbuf_dyn_rx_timestamp_register( + &event_eth_rx_timestamp_dynfield_offset, + &event_eth_rx_timestamp_dynflag) != 0) { -+ RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); ++ RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf"); + return -rte_errno; + } + memset(&service, 0, sizeof(service)); snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN, "rte_event_eth_rx_adapter_%d", id); +@@ -2432,7 +2439,7 @@ rxa_create(uint8_t id, uint8_t dev_id, + RTE_DIM(default_rss_key)); + + if (rx_adapter->eth_devices == NULL) { +- RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n"); ++ RTE_EDEV_LOG_ERR("failed to get mem for eth devices"); + rte_free(rx_adapter); + return -ENOMEM; + } @@ -2468,13 +2475,6 @@ rxa_create(uint8_t id, uint8_t dev_id, if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -61652,10 +81269,73 @@ index cf7bbd4d69..6636128378 100644 } /* return if entry found */ +@@ -3453,7 +3449,7 @@ handle_rxa_stats(const char *cmd __rte_unused, + /* Get Rx adapter stats */ + if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id, + &rx_adptr_stats)) { +- RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n"); ++ RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats"); + return -1; + } + +@@ -3490,7 +3486,7 @@ handle_rxa_stats_reset(const char *cmd __rte_unused, + + /* Reset Rx adapter stats */ + if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) { +- RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n"); ++ RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats"); + return -1; + } + +@@ -3525,7 +3521,7 @@ handle_rxa_get_queue_conf(const char *cmd __rte_unused, + + /* Get device ID from parameter string */ + eth_dev_id = strtoul(token, NULL, 10); +- RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); ++ 
RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + + token = strtok(NULL, ","); + RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); +@@ -3597,7 +3593,7 @@ handle_rxa_get_queue_stats(const char *cmd __rte_unused, + + /* Get device ID from parameter string */ + eth_dev_id = strtoul(token, NULL, 10); +- RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); ++ RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + + token = strtok(NULL, ","); + RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); +@@ -3667,7 +3663,7 @@ handle_rxa_queue_stats_reset(const char *cmd __rte_unused, + + /* Get device ID from parameter string */ + eth_dev_id = strtoul(token, NULL, 10); +- RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); ++ RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + + token = strtok(NULL, ","); + RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); +@@ -3722,7 +3718,7 @@ handle_rxa_instance_get(const char *cmd __rte_unused, + + /* Get device ID from parameter string */ + eth_dev_id = strtoul(token, NULL, 10); +- RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); ++ RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + + token = strtok(NULL, ","); + RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); diff --git a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c -index 88309d2aaa..ba7a1c7f1b 100644 +index 88309d2aaa..9bb87fc5f9 100644 --- a/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_tx_adapter.c +@@ -330,7 +330,7 @@ txa_service_conf_cb(uint8_t __rte_unused id, uint8_t dev_id, + + ret = rte_event_port_setup(dev_id, port_id, pc); + if (ret) { +- RTE_EDEV_LOG_ERR("failed to setup event port %u\n", ++ RTE_EDEV_LOG_ERR("failed to setup event port %u", + port_id); + if (started) { + if (rte_event_dev_start(dev_id)) @@ -676,7 +676,7 @@ txa_service_func(void *args) RTE_ETH_FOREACH_DEV(i) { uint16_t q; @@ -61666,7 +81346,7 @@ index 88309d2aaa..ba7a1c7f1b 100644 dev = tdi[i].dev; diff --git a/dpdk/lib/eventdev/rte_event_timer_adapter.c b/dpdk/lib/eventdev/rte_event_timer_adapter.c -index a0f14bf861..a13ddce627 100644 +index a0f14bf861..34968f3105 100644 --- a/dpdk/lib/eventdev/rte_event_timer_adapter.c +++ b/dpdk/lib/eventdev/rte_event_timer_adapter.c @@ -17,6 +17,7 @@ @@ -61677,6 +81357,24 @@ index a0f14bf861..a13ddce627 100644 #include "event_timer_adapter_pmd.h" #include "eventdev_pmd.h" +@@ -91,7 +92,7 @@ default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id, + dev_conf.nb_event_ports += 1; + ret = rte_event_dev_configure(dev_id, &dev_conf); + if (ret < 0) { +- EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id); ++ EVTIM_LOG_ERR("failed to configure event dev %u", dev_id); + if (started) + if (rte_event_dev_start(dev_id)) + return -EIO; +@@ -111,7 +112,7 @@ default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id, + + ret = rte_event_port_setup(dev_id, port_id, port_conf); + if (ret < 0) { +- EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n", ++ EVTIM_LOG_ERR("failed to setup event port %u on event dev %u", + port_id, dev_id); + return ret; + } @@ -699,13 +700,51 @@ swtim_callback(struct rte_timer *tim) } } @@ -61831,7 +81529,7 @@ index a0f14bf861..a13ddce627 100644 type, lcore_id, NULL, evtims[i]); if (ret < 0) { diff --git a/dpdk/lib/eventdev/rte_eventdev.c 
b/dpdk/lib/eventdev/rte_eventdev.c -index b0414206d9..04eeb76d4f 100644 +index b0414206d9..4a2fe987fd 100644 --- a/dpdk/lib/eventdev/rte_eventdev.c +++ b/dpdk/lib/eventdev/rte_eventdev.c @@ -99,6 +99,8 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) @@ -61843,6 +81541,35 @@ index b0414206d9..04eeb76d4f 100644 return 0; } +@@ -930,7 +932,7 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, + dev = &rte_eventdevs[dev_id]; + + if (*dev->dev_ops->port_link == NULL) { +- RTE_EDEV_LOG_ERR("Function not supported\n"); ++ RTE_EDEV_LOG_ERR("Function not supported"); + rte_errno = ENOTSUP; + return 0; + } +@@ -1261,8 +1263,8 @@ rte_event_vector_pool_create(const char *name, unsigned int n, + int ret; + + if (!nb_elem) { +- RTE_LOG(ERR, EVENTDEV, +- "Invalid number of elements=%d requested\n", nb_elem); ++ RTE_EDEV_LOG_ERR("Invalid number of elements=%d requested", ++ nb_elem); + rte_errno = EINVAL; + return NULL; + } +@@ -1277,7 +1279,7 @@ rte_event_vector_pool_create(const char *name, unsigned int n, + mp_ops_name = rte_mbuf_best_mempool_ops(); + ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL); + if (ret != 0) { +- RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n"); ++ RTE_EDEV_LOG_ERR("error setting mempool handler"); + goto err; + } + @@ -1678,7 +1680,7 @@ eventdev_build_telemetry_data(int dev_id, if (xstat_names == NULL) return -1; @@ -61852,6 +81579,140 @@ index b0414206d9..04eeb76d4f 100644 if (ids == NULL) { free(xstat_names); return -1; +@@ -1832,7 +1834,7 @@ handle_dev_dump(const char *cmd __rte_unused, + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + +- buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); ++ buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); + if (buf == NULL) + return -ENOMEM; + +diff --git a/dpdk/lib/eventdev/rte_eventdev.h b/dpdk/lib/eventdev/rte_eventdev.h +index a90e23ac8b..d0e2463bb8 100644 +--- a/dpdk/lib/eventdev/rte_eventdev.h ++++ b/dpdk/lib/eventdev/rte_eventdev.h +@@ -507,9 +507,9 @@ rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, + struct rte_event_dev_config { + uint32_t dequeue_timeout_ns; + /**< rte_event_dequeue_burst() timeout on this device. +- * This value should be in the range of *min_dequeue_timeout_ns* and +- * *max_dequeue_timeout_ns* which previously provided in +- * rte_event_dev_info_get() ++ * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and ++ * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by ++ * @ref rte_event_dev_info_get() + * The value 0 is allowed, in which case, default dequeue timeout used. + * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT + */ +@@ -517,40 +517,53 @@ struct rte_event_dev_config { + /**< In a *closed system* this field is the limit on maximum number of + * events that can be inflight in the eventdev at a given time. The + * limit is required to ensure that the finite space in a closed system +- * is not overwhelmed. The value cannot exceed the *max_num_events* +- * as provided by rte_event_dev_info_get(). +- * This value should be set to -1 for *open system*. ++ * is not exhausted. ++ * The value cannot exceed @ref rte_event_dev_info.max_num_events ++ * returned by rte_event_dev_info_get(). ++ * ++ * This value should be set to -1 for *open systems*, that is, ++ * those systems returning -1 in @ref rte_event_dev_info.max_num_events. ++ * ++ * @see rte_event_port_conf.new_event_threshold + */ + uint8_t nb_event_queues; + /**< Number of event queues to configure on this device. 
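The doc rework in progress here replaces vague cross-references ("which previously provided in rte_event_dev_info_get()") with explicit @ref links from each rte_event_dev_config field to its rte_event_dev_info limit. In practice that contract reads as below — a hedged configuration sketch against the public eventdev API, with dev_id and the queue/port counts as placeholders:

```c
#include <rte_eventdev.h>

/* Configure an event device within the limits the driver reports.
 * dev_id and the fixed counts are illustrative placeholders. */
static int
configure_evdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = { 0 };
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* Must lie in [min, max] dequeue timeout reported by the PMD. */
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	/* max_num_events is -1 for an open system, which is also the
	 * value nb_events_limit should take in that case. */
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queues = RTE_MIN(2, (int)info.max_event_queues);
	cfg.nb_event_ports = RTE_MIN(2, (int)info.max_event_ports);
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

	return rte_event_dev_configure(dev_id, &cfg);
}
```

Note, per the clarified text below, that nb_event_queues and nb_event_ports *include* any single-link queue-port pairs the application intends to use.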
+- * This value cannot exceed the *max_event_queues* which previously +- * provided in rte_event_dev_info_get() ++ * This value *includes* any single-link queue-port pairs to be used. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_queues + ++ * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs ++ * returned by rte_event_dev_info_get(). ++ * The number of non-single-link queues i.e. this value less ++ * *nb_single_link_event_port_queues* in this struct, cannot exceed ++ * @ref rte_event_dev_info.max_event_queues + */ + uint8_t nb_event_ports; + /**< Number of event ports to configure on this device. +- * This value cannot exceed the *max_event_ports* which previously +- * provided in rte_event_dev_info_get() ++ * This value *includes* any single-link queue-port pairs to be used. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_ports + ++ * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs ++ * returned by rte_event_dev_info_get(). ++ * The number of non-single-link ports i.e. this value less ++ * *nb_single_link_event_port_queues* in this struct, cannot exceed ++ * @ref rte_event_dev_info.max_event_ports + */ + uint32_t nb_event_queue_flows; +- /**< Number of flows for any event queue on this device. +- * This value cannot exceed the *max_event_queue_flows* which previously +- * provided in rte_event_dev_info_get() ++ /**< Max number of flows needed for a single event queue on this device. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows ++ * returned by rte_event_dev_info_get() + */ + uint32_t nb_event_port_dequeue_depth; +- /**< Maximum number of events can be dequeued at a time from an +- * event port by this device. +- * This value cannot exceed the *max_event_port_dequeue_depth* +- * which previously provided in rte_event_dev_info_get(). ++ /**< Max number of events that can be dequeued at a time from an event port on this device. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth ++ * returned by rte_event_dev_info_get(). + * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. +- * @see rte_event_port_setup() ++ * @see rte_event_port_setup() rte_event_dequeue_burst() + */ + uint32_t nb_event_port_enqueue_depth; +- /**< Maximum number of events can be enqueued at a time from an +- * event port by this device. +- * This value cannot exceed the *max_event_port_enqueue_depth* +- * which previously provided in rte_event_dev_info_get(). ++ /**< Maximum number of events can be enqueued at a time to an event port on this device. ++ * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth ++ * returned by rte_event_dev_info_get(). + * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. +- * @see rte_event_port_setup() ++ * @see rte_event_port_setup() rte_event_enqueue_burst() + */ + uint32_t event_dev_cfg; + /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/ +@@ -560,7 +573,7 @@ struct rte_event_dev_config { + * queues; this value cannot exceed *nb_event_ports* or + * *nb_event_queues*. If the device has ports and queues that are + * optimized for single-link usage, this field is a hint for how many +- * to allocate; otherwise, regular event ports and queues can be used. ++ * to allocate; otherwise, regular event ports and queues will be used. + */ + }; + +@@ -1094,10 +1107,8 @@ struct rte_event_vector { + * port and queue of the mbufs in the vector + */ + struct { +- uint16_t port; +- /* Ethernet device port id. 
*/ +- uint16_t queue; +- /* Ethernet device queue id. */ ++ uint16_t port; /**< Ethernet device port id. */ ++ uint16_t queue; /**< Ethernet device queue id. */ + }; + }; + /**< Union to hold common attributes of the vector array. */ +@@ -1126,7 +1137,11 @@ struct rte_event_vector { + * vector array can be an array of mbufs or pointers or opaque u64 + * values. + */ ++#ifndef __DOXYGEN__ + } __rte_aligned(16); ++#else ++}; ++#endif + + /* Scheduler type definitions */ + #define RTE_SCHED_TYPE_ORDERED 0 diff --git a/dpdk/lib/eventdev/version.map b/dpdk/lib/eventdev/version.map index dd63ec6f68..56000271a4 100644 --- a/dpdk/lib/eventdev/version.map @@ -61995,6 +81856,140 @@ index 89a118f357..d21fadc052 100644 sources = files( 'gpudev.c', ) +diff --git a/dpdk/lib/graph/graph.c b/dpdk/lib/graph/graph.c +index 3a617cc369..9027e62a26 100644 +--- a/dpdk/lib/graph/graph.c ++++ b/dpdk/lib/graph/graph.c +@@ -18,11 +18,54 @@ + + static struct graph_head graph_list = STAILQ_HEAD_INITIALIZER(graph_list); + static rte_spinlock_t graph_lock = RTE_SPINLOCK_INITIALIZER; +-static rte_graph_t graph_id; +- +-#define GRAPH_ID_CHECK(id) ID_CHECK(id, graph_id) + + /* Private functions */ ++static struct graph * ++graph_from_id(rte_graph_t id) ++{ ++ struct graph *graph; ++ STAILQ_FOREACH(graph, &graph_list, next) { ++ if (graph->id == id) ++ return graph; ++ } ++ rte_errno = EINVAL; ++ return NULL; ++} ++ ++static rte_graph_t ++graph_next_free_id(void) ++{ ++ struct graph *graph; ++ rte_graph_t id = 0; ++ ++ STAILQ_FOREACH(graph, &graph_list, next) { ++ if (id < graph->id) ++ break; ++ id = graph->id + 1; ++ } ++ ++ return id; ++} ++ ++static void ++graph_insert_ordered(struct graph *graph) ++{ ++ struct graph *after, *g; ++ ++ after = NULL; ++ STAILQ_FOREACH(g, &graph_list, next) { ++ if (g->id < graph->id) ++ after = g; ++ else if (g->id > graph->id) ++ break; ++ } ++ if (after == NULL) { ++ STAILQ_INSERT_HEAD(&graph_list, graph, next); ++ } else { ++ STAILQ_INSERT_AFTER(&graph_list, after, graph, next); ++ } ++} ++ + struct graph_head * + graph_list_head_get(void) + { +@@ -327,7 +370,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm) + graph->socket = prm->socket_id; + graph->src_node_count = src_node_count; + graph->node_count = graph_nodes_count(graph); +- graph->id = graph_id; ++ graph->id = graph_next_free_id(); + + /* Allocate the Graph fast path memory and populate the data */ + if (graph_fp_mem_create(graph)) +@@ -338,8 +381,7 @@ rte_graph_create(const char *name, struct rte_graph_param *prm) + goto graph_mem_destroy; + + /* All good, Lets add the graph to the list */ +- graph_id++; +- STAILQ_INSERT_TAIL(&graph_list, graph, next); ++ graph_insert_ordered(graph); + + graph_spinlock_unlock(); + return graph->id; +@@ -378,7 +420,6 @@ rte_graph_destroy(rte_graph_t id) + graph_cleanup(graph); + STAILQ_REMOVE(&graph_list, graph, graph, next); + free(graph); +- graph_id--; + goto done; + } + graph = tmp; +@@ -405,7 +446,8 @@ rte_graph_id_to_name(rte_graph_t id) + { + struct graph *graph; + +- GRAPH_ID_CHECK(id); ++ if (graph_from_id(id) == NULL) ++ goto fail; + STAILQ_FOREACH(graph, &graph_list, next) + if (graph->id == id) + return graph->name; +@@ -422,7 +464,8 @@ rte_graph_node_get(rte_graph_t gid, uint32_t nid) + rte_graph_off_t off; + rte_node_t count; + +- GRAPH_ID_CHECK(gid); ++ if (graph_from_id(gid) == NULL) ++ goto fail; + STAILQ_FOREACH(graph, &graph_list, next) + if (graph->id == gid) { + rte_graph_foreach_node(count, off, graph->graph, +@@ -547,7 +590,8 @@ graph_scan_dump(FILE 
*f, rte_graph_t id, bool all) + struct graph *graph; + + RTE_VERIFY(f); +- GRAPH_ID_CHECK(id); ++ if (graph_from_id(id) == NULL) ++ goto fail; + + STAILQ_FOREACH(graph, &graph_list, next) { + if (all == true) { +@@ -576,7 +620,13 @@ rte_graph_list_dump(FILE *f) + rte_graph_t + rte_graph_max_count(void) + { +- return graph_id; ++ struct graph *graph; ++ rte_graph_t count = 0; ++ ++ STAILQ_FOREACH(graph, &graph_list, next) ++ count++; ++ ++ return count; + } + + RTE_LOG_REGISTER_DEFAULT(rte_graph_logtype, INFO); diff --git a/dpdk/lib/graph/node.c b/dpdk/lib/graph/node.c index fc6345de07..149414dcd9 100644 --- a/dpdk/lib/graph/node.c @@ -62021,10 +82016,26 @@ index fc6345de07..149414dcd9 100644 } diff --git a/dpdk/lib/hash/rte_cuckoo_hash.c b/dpdk/lib/hash/rte_cuckoo_hash.c -index 829b79c89a..a08b5dd875 100644 +index 829b79c89a..7da32f1d49 100644 --- a/dpdk/lib/hash/rte_cuckoo_hash.c +++ b/dpdk/lib/hash/rte_cuckoo_hash.c -@@ -1860,11 +1860,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, +@@ -166,6 +166,7 @@ rte_hash_create(const struct rte_hash_parameters *params) + /* Check for valid parameters */ + if ((params->entries > RTE_HASH_ENTRIES_MAX) || + (params->entries < RTE_HASH_BUCKET_ENTRIES) || ++ (params->name == NULL) || + (params->key_len == 0)) { + rte_errno = EINVAL; + RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n"); +@@ -1542,6 +1543,7 @@ rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg) + if (params.size == 0) + params.size = total_entries; + params.trigger_reclaim_limit = cfg->trigger_reclaim_limit; ++ params.max_reclaim_size = cfg->max_reclaim_size; + if (params.max_reclaim_size == 0) + params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX; + params.esize = sizeof(struct __rte_hash_rcu_dq_entry); +@@ -1860,11 +1862,15 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, _mm_load_si128( (__m128i const *)prim_bkt->sig_current), _mm_set1_epi16(sig))); @@ -62040,6 +82051,104 @@ index 829b79c89a..a08b5dd875 100644 break; #elif defined(__ARM_NEON) case RTE_HASH_COMPARE_NEON: { +diff --git a/dpdk/lib/hash/rte_cuckoo_hash.h b/dpdk/lib/hash/rte_cuckoo_hash.h +index eb2644f74b..b5a608b442 100644 +--- a/dpdk/lib/hash/rte_cuckoo_hash.h ++++ b/dpdk/lib/hash/rte_cuckoo_hash.h +@@ -29,17 +29,6 @@ + #define RETURN_IF_TRUE(cond, retval) + #endif + +-#if defined(RTE_LIBRTE_HASH_DEBUG) +-#define ERR_IF_TRUE(cond, fmt, args...) do { \ +- if (cond) { \ +- RTE_LOG(ERR, HASH, fmt, ##args); \ +- return; \ +- } \ +-} while (0) +-#else +-#define ERR_IF_TRUE(cond, fmt, args...) +-#endif +- + #include + #include + +diff --git a/dpdk/lib/hash/rte_hash.h b/dpdk/lib/hash/rte_hash.h +index a399346d02..51611e392b 100644 +--- a/dpdk/lib/hash/rte_hash.h ++++ b/dpdk/lib/hash/rte_hash.h +@@ -287,7 +287,7 @@ rte_hash_add_key_with_hash_data(const struct rte_hash *h, const void *key, + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOSPC if there is no space in the hash for this key. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key. This + * unique key id may be larger than the user specified entry count + * when RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD flag is set. +@@ -311,7 +311,7 @@ rte_hash_add_key(const struct rte_hash *h, const void *key); + * @return + * - -EINVAL if the parameters are invalid. 
+ * - -ENOSPC if there is no space in the hash for this key. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key. This + * unique key ID may be larger than the user specified entry count + * when RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD flag is set. +@@ -342,7 +342,7 @@ rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +@@ -374,7 +374,7 @@ rte_hash_del_key(const struct rte_hash *h, const void *key); + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +@@ -441,7 +441,7 @@ rte_hash_free_key_with_position(const struct rte_hash *h, + * @param data + * Output with pointer to data returned from the hash table. + * @return +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + * - -EINVAL if the parameters are invalid. +@@ -466,7 +466,7 @@ rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data); + * @param data + * Output with pointer to data returned from the hash table. + * @return +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + * - -EINVAL if the parameters are invalid. +@@ -489,7 +489,7 @@ rte_hash_lookup_with_hash_data(const struct rte_hash *h, const void *key, + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +@@ -511,7 +511,7 @@ rte_hash_lookup(const struct rte_hash *h, const void *key); + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. +- * - A positive value that can be used by the caller as an offset into an ++ * - A non-negative value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. 
+ */ diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c index 0249883b8d..2228af576b 100644 --- a/dpdk/lib/hash/rte_thash.c @@ -62170,6 +82279,64 @@ index 359a9f5b09..4900b750bc 100644 typedef int (*arg_handler_t)(const char *key, const char *value, void *opaque); /** A key/value association */ +diff --git a/dpdk/lib/latencystats/rte_latencystats.c b/dpdk/lib/latencystats/rte_latencystats.c +index 8985a377db..e47eac2cf8 100644 +--- a/dpdk/lib/latencystats/rte_latencystats.c ++++ b/dpdk/lib/latencystats/rte_latencystats.c +@@ -164,7 +164,7 @@ calc_latency(uint16_t pid __rte_unused, + * a constant smoothing factor between 0 and 1. The value + * is used below for measuring average latency. + */ +- const float alpha = 0.2; ++ const float alpha = 0.2f; + + now = rte_rdtsc(); + for (i = 0; i < nb_pkts; i++) { +diff --git a/dpdk/lib/lpm/rte_lpm6.c b/dpdk/lib/lpm/rte_lpm6.c +index 8d21aeddb8..79c75d8dfc 100644 +--- a/dpdk/lib/lpm/rte_lpm6.c ++++ b/dpdk/lib/lpm/rte_lpm6.c +@@ -279,7 +279,7 @@ rte_lpm6_create(const char *name, int socket_id, + + rules_tbl = rte_hash_create(&rule_hash_tbl_params); + if (rules_tbl == NULL) { +- RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)", ++ RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)\n", + rte_strerror(rte_errno), rte_errno); + goto fail_wo_unlock; + } +@@ -289,7 +289,7 @@ rte_lpm6_create(const char *name, int socket_id, + sizeof(uint32_t) * config->number_tbl8s, + RTE_CACHE_LINE_SIZE); + if (tbl8_pool == NULL) { +- RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)", ++ RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)\n", + rte_strerror(rte_errno), rte_errno); + rte_errno = ENOMEM; + goto fail_wo_unlock; +@@ -300,7 +300,7 @@ rte_lpm6_create(const char *name, int socket_id, + sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s, + RTE_CACHE_LINE_SIZE); + if (tbl8_hdrs == NULL) { +- RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)", ++ RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)\n", + rte_strerror(rte_errno), rte_errno); + rte_errno = ENOMEM; + goto fail_wo_unlock; +diff --git a/dpdk/lib/mbuf/rte_mbuf.h b/dpdk/lib/mbuf/rte_mbuf.h +index 3a82eb136d..a1b4c2b31d 100644 +--- a/dpdk/lib/mbuf/rte_mbuf.h ++++ b/dpdk/lib/mbuf/rte_mbuf.h +@@ -1120,6 +1120,9 @@ rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, + static inline void + rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc) + { ++#if !RTE_IOVA_AS_PA ++ mdst->dynfield2 = msrc->dynfield2; ++#endif + memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1)); + } + diff --git a/dpdk/lib/mbuf/rte_mbuf_core.h b/dpdk/lib/mbuf/rte_mbuf_core.h index a30e1e0eaf..3ab7be49fa 100644 --- a/dpdk/lib/mbuf/rte_mbuf_core.h @@ -62226,7 +82393,7 @@ index 524ba77620..d5f35aabe9 100644 ss->hash_seeds[i] = rte_rand(); diff --git a/dpdk/lib/mempool/rte_mempool.c b/dpdk/lib/mempool/rte_mempool.c -index f33f455790..3de857abf5 100644 +index f33f455790..258754cf67 100644 --- a/dpdk/lib/mempool/rte_mempool.c +++ b/dpdk/lib/mempool/rte_mempool.c @@ -915,6 +915,22 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, @@ -62283,7 +82450,38 @@ index f33f455790..3de857abf5 100644 /* call the mempool priv initializer */ if (mp_init) mp_init(mp, mp_init_arg); -@@ -1500,27 +1499,27 @@ mempool_info_cb(struct rte_mempool *mp, void *arg) +@@ -1055,10 +1054,6 @@ rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp) + return count; + } + +-#ifndef __INTEL_COMPILER 
+-#pragma GCC diagnostic ignored "-Wcast-qual" +-#endif +- + /* check and update cookies or panic (internal) */ + void rte_mempool_check_cookies(const struct rte_mempool *mp, + void * const *obj_table_const, unsigned n, int free) +@@ -1073,7 +1068,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp, + + /* Force to drop the "const" attribute. This is done only when + * DEBUG is enabled */ +- tmp = (void *) obj_table_const; ++ tmp = (void *)(uintptr_t)obj_table_const; + obj_table = tmp; + + while (n--) { +@@ -1182,10 +1177,6 @@ mempool_audit_cookies(struct rte_mempool *mp) + #define mempool_audit_cookies(mp) do {} while(0) + #endif + +-#ifndef __INTEL_COMPILER +-#pragma GCC diagnostic error "-Wcast-qual" +-#endif +- + /* check cookies before and after objects */ + static void + mempool_audit_cache(const struct rte_mempool *mp) +@@ -1500,27 +1491,27 @@ mempool_info_cb(struct rte_mempool *mp, void *arg) return; rte_tel_data_add_dict_string(info->d, "name", mp->name); @@ -62358,6 +82556,32 @@ index 9f530db24b..4a8a2d5dcb 100644 * - <0: Error; code of driver dequeue function. */ static __rte_always_inline int +diff --git a/dpdk/lib/mempool/rte_mempool_ops.c b/dpdk/lib/mempool/rte_mempool_ops.c +index 3b43edc548..9db952150a 100644 +--- a/dpdk/lib/mempool/rte_mempool_ops.c ++++ b/dpdk/lib/mempool/rte_mempool_ops.c +@@ -46,7 +46,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h) + + if (strlen(h->name) >= sizeof(ops->name) - 1) { + rte_spinlock_unlock(&rte_mempool_ops_table.sl); +- RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n", ++ RTE_LOG(DEBUG, MEMPOOL, "%s(): mempool_ops <%s>: name too long\n", + __func__, h->name); + rte_errno = EEXIST; + return -EEXIST; +diff --git a/dpdk/lib/meson.build b/dpdk/lib/meson.build +index fd55925340..02dfa9b868 100644 +--- a/dpdk/lib/meson.build ++++ b/dpdk/lib/meson.build +@@ -176,7 +176,7 @@ foreach l:libraries + + if not build + dpdk_libs_disabled += name +- set_variable(name.underscorify() + '_disable_reason', reason) ++ set_variable('lib_' + name.underscorify() + '_disable_reason', reason) + continue + endif + diff --git a/dpdk/lib/meter/rte_meter.h b/dpdk/lib/meter/rte_meter.h index 0932645d0a..35e2675028 100644 --- a/dpdk/lib/meter/rte_meter.h @@ -62402,11 +82626,67 @@ index 0932645d0a..35e2675028 100644 * trTCM RFC4115 color aware traffic metering * * @param m +diff --git a/dpdk/lib/metrics/rte_metrics_telemetry.c b/dpdk/lib/metrics/rte_metrics_telemetry.c +index 5be21b2e86..1d133e1f8c 100644 +--- a/dpdk/lib/metrics/rte_metrics_telemetry.c ++++ b/dpdk/lib/metrics/rte_metrics_telemetry.c +@@ -363,7 +363,7 @@ rte_metrics_tel_stat_names_to_ids(const char * const *stat_names, + } + } + if (j == num_metrics) { +- METRICS_LOG_WARN("Invalid stat name %s\n", ++ METRICS_LOG_WARN("Invalid stat name %s", + stat_names[i]); + free(names); + return -EINVAL; +diff --git a/dpdk/lib/net/rte_ether.h b/dpdk/lib/net/rte_ether.h +index bf8a55ba06..020d9b46ba 100644 +--- a/dpdk/lib/net/rte_ether.h ++++ b/dpdk/lib/net/rte_ether.h +@@ -46,6 +46,20 @@ extern "C" { + + #define RTE_ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. 
*/ + ++/* VLAN header fields */ ++#define RTE_VLAN_DEI_SHIFT 12 ++#define RTE_VLAN_PRI_SHIFT 13 ++#define RTE_VLAN_PRI_MASK 0xe000 /* Priority Code Point */ ++#define RTE_VLAN_DEI_MASK 0x1000 /* Drop Eligible Indicator */ ++#define RTE_VLAN_ID_MASK 0x0fff /* VLAN Identifier */ ++ ++#define RTE_VLAN_TCI_ID(vlan_tci) ((vlan_tci) & RTE_VLAN_ID_MASK) ++#define RTE_VLAN_TCI_PRI(vlan_tci) (((vlan_tci) & RTE_VLAN_PRI_MASK) >> RTE_VLAN_PRI_SHIFT) ++#define RTE_VLAN_TCI_DEI(vlan_tci) (((vlan_tci) & RTE_VLAN_DEI_MASK) >> RTE_VLAN_DEI_SHIFT) ++#define RTE_VLAN_TCI_MAKE(id, pri, dei) ((id) | \ ++ ((pri) << RTE_VLAN_PRI_SHIFT) | \ ++ ((dei) << RTE_VLAN_DEI_SHIFT)) ++ + /** + * Ethernet address: + * A universally administered address is uniquely assigned to a device by its diff --git a/dpdk/lib/net/rte_ip.h b/dpdk/lib/net/rte_ip.h -index 9c8e8206f0..0cafb980ef 100644 +index 9c8e8206f0..8da23660c6 100644 --- a/dpdk/lib/net/rte_ip.h +++ b/dpdk/lib/net/rte_ip.h -@@ -514,7 +514,7 @@ rte_ipv4_udptcp_cksum_verify(const struct rte_ipv4_hdr *ipv4_hdr, +@@ -420,11 +420,14 @@ __rte_ipv4_udptcp_cksum_mbuf(const struct rte_mbuf *m, + { + uint16_t raw_cksum; + uint32_t cksum; ++ uint16_t len; + +- if (l4_off > m->pkt_len) +- return 0; ++ if (unlikely(l4_off > m->pkt_len)) ++ return 0; /* invalid params, return a dummy value */ ++ ++ len = rte_be_to_cpu_16(ipv4_hdr->total_length) - (uint16_t)rte_ipv4_hdr_len(ipv4_hdr); + +- if (rte_raw_cksum_mbuf(m, l4_off, m->pkt_len - l4_off, &raw_cksum)) ++ if (rte_raw_cksum_mbuf(m, l4_off, len, &raw_cksum)) + return 0; + + cksum = raw_cksum + rte_ipv4_phdr_cksum(ipv4_hdr, 0); +@@ -514,7 +517,7 @@ rte_ipv4_udptcp_cksum_verify(const struct rte_ipv4_hdr *ipv4_hdr, * Return 0 if the checksum is correct, else -1. */ __rte_experimental @@ -62415,6 +82695,133 @@ index 9c8e8206f0..0cafb980ef 100644 rte_ipv4_udptcp_cksum_mbuf_verify(const struct rte_mbuf *m, const struct rte_ipv4_hdr *ipv4_hdr, uint16_t l4_off) +@@ -650,10 +653,10 @@ __rte_ipv6_udptcp_cksum_mbuf(const struct rte_mbuf *m, + uint16_t raw_cksum; + uint32_t cksum; + +- if (l4_off > m->pkt_len) +- return 0; ++ if (unlikely(l4_off > m->pkt_len)) ++ return 0; /* invalid params, return a dummy value */ + +- if (rte_raw_cksum_mbuf(m, l4_off, m->pkt_len - l4_off, &raw_cksum)) ++ if (rte_raw_cksum_mbuf(m, l4_off, rte_be_to_cpu_16(ipv6_hdr->payload_len), &raw_cksum)) + return 0; + + cksum = raw_cksum + rte_ipv6_phdr_cksum(ipv6_hdr, 0); +diff --git a/dpdk/lib/net/rte_net.h b/dpdk/lib/net/rte_net.h +index 56611fc8f9..a94f6af717 100644 +--- a/dpdk/lib/net/rte_net.h ++++ b/dpdk/lib/net/rte_net.h +@@ -121,7 +121,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) + * no offloads are requested. 
+ */ + if (!(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG | +- RTE_MBUF_F_TX_OUTER_IP_CKSUM))) ++ RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM))) + return 0; + + if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) { +@@ -135,6 +135,21 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) + struct rte_ipv4_hdr *, m->outer_l2_len); + ipv4_hdr->hdr_checksum = 0; + } ++ if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { ++ if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { ++ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, ++ m->outer_l2_len); ++ udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + ++ m->outer_l3_len); ++ udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, m->ol_flags); ++ } else { ++ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, ++ m->outer_l2_len); ++ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, ++ m->outer_l2_len + m->outer_l3_len); ++ udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, m->ol_flags); ++ } ++ } + } + + /* +diff --git a/dpdk/lib/net/rte_net_crc.c b/dpdk/lib/net/rte_net_crc.c +index a685f9e7bb..900d6de7f4 100644 +--- a/dpdk/lib/net/rte_net_crc.c ++++ b/dpdk/lib/net/rte_net_crc.c +@@ -179,7 +179,7 @@ avx512_vpclmulqdq_get_handlers(void) + max_simd_bitwidth >= RTE_VECT_SIMD_512) + return handlers_avx512; + #endif +- NET_LOG(INFO, "Requirements not met, can't use AVX512\n"); ++ NET_LOG(INFO, "Requirements not met, can't use AVX512"); + return NULL; + } + +@@ -205,7 +205,7 @@ sse42_pclmulqdq_get_handlers(void) + max_simd_bitwidth >= RTE_VECT_SIMD_128) + return handlers_sse42; + #endif +- NET_LOG(INFO, "Requirements not met, can't use SSE\n"); ++ NET_LOG(INFO, "Requirements not met, can't use SSE"); + return NULL; + } + +@@ -231,7 +231,7 @@ neon_pmull_get_handlers(void) + max_simd_bitwidth >= RTE_VECT_SIMD_128) + return handlers_neon; + #endif +- NET_LOG(INFO, "Requirements not met, can't use NEON\n"); ++ NET_LOG(INFO, "Requirements not met, can't use NEON"); + return NULL; + } + +diff --git a/dpdk/lib/node/ethdev_rx.c b/dpdk/lib/node/ethdev_rx.c +index a19237b42f..f6936ed77e 100644 +--- a/dpdk/lib/node/ethdev_rx.c ++++ b/dpdk/lib/node/ethdev_rx.c +@@ -160,13 +160,13 @@ ethdev_ptype_setup(uint16_t port, uint16_t queue) + + if (!l3_ipv4 || !l3_ipv6) { + node_info("ethdev_rx", +- "Enabling ptype callback for required ptypes on port %u\n", ++ "Enabling ptype callback for required ptypes on port %u", + port); + + if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb, + NULL)) { + node_err("ethdev_rx", +- "Failed to add rx ptype cb: port=%d, queue=%d\n", ++ "Failed to add rx ptype cb: port=%d, queue=%d", + port, queue); + return -EINVAL; + } +diff --git a/dpdk/lib/node/ip4_lookup.c b/dpdk/lib/node/ip4_lookup.c +index 8bce03d7db..75253ed837 100644 +--- a/dpdk/lib/node/ip4_lookup.c ++++ b/dpdk/lib/node/ip4_lookup.c +@@ -143,7 +143,7 @@ rte_node_ip4_route_add(uint32_t ip, uint8_t depth, uint16_t next_hop, + ip, depth, val); + if (ret < 0) { + node_err("ip4_lookup", +- "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d\n", ++ "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d", + abuf, depth, val, socket, ret); + return ret; + } +diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c +index 80d08e1a3b..d8fd36799b 100644 +--- a/dpdk/lib/pcapng/rte_pcapng.c ++++ b/dpdk/lib/pcapng/rte_pcapng.c +@@ -110,7 +110,8 @@ pcapng_add_option(struct pcapng_option *popt, uint16_t code, + { + popt->code = 
code; + popt->length = len; +- memcpy(popt->data, data, len); ++ if (len > 0) ++ memcpy(popt->data, data, len); + + return (struct pcapng_option *)((uint8_t *)popt + pcapng_optlen(len)); + } diff --git a/dpdk/lib/pci/rte_pci.h b/dpdk/lib/pci/rte_pci.h index 5088157e74..9876c3fb9d 100644 --- a/dpdk/lib/pci/rte_pci.h @@ -62493,8 +82900,87 @@ index 0e631dea2b..084c614639 100644 } } +diff --git a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c +index 9116f38ed2..833436b831 100644 +--- a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c ++++ b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c +@@ -2841,7 +2841,7 @@ pipeline_spec_parse(FILE *spec, + } + + /* Memory allocation. */ +- s = calloc(sizeof(struct pipeline_spec), 1); ++ s = calloc(1, sizeof(struct pipeline_spec)); + if (!s) { + if (err_line) + *err_line = n_lines; +@@ -4145,7 +4145,7 @@ pipeline_iospec_parse(FILE *spec, + } + + /* Memory allocation. */ +- s = calloc(sizeof(struct pipeline_iospec), 1); ++ s = calloc(1, sizeof(struct pipeline_iospec)); + if (!s) { + if (err_line) + *err_line = n_lines; +diff --git a/dpdk/lib/power/guest_channel.c b/dpdk/lib/power/guest_channel.c +index 969a9e5aaa..0117b856bb 100644 +--- a/dpdk/lib/power/guest_channel.c ++++ b/dpdk/lib/power/guest_channel.c +@@ -89,7 +89,7 @@ guest_channel_host_connect(const char *path, unsigned int lcore_id) + flags |= O_NONBLOCK; + if (fcntl(fd, F_SETFL, flags) < 0) { + RTE_LOG(ERR, GUEST_CHANNEL, "Failed on setting non-blocking mode for " +- "file %s", fd_path); ++ "file %s\n", fd_path); + goto error; + } + /* QEMU needs a delay after connection */ +diff --git a/dpdk/lib/power/rte_power_intel_uncore.c b/dpdk/lib/power/rte_power_intel_uncore.c +index 3b8724385f..7193b86516 100644 +--- a/dpdk/lib/power/rte_power_intel_uncore.c ++++ b/dpdk/lib/power/rte_power_intel_uncore.c +@@ -11,7 +11,7 @@ + #include "rte_power_intel_uncore.h" + #include "power_common.h" + +-#define MAX_UNCORE_FREQS 32 ++#define MAX_UNCORE_FREQS 64 + #define MAX_NUMA_DIE 8 + #define BUS_FREQ 100000 + #define FILTER_LENGTH 18 +diff --git a/dpdk/lib/power/rte_power_pmd_mgmt.c b/dpdk/lib/power/rte_power_pmd_mgmt.c +index ca1840387c..f9a2606e6c 100644 +--- a/dpdk/lib/power/rte_power_pmd_mgmt.c ++++ b/dpdk/lib/power/rte_power_pmd_mgmt.c +@@ -684,7 +684,7 @@ int + rte_power_pmd_mgmt_set_pause_duration(unsigned int duration) + { + if (duration == 0) { +- RTE_LOG(ERR, POWER, "Pause duration must be greater than 0, value unchanged"); ++ RTE_LOG(ERR, POWER, "Pause duration must be greater than 0, value unchanged\n"); + return -EINVAL; + } + pause_duration = duration; +@@ -707,7 +707,7 @@ rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min) + } + + if (min > scale_freq_max[lcore]) { +- RTE_LOG(ERR, POWER, "Invalid min frequency: Cannot be greater than max frequency"); ++ RTE_LOG(ERR, POWER, "Invalid min frequency: Cannot be greater than max frequency\n"); + return -EINVAL; + } + scale_freq_min[lcore] = min; +@@ -727,7 +727,7 @@ rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max) + if (max == 0) + max = UINT32_MAX; + if (max < scale_freq_min[lcore]) { +- RTE_LOG(ERR, POWER, "Invalid max frequency: Cannot be less than min frequency"); ++ RTE_LOG(ERR, POWER, "Invalid max frequency: Cannot be less than min frequency\n"); + return -EINVAL; + } + diff --git a/dpdk/lib/rawdev/rte_rawdev.c b/dpdk/lib/rawdev/rte_rawdev.c -index 5fbdb94229..891e79dcd7 100644 +index 5fbdb94229..dcebe4f653 100644 --- a/dpdk/lib/rawdev/rte_rawdev.c +++ 
b/dpdk/lib/rawdev/rte_rawdev.c @@ -505,8 +505,7 @@ rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id) @@ -62507,6 +82993,90 @@ index 5fbdb94229..891e79dcd7 100644 return NULL; } +@@ -657,7 +656,7 @@ handle_dev_dump(const char *cmd __rte_unused, + if (!rte_rawdev_pmd_is_valid_dev(dev_id)) + return -EINVAL; + +- buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); ++ buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); + if (buf == NULL) + return -ENOMEM; + +diff --git a/dpdk/lib/rcu/rte_rcu_qsbr.c b/dpdk/lib/rcu/rte_rcu_qsbr.c +index 17be93e830..40e99c402e 100644 +--- a/dpdk/lib/rcu/rte_rcu_qsbr.c ++++ b/dpdk/lib/rcu/rte_rcu_qsbr.c +@@ -92,7 +92,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id) + return 1; + } + +- __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", ++ __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", + v->qsbr_cnt[thread_id].lock_cnt); + + id = thread_id & __RTE_QSBR_THRID_MASK; +@@ -144,7 +144,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id) + return 1; + } + +- __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", ++ __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", + v->qsbr_cnt[thread_id].lock_cnt); + + id = thread_id & __RTE_QSBR_THRID_MASK; +diff --git a/dpdk/lib/rcu/rte_rcu_qsbr.h b/dpdk/lib/rcu/rte_rcu_qsbr.h +index ccae5d54f7..8e0a166414 100644 +--- a/dpdk/lib/rcu/rte_rcu_qsbr.h ++++ b/dpdk/lib/rcu/rte_rcu_qsbr.h +@@ -304,7 +304,7 @@ rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id) + + RTE_ASSERT(v != NULL && thread_id < v->max_threads); + +- __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", ++ __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", + v->qsbr_cnt[thread_id].lock_cnt); + + /* Copy the current value of token. +@@ -355,7 +355,7 @@ rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id) + { + RTE_ASSERT(v != NULL && thread_id < v->max_threads); + +- __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", ++ __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", + v->qsbr_cnt[thread_id].lock_cnt); + + /* The reader can go offline only after the load of the +@@ -432,7 +432,7 @@ rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v, + 1, __ATOMIC_RELEASE); + + __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING, +- "Lock counter %u. Nested locks?\n", ++ "Lock counter %u. 
Nested locks?", + v->qsbr_cnt[thread_id].lock_cnt); + #endif + } +@@ -486,7 +486,7 @@ rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id) + + RTE_ASSERT(v != NULL && thread_id < v->max_threads); + +- __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", ++ __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", + v->qsbr_cnt[thread_id].lock_cnt); + + /* Acquire the changes to the shared data structure released +diff --git a/dpdk/lib/regexdev/rte_regexdev.c b/dpdk/lib/regexdev/rte_regexdev.c +index caec069182..d38a85eb0b 100644 +--- a/dpdk/lib/regexdev/rte_regexdev.c ++++ b/dpdk/lib/regexdev/rte_regexdev.c +@@ -19,7 +19,7 @@ static struct { + struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS]; + } *rte_regexdev_shared_data; + +-int rte_regexdev_logtype; ++RTE_LOG_REGISTER_DEFAULT(rte_regexdev_logtype, INFO); + + static uint16_t + regexdev_find_free_dev(void) diff --git a/dpdk/lib/reorder/rte_reorder.c b/dpdk/lib/reorder/rte_reorder.c index 385ee479da..bc85b83b14 100644 --- a/dpdk/lib/reorder/rte_reorder.c @@ -62826,6 +83396,46 @@ index 4bacf9fcd9..fd7013a23d 100644 /** * Create security session as specified by the session configuration +diff --git a/dpdk/lib/stack/rte_stack.c b/dpdk/lib/stack/rte_stack.c +index 1fabec2bfe..1dab6d6645 100644 +--- a/dpdk/lib/stack/rte_stack.c ++++ b/dpdk/lib/stack/rte_stack.c +@@ -56,7 +56,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id, + int ret; + + if (flags & ~(RTE_STACK_F_LF)) { +- STACK_LOG_ERR("Unsupported stack flags %#x\n", flags); ++ STACK_LOG_ERR("Unsupported stack flags %#x", flags); + return NULL; + } + +@@ -65,7 +65,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id, + #endif + #if !defined(RTE_STACK_LF_SUPPORTED) + if (flags & RTE_STACK_F_LF) { +- STACK_LOG_ERR("Lock-free stack is not supported on your platform\n"); ++ STACK_LOG_ERR("Lock-free stack is not supported on your platform"); + rte_errno = ENOTSUP; + return NULL; + } +@@ -82,7 +82,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id, + + te = rte_zmalloc("STACK_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { +- STACK_LOG_ERR("Cannot reserve memory for tailq\n"); ++ STACK_LOG_ERR("Cannot reserve memory for tailq"); + rte_errno = ENOMEM; + return NULL; + } +@@ -92,7 +92,7 @@ rte_stack_create(const char *name, unsigned int count, int socket_id, + mz = rte_memzone_reserve_aligned(mz_name, sz, socket_id, + 0, __alignof__(*s)); + if (mz == NULL) { +- STACK_LOG_ERR("Cannot reserve stack memzone!\n"); ++ STACK_LOG_ERR("Cannot reserve stack memzone!"); + rte_mcfg_tailq_write_unlock(); + rte_free(te); + return NULL; diff --git a/dpdk/lib/table/rte_swx_table_selector.c b/dpdk/lib/table/rte_swx_table_selector.c index ad99f18453..18e021fe6f 100644 --- a/dpdk/lib/table/rte_swx_table_selector.c @@ -62863,10 +83473,23 @@ index d9918c4e96..40e9a3bf9d 100644 #define RTE_TEL_MAX_STRING_LEN 128 /** Maximum length of string. 
*/ diff --git a/dpdk/lib/telemetry/telemetry.c b/dpdk/lib/telemetry/telemetry.c -index 8fbb4f3060..9c3c346ff5 100644 +index 8fbb4f3060..491c4d21dd 100644 --- a/dpdk/lib/telemetry/telemetry.c +++ b/dpdk/lib/telemetry/telemetry.c -@@ -208,7 +208,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) +@@ -171,7 +171,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) + d->type != RTE_TEL_ARRAY_INT && d->type != RTE_TEL_ARRAY_STRING) + return snprintf(out_buf, buf_len, "null"); + +- used = rte_tel_json_empty_array(out_buf, buf_len, 0); ++ if (d->type == RTE_TEL_DICT) ++ used = rte_tel_json_empty_obj(out_buf, buf_len, 0); ++ else ++ used = rte_tel_json_empty_array(out_buf, buf_len, 0); ++ + if (d->type == RTE_TEL_ARRAY_U64) + for (i = 0; i < d->data_len; i++) + used = rte_tel_json_add_array_u64(out_buf, +@@ -208,7 +212,11 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) break; case RTE_TEL_CONTAINER: { @@ -62879,7 +83502,7 @@ index 8fbb4f3060..9c3c346ff5 100644 const struct container *cont = &v->value.container; if (container_to_json(cont->data, -@@ -219,6 +223,7 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) +@@ -219,6 +227,7 @@ container_to_json(const struct rte_tel_data *d, char *out_buf, size_t buf_len) v->name, temp); if (!cont->keep) rte_tel_data_free(cont->data); @@ -62887,7 +83510,7 @@ index 8fbb4f3060..9c3c346ff5 100644 break; } } -@@ -275,7 +280,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -275,7 +284,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) break; case RTE_TEL_CONTAINER: { @@ -62900,7 +83523,7 @@ index 8fbb4f3060..9c3c346ff5 100644 const struct container *cont = &v->value.container; if (container_to_json(cont->data, -@@ -286,6 +295,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -286,6 +299,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) v->name, temp); if (!cont->keep) rte_tel_data_free(cont->data); @@ -62908,7 +83531,7 @@ index 8fbb4f3060..9c3c346ff5 100644 } } } -@@ -311,7 +321,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -311,7 +325,11 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) buf_len, used, d->data.array[i].u64val); else if (d->type == RTE_TEL_ARRAY_CONTAINER) { @@ -62921,7 +83544,7 @@ index 8fbb4f3060..9c3c346ff5 100644 const struct container *rec_data = &d->data.array[i].container; if (container_to_json(rec_data->data, -@@ -321,6 +335,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -321,6 +339,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) buf_len, used, temp); if (!rec_data->keep) rte_tel_data_free(rec_data->data); @@ -62929,7 +83552,7 @@ index 8fbb4f3060..9c3c346ff5 100644 } break; } -@@ -333,7 +348,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) +@@ -333,7 +352,7 @@ output_json(const char *cmd, const struct rte_tel_data *d, int s) static void perform_command(telemetry_cb fn, const char *cmd, const char *param, int s) { @@ -62938,6 +83561,38 @@ index 8fbb4f3060..9c3c346ff5 100644 int ret = fn(cmd, param, &data); if (ret < 0) { +@@ -364,8 +383,8 @@ client_handler(void *sock_id) + "{\"version\":\"%s\",\"pid\":%d,\"max_output_len\":%d}", + telemetry_version, getpid(), MAX_OUTPUT_LEN); + if (write(s, info_str, strlen(info_str)) < 0) { +- close(s); +- return NULL; ++ TMTY_LOG(DEBUG, "Socket write base info to 
client failed"); ++ goto exit; + } + + /* receive data is not null terminated */ +@@ -390,6 +409,7 @@ client_handler(void *sock_id) + + bytes = read(s, buffer, sizeof(buffer) - 1); + } ++exit: + close(s); + __atomic_sub_fetch(&v2_clients, 1, __ATOMIC_RELAXED); + return NULL; +diff --git a/dpdk/lib/telemetry/telemetry_legacy.c b/dpdk/lib/telemetry/telemetry_legacy.c +index 4c1d1c353a..578230732c 100644 +--- a/dpdk/lib/telemetry/telemetry_legacy.c ++++ b/dpdk/lib/telemetry/telemetry_legacy.c +@@ -94,7 +94,7 @@ register_client(const char *cmd __rte_unused, const char *params, + } + #ifndef RTE_EXEC_ENV_WINDOWS + strlcpy(data, strchr(params, ':'), sizeof(data)); +- memcpy(data, &data[strlen(":\"")], strlen(data)); ++ memmove(data, &data[strlen(":\"")], strlen(data)); + if (!strchr(data, '\"')) { + fprintf(stderr, "Invalid client data\n"); + return -1; diff --git a/dpdk/lib/vhost/socket.c b/dpdk/lib/vhost/socket.c index 863a6f6d52..669c322e12 100644 --- a/dpdk/lib/vhost/socket.c @@ -62958,6 +83613,54 @@ index 863a6f6d52..669c322e12 100644 for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL; cmsg = CMSG_NXTHDR(&msgh, cmsg)) { +diff --git a/dpdk/lib/vhost/vdpa.c b/dpdk/lib/vhost/vdpa.c +index 577cb00a43..cf51ca957a 100644 +--- a/dpdk/lib/vhost/vdpa.c ++++ b/dpdk/lib/vhost/vdpa.c +@@ -19,6 +19,7 @@ + #include "rte_vdpa.h" + #include "vdpa_driver.h" + #include "vhost.h" ++#include "iotlb.h" + + /** Double linked list of vDPA devices. */ + TAILQ_HEAD(vdpa_device_list, rte_vdpa_device); +@@ -191,17 +192,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) + if (unlikely(nr_descs > vq->size)) + return -1; + ++ vhost_user_iotlb_rd_lock(vq); + desc_ring = (struct vring_desc *)(uintptr_t) + vhost_iova_to_vva(dev, vq, + vq->desc[desc_id].addr, &dlen, + VHOST_ACCESS_RO); ++ vhost_user_iotlb_rd_unlock(vq); + if (unlikely(!desc_ring)) + return -1; + + if (unlikely(dlen < vq->desc[desc_id].len)) { ++ vhost_user_iotlb_rd_lock(vq); + idesc = vhost_alloc_copy_ind_table(dev, vq, + vq->desc[desc_id].addr, + vq->desc[desc_id].len); ++ vhost_user_iotlb_rd_unlock(vq); + if (unlikely(!idesc)) + return -1; + +@@ -218,9 +223,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) + if (unlikely(nr_descs-- == 0)) + goto fail; + desc = desc_ring[desc_id]; +- if (desc.flags & VRING_DESC_F_WRITE) ++ if (desc.flags & VRING_DESC_F_WRITE) { ++ vhost_user_iotlb_rd_lock(vq); + vhost_log_write_iova(dev, vq, desc.addr, + desc.len); ++ vhost_user_iotlb_rd_unlock(vq); ++ } + desc_id = desc.next; + } while (desc.flags & VRING_DESC_F_NEXT); + diff --git a/dpdk/lib/vhost/vhost.c b/dpdk/lib/vhost/vhost.c index 19c7b92c32..9e28198528 100644 --- a/dpdk/lib/vhost/vhost.c @@ -63247,11 +83950,50 @@ index ef211ed519..63e2f3f577 100644 if (dev->notify_ops->guest_notified) dev->notify_ops->guest_notified(dev->vid); } +diff --git a/dpdk/lib/vhost/vhost_crypto.c b/dpdk/lib/vhost/vhost_crypto.c +index b448b6685d..bd69d3b46e 100644 +--- a/dpdk/lib/vhost/vhost_crypto.c ++++ b/dpdk/lib/vhost/vhost_crypto.c +@@ -245,7 +245,7 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform, + return ret; + + if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) { +- VC_LOG_DBG("Invalid cipher key length\n"); ++ VC_LOG_DBG("Invalid cipher key length"); + return -VIRTIO_CRYPTO_BADMSG; + } + +@@ -301,7 +301,7 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, + return ret; + + if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) { +- VC_LOG_DBG("Invalid cipher key length\n"); ++ 
VC_LOG_DBG("Invalid cipher key length"); + return -VIRTIO_CRYPTO_BADMSG; + } + +@@ -321,7 +321,7 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, + return ret; + + if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) { +- VC_LOG_DBG("Invalid auth key length\n"); ++ VC_LOG_DBG("Invalid auth key length"); + return -VIRTIO_CRYPTO_BADMSG; + } + diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c -index 9902ae9944..8df66e68b3 100644 +index 9902ae9944..8d7d04059c 100644 --- a/dpdk/lib/vhost/vhost_user.c +++ b/dpdk/lib/vhost/vhost_user.c -@@ -1809,7 +1809,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev, +@@ -1745,6 +1745,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, + if (!vq) + continue; + ++ cleanup_vq_inflight(dev, vq); + if (vq_is_packed(dev)) { + vq->inflight_packed = addr; + vq->inflight_packed->desc_num = queue_size; +@@ -1809,7 +1810,7 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev, if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) close(ctx->fds[0]); @@ -63260,7 +84002,17 @@ index 9902ae9944..8df66e68b3 100644 return RTE_VHOST_MSG_RESULT_OK; } -@@ -2326,7 +2326,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev, +@@ -2144,7 +2145,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev, + + vhost_user_iotlb_flush_all(vq); + ++ rte_spinlock_lock(&vq->access_lock); + vring_invalidate(dev, vq); ++ rte_spinlock_unlock(&vq->access_lock); + + return RTE_VHOST_MSG_RESULT_REPLY; + } +@@ -2326,7 +2329,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev, return RTE_VHOST_MSG_RESULT_ERR; close(ctx->fds[0]); @@ -63269,7 +84021,7 @@ index 9902ae9944..8df66e68b3 100644 return RTE_VHOST_MSG_RESULT_OK; } -@@ -2817,29 +2817,36 @@ read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context * +@@ -2817,29 +2820,36 @@ read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context * ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE, ctx->fds, VHOST_MEMORY_MAX_NREGIONS, &ctx->fd_num); @@ -63314,7 +84066,7 @@ index 9902ae9944..8df66e68b3 100644 return ret; } -@@ -2987,13 +2994,10 @@ vhost_user_msg_handler(int vid, int fd) +@@ -2987,13 +2997,10 @@ vhost_user_msg_handler(int vid, int fd) } } @@ -63331,7 +84083,7 @@ index 9902ae9944..8df66e68b3 100644 return -1; } -@@ -3003,6 +3007,14 @@ vhost_user_msg_handler(int vid, int fd) +@@ -3003,6 +3010,14 @@ vhost_user_msg_handler(int vid, int fd) else msg_handler = NULL; @@ -63347,7 +84099,7 @@ index 9902ae9944..8df66e68b3 100644 if (request != VHOST_USER_IOTLB_MSG) VHOST_LOG_CONFIG(dev->ifname, INFO, diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c -index 9abf752f30..26f184f8b2 100644 +index 9abf752f30..9f314f83c7 100644 --- a/dpdk/lib/vhost/virtio_net.c +++ b/dpdk/lib/vhost/virtio_net.c @@ -1453,6 +1453,12 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev, @@ -63363,7 +84115,88 @@ index 9abf752f30..26f184f8b2 100644 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr); -@@ -3470,6 +3476,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -1790,7 +1796,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev, + else + max_tries = 1; + +- while (size > 0) { ++ do { + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal +@@ -1817,7 +1823,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev, + avail_idx += desc_count; + if 
(avail_idx >= vq->size) + avail_idx -= vq->size; +- } ++ } while (size > 0); + + if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0)) + return -1; +@@ -2856,7 +2862,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + { + uint16_t i; + uint16_t avail_entries; +- uint16_t dropped = 0; + static bool allocerr_warned; + + /* +@@ -2895,11 +2900,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + + update_shadow_used_ring_split(vq, head_idx, 0); + +- if (unlikely(buf_len <= dev->vhost_hlen)) { +- dropped += 1; +- i++; ++ if (unlikely(buf_len <= dev->vhost_hlen)) + break; +- } + + buf_len -= dev->vhost_hlen; + +@@ -2916,8 +2918,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + buf_len, mbuf_pool->name); + allocerr_warned = true; + } +- dropped += 1; +- i++; + break; + } + +@@ -2928,27 +2928,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n"); + allocerr_warned = true; + } +- dropped += 1; +- i++; + break; + } +- + } + +- if (dropped) +- rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1); ++ if (unlikely(count != i)) ++ rte_pktmbuf_free_bulk(&pkts[i], count - i); + +- vq->last_avail_idx += i; +- +- do_data_copy_dequeue(vq); +- if (unlikely(i < count)) +- vq->shadow_used_idx = i; + if (likely(vq->shadow_used_idx)) { ++ vq->last_avail_idx += vq->shadow_used_idx; ++ do_data_copy_dequeue(vq); + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + +- return (i - dropped); ++ return i; + } + + __rte_noinline +@@ -3470,6 +3464,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq, allocerr_warned = true; } dropped = true; @@ -63371,6 +84204,54 @@ index 9abf752f30..26f184f8b2 100644 break; } +diff --git a/dpdk/meson.build b/dpdk/meson.build +index f91d652bc5..86f59a2fe4 100644 +--- a/dpdk/meson.build ++++ b/dpdk/meson.build +@@ -163,17 +163,17 @@ message(output_message + '\n') + output_message = '\n=================\nContent Skipped\n=================\n' + output_message += '\napps:\n\t' + foreach app:dpdk_apps_disabled +- reason = get_variable(app.underscorify() + '_disable_reason') ++ reason = get_variable('app_' + app.underscorify() + '_disable_reason') + output_message += app + ':\t' + reason + '\n\t' + endforeach + output_message += '\nlibs:\n\t' + foreach lib:dpdk_libs_disabled +- reason = get_variable(lib.underscorify() + '_disable_reason') ++ reason = get_variable('lib_' + lib.underscorify() + '_disable_reason') + output_message += lib + ':\t' + reason + '\n\t' + endforeach + output_message += '\ndrivers:\n\t' + foreach drv:dpdk_drvs_disabled +- reason = get_variable(drv.underscorify() + '_disable_reason') ++ reason = get_variable('drv_' + drv.underscorify() + '_disable_reason') + output_message += drv + ':\t' + reason + '\n\t' + endforeach + message(output_message + '\n') +diff --git a/dpdk/usertools/dpdk-devbind.py b/dpdk/usertools/dpdk-devbind.py +index 4d9c1be666..d9aff74873 100755 +--- a/dpdk/usertools/dpdk-devbind.py ++++ b/dpdk/usertools/dpdk-devbind.py +@@ -66,14 +66,14 @@ intel_ntb_icx = {'Class': '06', 'Vendor': '8086', 'Device': '347e', + 'SVendor': None, 'SDevice': None} + + cnxk_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f9,a0fa', +- 'SVendor': None, 'SDevice': None} ++ 'SVendor': None, 'SDevice': None} + cnxk_npa = {'Class': '08', 'Vendor': '177d', 'Device': 'a0fb,a0fc', +- 'SVendor': None, 'SDevice': None} ++ 
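/*
 * The virtio_dev_tx_split rewrite above drops the per-iteration "dropped"
 * accounting: the copy loop simply breaks on the first error, and the
 * unconsumed tail of the packet array is released with one
 * rte_pktmbuf_free_bulk(&pkts[i], count - i) call. A minimal sketch of
 * that tail-release shape using plain malloc/free stand-ins (deliver and
 * free_bulk are illustrative, not DPDK APIs):
 */
#include <stdio.h>
#include <stdlib.h>

static void free_bulk(void **objs, unsigned int n)
{
    for (unsigned int k = 0; k < n; k++)
        free(objs[k]);
}

static unsigned int deliver(void **pkts, unsigned int count)
{
    unsigned int i;

    for (i = 0; i < count; i++) {
        if (pkts[i] == NULL)          /* stand-in for a copy failure */
            break;                    /* no dropped++/i++ bookkeeping */
    }
    if (i != count)                   /* free everything never delivered */
        free_bulk(&pkts[i], count - i);
    return i;                         /* packets actually handed out */
}

int main(void)
{
    void *pkts[4] = { malloc(8), malloc(8), NULL, malloc(8) };
    unsigned int sent = deliver(pkts, 4);

    printf("delivered %u of 4\n", sent);   /* prints 2 */
    free_bulk(pkts, sent);                 /* caller releases what it got */
    return 0;
}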
'SVendor': None, 'SDevice': None} + cn9k_ree = {'Class': '08', 'Vendor': '177d', 'Device': 'a0f4', +- 'SVendor': None, 'SDevice': None} ++ 'SVendor': None, 'SDevice': None} + + virtio_blk = {'Class': '01', 'Vendor': "1af4", 'Device': '1001,1042', +- 'SVendor': None, 'SDevice': None} ++ 'SVendor': None, 'SDevice': None} + + network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class] + baseband_devices = [acceleration_class] diff --git a/dpdk/usertools/dpdk-pmdinfo.py b/dpdk/usertools/dpdk-pmdinfo.py index 67d023a047..2c728de7b8 100755 --- a/dpdk/usertools/dpdk-pmdinfo.py diff --git a/SPECS/openvswitch3.1.spec b/SPECS/openvswitch3.1.spec index abab387..9d993dc 100644 --- a/SPECS/openvswitch3.1.spec +++ b/SPECS/openvswitch3.1.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 3.1.0 -Release: 134%{?dist} +Release: 135%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -756,6 +756,445 @@ exit 0 %endif %changelog +* Fri Oct 25 2024 Open vSwitch CI - 3.1.0-135 +- Merging dpdk subtree [RH git: 80da586a0a] + Commit list: + 7294b64b1d Revert "eal/unix: support ZSTD compression for firmware" + 3ad974b46b Merge tag 'v22.11.6' into 22.11 + d6fc7991d0 Reapply "build: add libarchive to optional external dependencies" + ff4d4839f9 version: 22.11.6 + 2480dbd434 version: 22.11.6-rc1 + 29857b82e4 doc: add baseline mode in l3fwd-power guide + b9c8076d92 doc: fix mbuf flags + ff2d7af17a examples/ipsec-secgw: revert SA salt endianness + b70780f316 power: increase the number of UNCORE frequencies + e224810340 doc: remove reference to mbuf pkt field + 28c93c22f7 examples: fix port ID restriction + 6a1313b34a examples: fix lcore ID restriction + 3b8becb93e examples: fix queue ID restriction + 15ac04a068 net/ice/base: fix temporary failures reading NVM + 4568f7b753 net/hns3: fix uninitialized variable in FEC query + b9b793c805 examples/l3fwd: fix crash on multiple sockets + 68b33da50f examples/l3fwd: fix crash in ACL mode for mixed traffic + 3682872558 bus/vdev: fix device reinitialization + 0a15aefcc6 malloc: fix multi-process wait condition handling + 8ea890172e app/pdump: handle SIGTERM and SIGHUP + 7ecd214dda app/dumpcap: handle SIGTERM and SIGHUP + 1b1337cc93 dma/hisilicon: remove support for HIP09 platform + 635c7eaa5e bus/pci: fix FD in secondary process + 6bf6cd5184 bus/pci: fix UIO resource mapping in secondary process + 99b0e4a22c app/testpmd: fix build on signed comparison + 1092b5655f ethdev: fix device init without socket-local memory + 9c7ae1bf6d app/testpmd: add postpone option to async flow destroy + c4f3e79ac5 ethdev: fix GENEVE option item conversion + becd25a54a net/ark: fix index arithmetic + 1c9c96641a net/hns3: check Rx DMA address alignmnent + 5669dcea46 net/mlx5: fix disabling E-Switch default flow rules + d69a8b5eb2 common/mlx5: remove unneeded field when modify RQ table + 751fa44c51 net/mlx5: fix uplink port probing in bonding mode + f06e1a4e49 net/mlx5: fix end condition of reading xstats + 7aa564683c net/mlx5/hws: remove unused variable + fdf5a81ff7 net/mlx5/hws: fix port ID on root item convert + 6da7724d9e net/mlx5/hws: fix deletion of action vport + 57ebce02cd net/mlx5: fix MTU configuration + 48bfee9777 net/mlx5: fix Arm build with GCC 9.1 + e444727915 net/ice: fix return value for raw pattern parsing + 9928f1ec31 net/ice: fix memory leaks in raw pattern parsing + 68d0c242e4 crypto/qat: fix placement of OOP offset + 
327b33505c test/crypto: fix asymmetric capability test + 5456b47a3a test/crypto: remove unused stats in setup + ffc0a0f5d9 doc: fix typo in l2fwd-crypto guide + f4834455b2 crypto/qat: fix log message typo + 5a257288f8 test/crypto: fix allocation comment + 8e373a7f33 crypto/ipsec_mb: fix function comment + e793a7063f crypto/qat: fix GEN4 write + a0bb9efc61 net/nfp: fix disabling 32-bit build + f0e36f585b net/nfp: adapt reverse sequence card + 0c73b671c8 net/nfp: remove redundant function call + 59cee7032b net/nfp: forbid offload flow rules with empty action list + 18b7942cef net/ena: fix checksum handling + cf4bb6bffc net/ena: fix return value check + 33ddd1e51d net/ena: fix bad checksum handling + 59adabd787 net/nfp: disable ctrl VNIC queues on close + 047aff2cf1 net/ionic: fix mbuf double-free when emptying array + 087d968fc0 net/nfp: fix allocation of switch domain + f4affbfda7 net/nfp: fix IPv6 TTL and DSCP flow action + a91148a6fe net/vmxnet3: fix init logs + 48d15a5ef2 net/txgbe: fix Rx interrupt + bd6f2422df net/ngbe: fix memory leaks + c14e0d31df net/txgbe: fix memory leaks + a51a250cc5 net/ngbe: fix MTU range + c5accd3813 net/txgbe: fix MTU range + 0ce675d4cf net/ngbe: fix hotplug remove + 4d4117d9d9 net/txgbe: fix hotplug remove + e8d10f45d6 net/ngbe: keep PHY power down while device probing + 8b161ee917 net/ngbe: add special config for YT8531SH-CA PHY + ee530d3ee4 net/txgbe: fix VF promiscuous and allmulticast + 13909e4ba2 net/txgbe: reconfigure more MAC Rx registers + 7b6163ebfa net/txgbe: restrict configuration of VLAN strip offload + a96f251def net/txgbe: fix Tx hang on queue disable + 500f540c7c net/txgbe: fix flow filters in VT mode + f0b14e1401 net/txgbe: fix tunnel packet parsing + 40700232ba app/testpmd: fix parsing for connection tracking item + 4d92c686d6 doc: remove empty section from testpmd guide + 23e7802fa9 app/testpmd: handle IEEE1588 init failure + 4999f309fb net/ice: fix sizing of filter hash table + d0f2f65e1f common/idpf: fix flex descriptor mask + e804d44715 net/ice/base: fix masking when reading context + 44cb540275 net/ice/base: fix preparing PHY for timesync command + 8719d11e4b net/ice/base: fix board type definition + ecb6c3285e net/ice/base: fix potential TLV length overflow + 39696cfa70 net/ice/base: fix check for existing switch rule + e31db1b871 net/ice/base: fix return type of bitmap hamming weight + 92938d56d0 net/ice/base: fix GCS descriptor field offsets + f3701c8b98 net/ice/base: fix size when allocating children arrays + 7fdbc3da5b net/ice/base: fix sign extension + 64f49919dd net/ice/base: fix memory leak in firmware version check + 7f57a7a2ce net/ice/base: fix pointer to variable outside scope + f7dcc5e44f buildtools: fix build with clang 17 and ASan + 73a8315608 fbarray: fix finding for unaligned length + 3a0b6d25c0 net/mlx5: fix start without duplicate flow patterns + c1257d39c7 net/dpaa: forbid MTU configuration for shared interface + b7594f8f15 bus/dpaa: remove redundant file descriptor check + 55950c7aa5 common/dpaax: fix node array overrun + 041e6663bd common/dpaax: fix IOVA table cleanup + 75332a8da8 bus/dpaa: fix memory leak in bus scan + b82122e2ab bus/dpaa: fix bus scan for DMA devices + e922a98a21 app/testpmd: fix help string of BPF load command + c74c897cfe dma/idxd: fix setup with Ubuntu 24.04 + b929a370ef eal/linux: lower log level on allocation attempt failure + 49fc31bc8b usertools/devbind: fix indentation + 58dd24b466 fbarray: fix lookbehind ignore mask handling + 2050d2ea01 fbarray: fix lookahead ignore mask handling + 
8f3876f866 fbarray: fix incorrect lookbehind behavior + e67654ca81 fbarray: fix incorrect lookahead behavior + e8f3418f64 examples/ipsec-secgw: fix SA salt endianness + 6530bf0f6d crypto/dpaa2_sec: fix event queue user context + ac47aa31a1 crypto/dpaa_sec: fix IPsec descriptor + 6182babbe7 common/dpaax/caamflib: fix PDCP AES-AES watchdog error + 09ca25c293 common/dpaax/caamflib: fix PDCP-SDAP watchdog error + d0aee3876f crypto/openssl: set cipher padding once + ccb6568398 crypto/openssl: make per-QP auth context clones + 26e1e90724 crypto/openssl: make per-QP cipher context clones + 8e32c3a94a crypto/openssl: optimize 3DES-CTR context init + 5154afdce8 crypto/openssl: fix GCM and CCM thread unsafe contexts + 7e6b9c0686 examples/fips_validation: fix dereference and out-of-bound + 3d19e7d9eb cryptodev: validate crypto callbacks from next node + ff0ffd5338 cryptodev: fix build without crypto callbacks + 7341376515 crypto/cnxk: fix minimal input normalization + 528f8f36a8 app/crypto-perf: fix result for asymmetric + d581598df3 app/crypto-perf: remove redundant local variable + cbe35e02f9 baseband/la12xx: forbid secondary process + 63cc1ddb1b telemetry: fix connection parameter parsing + e03493d52e bpf: fix load hangs with six IPv6 addresses + d7a549306d bpf: fix MOV instruction evaluation + f172fd7567 mbuf: fix dynamic fields copy + 2db5746bc8 vdpa/sfc: remove dead code + e35ccc8804 dmadev: fix structure alignment + cdb1114156 app/bbdev: fix interrupt tests + fcfe871790 build: use builtin helper for python dependencies + 0938693288 config: fix warning for cross build with meson >= 1.3.0 + 1a055be1a3 doc: fix link to hugepage mapping from Linux guide + 6a30502095 bus/vdev: revert fix devargs in secondary process + 1620b6d717 telemetry: lower log level on socket error + 5cbc25e40f test/crypto: fix enqueue/dequeue callback case + e12b4a47ba net/mlx5: fix crash on counter pool destroy + aadaf7fe72 net/mlx5: support jump in meter hierarchy + 739bd81a46 net/mlx5: fix access to flow template operations + 68b68c76e1 net/mlx5: break flow resource release loop + 42e1eb6a45 net/mlx5: fix flow template indirect action failure + c8ee47f1a6 net/mlx5: fix hash Rx queue release in flow sample + a752b66a9a net/mlx5: fix indexed pool with invalid index + 06af59ff2f net/mlx5/hws: fix action template dump + 4a75ad8a7e net/mlx5/hws: add template match none flag + df1753ebe8 net/mlx5/hws: fix spinlock release on context open + ef9f7c1fd8 net/mlx5/hws: fix function comment + 06de40842e common/mlx5: fix PRM structs + a05258399a net/mlx5/hws: decrease log level for creation failure + 17c5271078 common/mlx5: fix unsigned/signed mismatch + 0ac140899b hash: fix RCU reclamation size + 1f65019c2d bpf: disable on 32-bit x86 + 145220677b graph: fix ID collisions + e0da7a253d net/cnxk: fix promiscuous state after MAC change + d9d6771eba net/cnxk: fix outbound security with higher packet burst + d1c48d821e net/cnxk: fix RSS config + 8873f14545 net/ixgbe/base: fix PHY ID for X550 + 7b94a8b53e net/ixgbe/base: fix 5G link speed reported on VF + 5e73514050 net/ixgbe/base: revert advertising for X550 2.5G/5G + 229bdd77bb net/e1000/base: fix link power down + b2e98edd71 net/ixgbe: do not create delayed interrupt handler twice + 23efe3ba7a net/ixgbe: do not update link status in secondary process + f461c249ff net/fm10k: fix cleanup during init failure + 4c07a5e8c1 eal: fix logs for '--lcores' + 5575459062 eventdev/crypto: fix opaque field handling + ae651a5adb event/sw: fix warning from useless snprintf + d47135efe7 baseband/acc: fix 
memory barrier + 48fb2f00d8 net/virtio: fix MAC table update + aff6953d46 vhost: cleanup resubmit info before inflight setup + 6267610fff vhost: fix build with GCC 13 + 9cdf5aecb2 mempool: replace GCC pragma with cast + a28f53ce9a hash: check name when creating a hash + 07f2ecf7b3 hash: fix return code description in Doxygen + a4a27aa3e0 app/testpmd: fix lcore ID restriction + fe02a92fcc net/iavf: remove outer UDP checksum offload for X710 VF + 9d02109d19 net/i40e: fix outer UDP checksum offload for X710 + b5fc38d53c net: fix outer UDP checksum in Intel prepare helper + 0c72bf4c1d app/testpmd: fix outer IP checksum offload + e48c887599 net/ice: fix check for outer UDP checksum offload + ac5a8e2ab5 net/axgbe: fix linkup in PHY status + e8615db990 net/axgbe: delay AN timeout during KR training + 59767090cb net/axgbe: fix Tx flow on 30H HW + 08c36b23fd net/axgbe: check only minimum speed for cables + 73e8b7893e net/axgbe: fix connection for SFP+ active cables + 39527c017d net/axgbe: fix SFP codes check for DAC cables + 07b6c26ee2 net/axgbe: enable PLL control for fixed PHY modes only + a1e4f462f3 net/axgbe: disable RRC for yellow carp devices + b07fc265f9 net/axgbe: disable interrupts during device removal + 6aad2db82c net/axgbe: update DMA coherency values + fac582ee7d net/axgbe: fix fluctuations for 1G Bel Fuse SFP + ad99680c1d net/axgbe: reset link when link never comes back + b43c908eb2 net/axgbe: fix MDIO access for non-zero ports and CL45 PHYs + 2f9077766d net/tap: fix file descriptor check in isolated flow + 19c6df9ce8 net/af_xdp: remove unused local statistic + 9e8bcd1f55 net/af_xdp: fix stats reset + 13ac242820 net/af_xdp: count mbuf allocation failures + 20c39f6a1b net/af_xdp: fix port ID in Rx mbuf + d00a311cfb doc: fix testpmd ring size command + a7e4b44736 net/af_packet: align Rx/Tx structs to cache line + ac72e81041 net/hns3: disable SCTP verification tag for RSS hash input + f8aabaa13a net/hns3: fix variable overflow + 8ec7b95de7 net/hns3: fix double free for Rx/Tx queue + afd6de50eb net/hns3: fix Rx timestamp flag + 419ac23f30 net/hns3: fix offload flag of IEEE 1588 + 6282bde96d net/bonding: fix failover time of LACP with mode 4 + 541c30f3d1 net/nfp: fix representor port queue release + 0257b6327a latencystats: fix literal float suffix + 9957ef4a31 eal/windows: install sched.h file + 7cc8737e52 net/virtio-user: add memcpy check + 461739eed3 pcapng: add memcpy check + 73c189a067 eal/unix: support ZSTD compression for firmware + 502049c8b3 bus/pci: fix build with musl 1.2.4 / Alpine 3.19 + 9dc3826a4a test: force IOVA mode on PPC64 without huge pages + a409653a12 test/crypto: fix vector global buffer overflow + 15658afda0 kni: fix build with Linux 6.8 + dbd8f39c7c version: 22.11.5 + 972d4c02ae net/mlx5: fix incorrect counter cache dereference + 83a31ef4d4 crypto/ipsec_mb: fix incorrectly setting cipher keys + 28d7ed224f version: 22.11.5-rc1 + bbf97a4279 net/mlx5: fix async flow create error handling + 2b8fe78bd7 net/mlx5: fix rollback on failed flow configure + 1cf5f52d1f net/mlx5: fix flow configure validation + efe1d783f4 examples/ipsec-secgw: fix typo in error message + 8a9e1d1f86 test/cfgfile: fix typo in error messages + f4e263b024 test/power: fix typo in error message + eebb999b45 doc: fix typo in packet framework guide + 16eac11cb6 doc: fix typo in profiling guide + e96de331da net/mlx5/hws: fix port ID for root table + 6409b26550 net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD + 3894e82ac5 net/ena: fix mbuf double free in fast free mode + 6e39ed2088 doc: fix 
default IP fragments maximum in programmer guide + 6a73ac3d7a examples/ipsec-secgw: fix Rx queue ID in Rx callback + aead7fda3f net/bnxt: fix number of Tx queues being created + f6a26c8864 net/mlx5: fix warning about copy length + 905283004d net/mlx5: fix drop action release timing + 0ddc41f5c6 net/mlx5: fix age position in hairpin split + 3b972375de net/mlx5: prevent ioctl failure log flooding + 3be62ef2f3 net/mlx5: fix template clean up of FDB control flow rule + 7858502f9a net/mlx5: fix DR context release ordering + fd3721be47 net/mlx5: fix IP-in-IP tunnels recognition + 03243a2773 net/mlx5: remove duplication of L3 flow item validation + bad500334c net/mlx5: fix meter policy priority + f5ff0aaf2e net/mlx5: fix VLAN ID in flow modify + 78b059d759 doc: update link to Windows DevX in mlx5 guide + ec91ab121a net/mlx5: fix HWS meter actions availability + c45c771584 net/hns3: support new device + 65b4f90e2e app/testpmd: fix error message for invalid option + 2eb0560cb0 app/testpmd: fix burst option parsing + d6cee7ffc0 app/testpmd: fix --stats-period option check + 75b8660005 net/nfp: fix switch domain free check + c5251ed8a6 net/ena/base: restructure interrupt handling + ad7591896c net/ena/base: limit exponential backoff + fd262a1e35 net/ena: fix fast mbuf free + 6d307d9eaa doc: add link speeds configuration in features table + d50c5dfbf4 doc: add traffic manager in features table + fc5f4ef40f net/hns3: enable PFC for all user priorities + a56f1654c4 crypto/qat: fix crash with CCM null AAD pointer + 642fff5862 examples/ipsec-secgw: fix cryptodev to SA mapping + a05a096f1d build: pass cflags in subproject + 244dbbf778 net/mlx5: fix flow counter cache starvation + bd00ef3b09 examples/l3fwd: fix Rx over not ready port + 8b642184a9 examples/packet_ordering: fix Rx with reorder mode disabled + bf8ce81fc0 test: do not count skipped tests as executed + a985d4953e net/ice: fix version for experimental symbols + bd47e113a2 baseband/fpga_5gnr_fec: use a better random generator + 49989423d0 net/tap: log Netlink extended ack unavailability + 7019fde3a9 test/bpf: fix mbuf init in some filter test + af4304e8c4 test/mbuf: fix external mbuf case with assert enabled + 0426b179f3 config: fix CPU instruction set for cross-build + ba823066bf bus/vdev: fix devargs in secondary process + 60ca8f9c76 test: fix probing in secondary process + fccac369b5 net/mlx5: remove device status check in flow creation + cc21b4a2d2 net/mlx5: fix counters map in bonding mode + e7b38d0f36 net/mlx5: fix VLAN handling in meter split + 325c88d078 net/mlx5/hws: enable multiple integrity items + ff41fc17f1 net/mlx5: fix connection tracking action validation + c2a6e4ddca net/mlx5: fix conntrack action handle representation + 25f2e79045 net/mlx5: fix condition of LACP miss flow + 38b2735798 net/mlx5/hws: fix VLAN inner type + afe47440d1 net/mlx5: prevent querying aged flows on uninit port + 3032a40f28 net/mlx5: fix error packets drop in regular Rx + 5126f7dd10 net/mlx5: fix use after free when releasing Tx queues + c3d7ceece3 net/mlx5/hws: fix VLAN item in non-relaxed mode + d458f6a235 net/mlx5/hws: check not supported fields in VXLAN + f29c2d6b20 common/cnxk: fix possible out-of-bounds access + a1f69f50e1 common/cnxk: remove dead code + 66dffe8a6d common/cnxk: fix link config for SDP + ba9eb97868 net/cnxk: fix mbuf fields in multi-segment Tx + 430b2766ce common/cnxk: fix mbox struct attributes + df9a4e91c6 net/cnxk: add cookies check for multi-segment offload + ec9d6236d0 common/cnxk: fix RSS RETA configuration + a2ec8fb5fa net/cnxk: 
fix MTU limit + edf3fc5d23 common/cnxk: fix Tx MTU configuration + 36157ca75c net/cnxk: fix buffer size configuration + 41dcb6a83d common/cnxk: remove CN9K inline IPsec FP opcodes + fdc5c7c46a net/bnx2x: fix warnings about memcpy lengths + 4e873a75e2 net/ice: remove incorrect 16B descriptor read block + 0d2f5b4ca1 net/iavf: remove incorrect 16B descriptor read block + 614a3bf8e0 net/i40e: remove incorrect 16B descriptor read block + f7667994bd net/ixgbe: increase VF reset timeout + baf1699534 net/iavf: remove error logs for VLAN offloading + 3a2d35d6e9 net/ixgbevf: fix RSS init for x550 NICs + 100e77a19e net/bnxt: fix null pointer dereference + 48309178df net/tap: fix traffic control handle calculation + 337a1fa674 net/tap: do not overwrite flow API errors + 1b8a6de75f app/testpmd: fix async flow create failure handling + 0c972805e1 app/testpmd: return if no packets in GRO heavy weight mode + fde6a99f1c net/af_xdp: fix leak on XSK configuration failure + d393058761 doc: fix typos in cryptodev overview + 3ebfe50507 app/crypto-perf: add missing op resubmission + 211da2b3c8 app/crypto-perf: fix out-of-place mbuf size + 2e6e29e797 app/crypto-perf: fix copy segment size + 85bd236b25 eventdev/crypto: fix enqueueing + 7a45bfba9c eventdev: fix Doxygen processing of vector struct + ad39899ad8 eventdev: improve Doxygen comments on configure struct + 3cdb7e6d44 test/event: fix crash in Tx adapter freeing + ede7aa11ae event/dlb2: remove superfluous memcpy + 9507b460c3 doc: fix configuration in baseband 5GNR driver guide + fc8e14108f baseband/acc: fix common logs + 12cbf90bfa baseband/acc: fix logtypes register + ab4db7ac79 Revert "build: add libarchive to optional external dependencies" + 782b5a22bc net/nfp: free switch domain ID on close + 51b7521551 net/nfp: fix device resource freeing + 72254c7736 net/nfp: fix device close + b2d4e3c5da net/nfp: fix resource leak for exit of flower firmware + eb3dcfafdb net/nfp: fix resource leak for exit of CoreNIC firmware + bd903bd459 net/nfp: fix resource leak for flower firmware + f12ad3f05d net/nfp: fix resource leak for CoreNIC firmware + 1997b844bf vhost: fix virtqueue access check in vhost-user setup + aa166bc2d3 app/testpmd: fix GRO packets flush on timeout + b14df7f20a net/mlx5: fix stats query crash in secondary process + c7f17de89d net/mlx5: fix GENEVE option item translation + 37b1f135bf net/mlx5: remove GENEVE options length limitation + 02373cf8c2 net/mlx5/hws: fix tunnel protocol checks + 77d9b5e6d6 common/mlx5: fix duplicate read of general capabilities + b328c4501c net/mlx5: fix GENEVE TLV option management + 5300887877 net/mlx5: fix jump action validation + 3294358b40 common/cnxk: fix VLAN check for inner header + 079911f9f2 common/cnxk: fix mbox region copy + f1110aacd8 net/thunderx: fix DMAC control register update + d7cb16965b net/cnxk: fix flow RSS configuration + 51f2cd1ff7 net/bnxt: fix deadlock in ULP timer callback + d71a770231 net/bnxt: modify locking for representor Tx + e2cfac4357 net/bnxt: fix backward firmware compatibility + 61fe746549 net/bnxt: fix speed change from 200G to 25G on Thor + 66f4540fa5 net/bnxt: fix 50G and 100G forced speed + f94b1dffa1 net/bnxt: fix array overflow + a254412faf net/netvsc: fix VLAN metadata parsing + 6ccc096367 net: add macros for VLAN metadata parsing + 8603dea8c5 net/memif: fix extra mbuf refcnt update in zero copy Tx + f90918d9f4 common/sfc_efx/base: use C11 static assert + 706c98e249 net/mana: handle MR cache expansion failure + 97789491e6 net/mana: fix memory leak on MR allocation + 32bf2f21f5 
net/bonding: fix flow count query + 3bd12069ce net/ionic: fix device close + 64baeefd1f net/ionic: fix RSS query + e15d5a01b1 net/ionic: fix missing volatile type for cqe pointers + 0bef99c53e app/testpmd: fix crash in multi-process forwarding + cf7a7b8134 drivers/net: fix buffer overflow for packet types list + d3273299a7 net/mana: prevent values overflow returned from RDMA layer + ef5454bbab net/vmxnet3: fix initialization on FreeBSD + 6d4646c0f7 app/testpmd: hide --bitrate-stats in help if disabled + f3e5b61f91 doc: add --latencystats option in testpmd guide + 1822aa59af net/hns3: remove QinQ insert support for VF + 75120a950e net/hns3: fix reset level comparison + d043fdd66e net/hns3: fix disable command with firmware + cfeb27992c net/hns3: fix VF multiple count on one reset + f644df1142 net/hns3: refactor handle mailbox function + 5371898fa5 net/hns3: refactor send mailbox function + b089a189d5 net/hns3: refactor PF mailbox message struct + 0a6c163f93 net/hns3: refactor VF mailbox message struct + 173ff75d74 net/af_xdp: fix memzone leak on config failure + bc1e0ee2a3 net/nfp: fix resource leak for PF initialization + 636a019a74 ethdev: fix NVGRE encap flow action description + 97986481e9 doc: fix commands in eventdev test tool guide + 1e4f46e6f9 test/event: skip test if no driver is present + 8dd3b5b4ab event/cnxk: fix dequeue timeout configuration + 7c0e5bd8ed app/crypto-perf: fix encrypt operation verification + 642af46b13 app/crypto-perf: fix data comparison + ebfa161069 app/crypto-perf: fix next segment mbuf + 3d8b8805d8 common/cnxk: fix memory leak in CPT init + c5db2d6897 examples/ipsec-secgw: fix width of variables + fac0d1f057 cryptodev: remove unused extern variable + 86389c2bd7 vhost: fix memory leak in Virtio Tx split path + 49dfcc6ae2 vdpa/mlx5: fix queue enable drain CQ + 9b6bdd022b vhost: fix deadlock during vDPA SW live migration + 453678fc16 net/virtio: remove duplicate queue xstats + 3dab00d897 net: fix TCP/UDP checksum with padding data + 0ceab70d2c build: link static libs with whole-archive in subproject + 00a4d8525c build: fix linker warnings about undefined symbols + 0b4a4ace22 net/nfp: fix calloc parameters + 5cad6d4260 net/bnx2x: fix calloc parameters + 3dba8ba797 common/mlx5: fix calloc parameters + 477478f179 rawdev: fix calloc parameters + ba907dffe5 dmadev: fix calloc parameters + 5b9e4068dc eventdev: fix calloc parameters + eb18df799b pipeline: fix calloc parameters + e0cc2baf48 examples/vhost: verify strdup return + 7d35dff74c examples/qos_sched: fix memory leak in args parsing + 25bcc3c5b7 test: verify strdup return + c5b1021624 app/testpmd: verify strdup return + 3baa529086 app/crypto-perf: verify strdup return + 81314db4bd app/pdump: verify strdup return + 8f29f8d800 app/dumpcap: verify strdup return + ab86b4a27a net/failsafe: fix memory leak in args parsing + 18d9de6df8 event/cnxk: verify strdup return + 705ab71cea dma/idxd: verify strdup return + 915eda4e5a bus/vdev: verify strdup return + f0becad294 bus/fslmc: verify strdup return + 8d6011441b bus/dpaa: verify strdup return + 10d9146d6d eal: verify strdup return + 0de12cad1b event/opdl: fix compile-time check + 3c9e08f6d3 eal/x86: add AMD vendor check for TSC calibration + 8661160ad3 ci: update versions of actions in GHA + 1d8abba661 telemetry: fix empty JSON dictionaries + 5543f65075 telemetry: fix connected clients count + d9ce83470f build: fix reasons conflict + 7d1744b05c kernel/freebsd: fix module build on FreeBSD 14 + 65973f73ee net/ice: fix memory leaks + 1a93dbe667 net/ice: fix tunnel TSO 
capabilities + 51cb79315c net/ice: fix link update + 6bc4e06ef9 net/ixgbe: fix memory leak after device init failure + ae8369536d net/iavf: fix memory leak on security context error + 599593804b net/i40e: remove redundant judgment in flow parsing + fc8cfb6a22 dma/dpaa2: fix logtype register + cfef91b1e0 bus/ifpga: remove dead code + d0ec6c1277 lib: remove redundant newline from logs + 31a09fab4b lib: add newline in logs + 88ba671592 lib: use dedicated logtypes and macros + cf93b45a70 regexdev: fix logtype register + f66284d141 hash: remove some dead code + + * Tue Oct 22 2024 Open vSwitch CI - 3.1.0-134 - Merging upstream branch-3.1 [RH git: baba56c67e] Commit list: