From d76f5049dee35d9961f10132ea78622cc8c866cb Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Apr 10 2018 09:35:53 +0000 Subject: import dpdk-17.11-7.el7 --- diff --git a/.dpdk.metadata b/.dpdk.metadata index 9bd3682..95f4f73 100644 --- a/.dpdk.metadata +++ b/.dpdk.metadata @@ -1 +1 @@ -59003b90b0037e2e88f9819b9305529717261562 SOURCES/dpdk-stable-16.11.2.tar.xz +d6eaf8102983208e0ab7f6aa1643c949f6eca2e8 SOURCES/dpdk-17.11.tar.xz diff --git a/.gitignore b/.gitignore index b0a0dd7..9de4691 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/dpdk-stable-16.11.2.tar.xz +SOURCES/dpdk-17.11.tar.xz diff --git a/SOURCES/0001-bus-pci-forbid-IOVA-mode-if-IOMMU-address-width-too-.patch b/SOURCES/0001-bus-pci-forbid-IOVA-mode-if-IOMMU-address-width-too-.patch new file mode 100644 index 0000000..4394bdf --- /dev/null +++ b/SOURCES/0001-bus-pci-forbid-IOVA-mode-if-IOMMU-address-width-too-.patch @@ -0,0 +1,148 @@ +From 781bb36add9e43907a16a1303a13808ae53cfa31 Mon Sep 17 00:00:00 2001 +From: Maxime Coquelin +Date: Fri, 12 Jan 2018 11:22:20 +0100 +Subject: [PATCH] bus/pci: forbid IOVA mode if IOMMU address width too small + +[ upstream commit 54a328f552ff2e0098c3f96f9e32302675f2bcf4 ] + +Intel VT-d supports different address widths for the IOVAs, from +39 bits to 56 bits. + +While recent processors support at least 48 bits, VT-d emulation +currently only supports 39 bits. It makes DMA mapping to fail in this +case when using VA as IOVA mode, as user-space virtual addresses uses +up to 47 bits (see kernel's Documentation/x86/x86_64/mm.txt). + +This patch parses VT-d CAP register value available in sysfs, and +forbid VA as IOVA mode if the GAW is 39 bits or unknown. + +Fixes: f37dfab21c98 ("drivers/net: enable IOVA mode for Intel PMDs") + +Signed-off-by: Maxime Coquelin +Tested-by: Chas Williams +--- + drivers/bus/pci/linux/pci.c | 90 ++++++++++++++++++++++++++++++++++++++++----- + 1 file changed, 81 insertions(+), 9 deletions(-) + +diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c +index ec31216..74deef3 100644 +--- a/drivers/bus/pci/linux/pci.c ++++ b/drivers/bus/pci/linux/pci.c +@@ -577,4 +577,80 @@ + } + ++#if defined(RTE_ARCH_X86) ++static bool ++pci_one_device_iommu_support_va(struct rte_pci_device *dev) ++{ ++#define VTD_CAP_MGAW_SHIFT 16 ++#define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT) ++#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */ ++ struct rte_pci_addr *addr = &dev->addr; ++ char filename[PATH_MAX]; ++ FILE *fp; ++ uint64_t mgaw, vtd_cap_reg = 0; ++ ++ snprintf(filename, sizeof(filename), ++ "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap", ++ rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid, ++ addr->function); ++ if (access(filename, F_OK) == -1) { ++ /* We don't have an Intel IOMMU, assume VA supported*/ ++ return true; ++ } ++ ++ /* We have an intel IOMMU */ ++ fp = fopen(filename, "r"); ++ if (fp == NULL) { ++ RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename); ++ return false; ++ } ++ ++ if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) { ++ RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename); ++ fclose(fp); ++ return false; ++ } ++ ++ fclose(fp); ++ ++ mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1; ++ if (mgaw < X86_VA_WIDTH) ++ return false; ++ ++ return true; ++} ++#elif defined(RTE_ARCH_PPC_64) ++static bool ++pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev) ++{ ++ return false; ++} ++#else ++static bool 
++pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev) ++{ ++ return true; ++} ++#endif ++ ++/* ++ * All devices IOMMUs support VA as IOVA ++ */ ++static bool ++pci_devices_iommu_support_va(void) ++{ ++ struct rte_pci_device *dev = NULL; ++ struct rte_pci_driver *drv = NULL; ++ ++ FOREACH_DRIVER_ON_PCIBUS(drv) { ++ FOREACH_DEVICE_ON_PCIBUS(dev) { ++ if (!rte_pci_match(drv, dev)) ++ continue; ++ if (!pci_one_device_iommu_support_va(dev)) ++ return false; ++ } ++ } ++ return true; ++} ++ + /* + * Get iommu class of PCI devices on the bus. +@@ -587,10 +663,5 @@ enum rte_iova_mode + bool has_iova_va; + bool is_bound_uio; +- bool spapr_iommu = +-#if defined(RTE_ARCH_PPC_64) +- true; +-#else +- false; +-#endif ++ bool iommu_no_va; + + is_bound = pci_one_device_is_bound(); +@@ -600,4 +671,5 @@ enum rte_iova_mode + has_iova_va = pci_one_device_has_iova_va(); + is_bound_uio = pci_one_device_bound_uio(); ++ iommu_no_va = !pci_devices_iommu_support_va(); + #ifdef VFIO_PRESENT + is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ? +@@ -606,5 +678,5 @@ enum rte_iova_mode + + if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled && +- !spapr_iommu) ++ !iommu_no_va) + return RTE_IOVA_VA; + +@@ -615,6 +687,6 @@ enum rte_iova_mode + if (is_bound_uio) + RTE_LOG(WARNING, EAL, "few device bound to UIO\n"); +- if (spapr_iommu) +- RTE_LOG(WARNING, EAL, "sPAPR IOMMU does not support IOVA as VA\n"); ++ if (iommu_no_va) ++ RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n"); + } + +-- +1.8.3.1 + diff --git a/SOURCES/0001-vhost_user_protect_active_rings_from_async_ring_changes.patch b/SOURCES/0001-vhost_user_protect_active_rings_from_async_ring_changes.patch new file mode 100644 index 0000000..40222c0 --- /dev/null +++ b/SOURCES/0001-vhost_user_protect_active_rings_from_async_ring_changes.patch @@ -0,0 +1,311 @@ +From patchwork Wed Jan 17 13:49:25 2018 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Subject: [dpdk-dev, + v5] vhost_user: protect active rings from async ring changes +From: Victor Kaplansky +X-Patchwork-Id: 33921 +X-Patchwork-Delegate: yuanhan.liu@linux.intel.com +Message-Id: <20180117154925-mutt-send-email-victork@redhat.com> +List-Id: dev.dpdk.org +To: dev@dpdk.org +Cc: stable@dpdk.org, Jens Freimann , + Maxime Coquelin , + Yuanhan Liu , Tiwei Bie , + "Tan, Jianfeng" , + Stephen Hemminger , + Victor Kaplansky +Date: Wed, 17 Jan 2018 15:49:25 +0200 + +When performing live migration or memory hot-plugging, +the changes to the device and vrings made by message handler +done independently from vring usage by PMD threads. + +This causes for example segfaults during live-migration +with MQ enable, but in general virtually any request +sent by qemu changing the state of device can cause +problems. + +These patches fixes all above issues by adding a spinlock +to every vring and requiring message handler to start operation +only after ensuring that all PMD threads related to the device +are out of critical section accessing the vring data. + +Each vring has its own lock in order to not create contention +between PMD threads of different vrings and to prevent +performance degradation by scaling queue pair number. + +See https://bugzilla.redhat.com/show_bug.cgi?id=1450680 + +Signed-off-by: Victor Kaplansky +Reviewed-by: Maxime Coquelin +--- +v5: + o get rid of spinlock wrapping functions in vhost.h + +v4: + + o moved access_unlock before accessing enable flag and + access_unlock after iommu_unlock consistently. 
+ o cosmetics: removed blank line. + o the access_lock variable moved to be in the same + cache line with enable and access_ok flags. + o dequeue path is now guarded with trylock and returning + zero if unsuccessful. + o GET_VRING_BASE operation is not guarded by access lock + to avoid deadlock with device_destroy. See the comment + in the code. + o Fixed error path exit from enqueue and dequeue carefully + unlocking access and iommu locks as appropriate. + +v3: + o Added locking to enqueue flow. + o Enqueue path guarded as well as dequeue path. + o Changed name of active_lock. + o Added initialization of guarding spinlock. + o Reworked functions skimming over all virt-queues. + o Performance measurements done by Maxime Coquelin shows + no degradation in bandwidth and throughput. + o Spelling. + o Taking lock only on set operations. + o IOMMU messages are not guarded by access lock. + +v2: + o Fixed checkpatch complains. + o Added Signed-off-by. + o Refined placement of guard to exclude IOMMU messages. + o TODO: performance degradation measurement. + + dpdk-17.11/lib/librte_vhost/vhost.h | 6 ++-- + dpdk-17.11/lib/librte_vhost/vhost.c | 1 + + dpdk-17.11/lib/librte_vhost/vhost_user.c | 70 ++++++++++++++++++++++++++++++++ + dpdk-17.11/lib/librte_vhost/virtio_net.c | 28 ++++++++++++++--- + 4 files changed, 99 insertions(+), 6 deletions(-) + +diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h +index 1cc81c17..c8f2a817 100644 +--- a/lib/librte_vhost/vhost.h ++++ b/lib/librte_vhost/vhost.h +@@ -108,12 +108,14 @@ struct vhost_virtqueue { + + /* Backend value to determine if device should started/stopped */ + int backend; ++ int enabled; ++ int access_ok; ++ rte_spinlock_t access_lock; ++ + /* Used to notify the guest (trigger interrupt) */ + int callfd; + /* Currently unused as polling mode is enabled */ + int kickfd; +- int enabled; +- int access_ok; + + /* Physical address of used ring, for logging */ + uint64_t log_guest_addr; +diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c +index 4f8b73a0..dcc42fc7 100644 +--- a/lib/librte_vhost/vhost.c ++++ b/lib/librte_vhost/vhost.c +@@ -259,6 +259,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx) + + dev->virtqueue[vring_idx] = vq; + init_vring_queue(dev, vring_idx); ++ rte_spinlock_init(&vq->access_lock); + + dev->nr_vring += 1; + +diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c +index f4c7ce46..0685d4e7 100644 +--- a/lib/librte_vhost/vhost_user.c ++++ b/lib/librte_vhost/vhost_user.c +@@ -1190,12 +1190,47 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg) + return alloc_vring_queue(dev, vring_idx); + } + ++static void ++vhost_user_lock_all_queue_pairs(struct virtio_net *dev) ++{ ++ unsigned int i = 0; ++ unsigned int vq_num = 0; ++ ++ while (vq_num < dev->nr_vring) { ++ struct vhost_virtqueue *vq = dev->virtqueue[i]; ++ ++ if (vq) { ++ rte_spinlock_lock(&vq->access_lock); ++ vq_num++; ++ } ++ i++; ++ } ++} ++ ++static void ++vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) ++{ ++ unsigned int i = 0; ++ unsigned int vq_num = 0; ++ ++ while (vq_num < dev->nr_vring) { ++ struct vhost_virtqueue *vq = dev->virtqueue[i]; ++ ++ if (vq) { ++ rte_spinlock_unlock(&vq->access_lock); ++ vq_num++; ++ } ++ i++; ++ } ++} ++ + int + vhost_user_msg_handler(int vid, int fd) + { + struct virtio_net *dev; + struct VhostUserMsg msg; + int ret; ++ int unlock_required = 0; + + dev = get_device(vid); + if (dev == NULL) +@@ -1241,6 +1276,38 @@ 
vhost_user_msg_handler(int vid, int fd) + return -1; + } + ++ /* ++ * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE, ++ * since it is sent when virtio stops and device is destroyed. ++ * destroy_device waits for queues to be inactive, so it is safe. ++ * Otherwise taking the access_lock would cause a dead lock. ++ */ ++ switch (msg.request.master) { ++ case VHOST_USER_SET_FEATURES: ++ case VHOST_USER_SET_PROTOCOL_FEATURES: ++ case VHOST_USER_SET_OWNER: ++ case VHOST_USER_RESET_OWNER: ++ case VHOST_USER_SET_MEM_TABLE: ++ case VHOST_USER_SET_LOG_BASE: ++ case VHOST_USER_SET_LOG_FD: ++ case VHOST_USER_SET_VRING_NUM: ++ case VHOST_USER_SET_VRING_ADDR: ++ case VHOST_USER_SET_VRING_BASE: ++ case VHOST_USER_SET_VRING_KICK: ++ case VHOST_USER_SET_VRING_CALL: ++ case VHOST_USER_SET_VRING_ERR: ++ case VHOST_USER_SET_VRING_ENABLE: ++ case VHOST_USER_SEND_RARP: ++ case VHOST_USER_NET_SET_MTU: ++ case VHOST_USER_SET_SLAVE_REQ_FD: ++ vhost_user_lock_all_queue_pairs(dev); ++ unlock_required = 1; ++ break; ++ default: ++ break; ++ ++ } ++ + switch (msg.request.master) { + case VHOST_USER_GET_FEATURES: + msg.payload.u64 = vhost_user_get_features(dev); +@@ -1342,6 +1409,9 @@ vhost_user_msg_handler(int vid, int fd) + + } + ++ if (unlock_required) ++ vhost_user_unlock_all_queue_pairs(dev); ++ + if (msg.flags & VHOST_USER_NEED_REPLY) { + msg.payload.u64 = !!ret; + msg.size = sizeof(msg.payload.u64); +diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c +index 6fee16e5..e09a927d 100644 +--- a/lib/librte_vhost/virtio_net.c ++++ b/lib/librte_vhost/virtio_net.c +@@ -44,6 +44,7 @@ + #include + #include + #include ++#include + + #include "iotlb.h" + #include "vhost.h" +@@ -326,8 +327,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, + } + + vq = dev->virtqueue[queue_id]; ++ ++ rte_spinlock_lock(&vq->access_lock); ++ + if (unlikely(vq->enabled == 0)) +- return 0; ++ goto out_access_unlock; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); +@@ -419,6 +423,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + ++out_access_unlock: ++ rte_spinlock_unlock(&vq->access_lock); ++ + return count; + } + +@@ -651,8 +658,11 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, + } + + vq = dev->virtqueue[queue_id]; ++ ++ rte_spinlock_lock(&vq->access_lock); ++ + if (unlikely(vq->enabled == 0)) +- return 0; ++ goto out_access_unlock; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); +@@ -715,6 +725,9 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + ++out_access_unlock: ++ rte_spinlock_unlock(&vq->access_lock); ++ + return pkt_idx; + } + +@@ -1180,9 +1193,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, + } + + vq = dev->virtqueue[queue_id]; +- if (unlikely(vq->enabled == 0)) ++ ++ if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)) + return 0; + ++ if (unlikely(vq->enabled == 0)) ++ goto out_access_unlock; ++ + vq->batch_copy_nb_elems = 0; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) +@@ -1240,7 +1257,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, + if (rarp_mbuf == NULL) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to allocate memory for mbuf.\n"); +- return 0; ++ goto out; + } + + if (make_rarp_packet(rarp_mbuf, &dev->mac)) { +@@ -1356,6 +1373,9 
@@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + ++out_access_unlock: ++ rte_spinlock_unlock(&vq->access_lock); ++ + if (unlikely(rarp_mbuf != NULL)) { + /* + * Inject it to the head of "pkts" array, so that switch's mac diff --git a/SOURCES/arm64-armv8a-linuxapp-gcc-config b/SOURCES/arm64-armv8a-linuxapp-gcc-config new file mode 100644 index 0000000..90863d5 --- /dev/null +++ b/SOURCES/arm64-armv8a-linuxapp-gcc-config @@ -0,0 +1,559 @@ +# -*- cfg-sha: 2543d3fdeee262a6a7fdcdd19e5c36cde5ae450d4cdf35a4a4af438710180e98 +# BSD LICENSE +# Copyright (C) Cavium, Inc 2015. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Cavium, Inc nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright (C) Cavium, Inc 2017. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Cavium, Inc nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright(c) 2010-2016 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright(c) 2010-2017 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# RTE_EXEC_ENV values are the directories in mk/exec-env/ +CONFIG_RTE_EXEC_ENV="linuxapp" +# RTE_ARCH values are architecture we compile for. directories in mk/arch/ +CONFIG_RTE_ARCH="arm64" +# machine can define specific variables or action for a specific board +# RTE_MACHINE values are architecture we compile for. directories in mk/machine/ +CONFIG_RTE_MACHINE="armv8a" +# The compiler we use. +# RTE_TOOLCHAIN values are architecture we compile for. directories in mk/toolchain/ +CONFIG_RTE_TOOLCHAIN="gcc" +# Use intrinsics or assembly code for key routines +CONFIG_RTE_FORCE_INTRINSICS=y +# Machine forces strict alignment constraints. +CONFIG_RTE_ARCH_STRICT_ALIGN=n +# Compile to share library +CONFIG_RTE_BUILD_SHARED_LIB=y +# Use newest code breaking previous ABI +CONFIG_RTE_NEXT_ABI=n +# Major ABI to overwrite library specific LIBABIVER +CONFIG_RTE_MAJOR_ABI= +# Machine's cache line size +CONFIG_RTE_CACHE_LINE_SIZE=128 +# Compile Environment Abstraction Layer +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=128 +CONFIG_RTE_MAX_NUMA_NODES=8 +CONFIG_RTE_MAX_MEMSEG=256 +CONFIG_RTE_MAX_MEMZONE=2560 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_ENABLE_ASSERT=n +CONFIG_RTE_LOG_LEVEL=RTE_LOG_INFO +CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_BACKTRACE=y +CONFIG_RTE_LIBEAL_USE_HPET=n +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n +CONFIG_RTE_EAL_IGB_UIO=n +CONFIG_RTE_EAL_VFIO=y +CONFIG_RTE_MALLOC_DEBUG=n +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y +# Recognize/ignore architecture we compile for. AVX/AVX512 CPU flags for performance/power testing. +# AVX512 is marked as experimental for now, will enable it after enough +# field test and possible optimization. +CONFIG_RTE_ENABLE_AVX=y +CONFIG_RTE_ENABLE_AVX512=n +# Default driver path (or "" to disable) +CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds" +# Compile Environment Abstraction Layer to support Vmware TSC map +CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=y +# Compile architecture we compile for. PCI library +CONFIG_RTE_LIBRTE_PCI=y +# Compile architecture we compile for. argument parser library +CONFIG_RTE_LIBRTE_KVARGS=y +# Compile generic ethernet library +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_MAX_QUEUES_PER_PORT=1024 +CONFIG_RTE_LIBRTE_IEEE1588=n +CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 +CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y +CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS=n +# Turn off Tx preparation stage +# Warning: rte_eth_tx_prepare() can be safely disabled only if using a +# driver which do not implement any Tx preparation. +CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n +# Compile PCI bus driver +CONFIG_RTE_LIBRTE_PCI_BUS=y +# Compile architecture we compile for. 
vdev bus +CONFIG_RTE_LIBRTE_VDEV_BUS=y +# Compile burst-oriented Amazon ENA PMD driver +CONFIG_RTE_LIBRTE_ENA_PMD=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n +# Compile burst-oriented IGB & EM PMD drivers +CONFIG_RTE_LIBRTE_EM_PMD=n +CONFIG_RTE_LIBRTE_IGB_PMD=y +CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n +# Compile burst-oriented IXGBE PMD driver +CONFIG_RTE_LIBRTE_IXGBE_PMD=y +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n +CONFIG_RTE_IXGBE_INC_VECTOR=y +CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n +# Compile burst-oriented I40E PMD driver +CONFIG_RTE_LIBRTE_I40E_PMD=y +CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n +CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n +CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y +CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y +CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64 +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4 +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4 +# interval up to 8160 us, aligned to 2 (or default value) +CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1 +# Compile burst-oriented FM10K PMD +CONFIG_RTE_LIBRTE_FM10K_PMD=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_RX=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y +CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y +# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD +CONFIG_RTE_LIBRTE_MLX4_PMD=n +CONFIG_RTE_LIBRTE_MLX4_DEBUG=n +CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n +CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 +# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD +CONFIG_RTE_LIBRTE_MLX5_PMD=n +CONFIG_RTE_LIBRTE_MLX5_DEBUG=n +CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 +# Compile burst-oriented Broadcom PMD driver +CONFIG_RTE_LIBRTE_BNX2X_PMD=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_RX=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_TX=n +CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n +# Compile burst-oriented Chelsio Terminator (CXGBE) PMD +CONFIG_RTE_LIBRTE_CXGBE_PMD=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_CXGBE_TPUT=y +# Compile burst-oriented Cisco ENIC PMD driver +CONFIG_RTE_LIBRTE_ENIC_PMD=n +CONFIG_RTE_LIBRTE_ENIC_DEBUG=n +CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW=n +# Compile burst-oriented Netronome NFP PMD driver +CONFIG_RTE_LIBRTE_NFP_PMD=n +CONFIG_RTE_LIBRTE_NFP_DEBUG=n +# Compile Marvell PMD driver +CONFIG_RTE_LIBRTE_MRVL_PMD=n +# Compile burst-oriented Broadcom BNXT PMD driver +CONFIG_RTE_LIBRTE_BNXT_PMD=n +# Compile burst-oriented Solarflare libefx-based PMD +CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n +CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n +# Compile SOFTNIC PMD +CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y +# Compile software PMD backed by SZEDATA2 device +CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n +# Defines firmware type 
address space. +# See documentation for supported values. +# Other values raise compile time error. +CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0 +# Compile burst-oriented Cavium Thunderx NICVF PMD driver +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n +# Compile burst-oriented Cavium LiquidIO PMD driver +CONFIG_RTE_LIBRTE_LIO_PMD=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n +# NXP DPAA Bus +CONFIG_RTE_LIBRTE_DPAA_BUS=n +CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n +CONFIG_RTE_LIBRTE_DPAA_PMD=n +# Compile burst-oriented Cavium OCTEONTX network PMD driver +CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_RX=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_TX=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_MBOX=n +# Compile NXP DPAA2 FSL-MC Bus +CONFIG_RTE_LIBRTE_FSLMC_BUS=n +# Compile Support Libraries for NXP DPAA2 +CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n +CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y +# Compile burst-oriented NXP DPAA2 PMD driver +CONFIG_RTE_LIBRTE_DPAA2_PMD=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n +# Compile burst-oriented VIRTIO PMD driver +CONFIG_RTE_LIBRTE_VIRTIO_PMD=y +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_RX=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_TX=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n +# Compile virtio device emulation inside virtio PMD driver +CONFIG_RTE_VIRTIO_USER=y +# Compile burst-oriented VMXNET3 PMD driver +CONFIG_RTE_LIBRTE_VMXNET3_PMD=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_RX=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_DRIVER=n +# Compile example software rings based PMD +CONFIG_RTE_LIBRTE_PMD_RING=y +CONFIG_RTE_PMD_RING_MAX_RX_RINGS=16 +CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16 +# Compile software PMD backed by PCAP files +CONFIG_RTE_LIBRTE_PMD_PCAP=n +# Compile link bonding PMD library +CONFIG_RTE_LIBRTE_PMD_BOND=n +CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n +CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n +# QLogic 10G/25G/40G/50G/100G PMD +CONFIG_RTE_LIBRTE_QEDE_PMD=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH=y +#Provides abs path/name of architecture we compile for. firmware file. 
+#Empty string denotes driver will use default firmware +CONFIG_RTE_LIBRTE_QEDE_FW="" +# Compile software PMD backed by AF_PACKET sockets (Linux only) +CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n +# Compile ARK PMD +CONFIG_RTE_LIBRTE_ARK_PMD=n +CONFIG_RTE_LIBRTE_ARK_PAD_TX=y +CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n +# Compile WRS accelerated virtual port (AVP) guest PMD driver +CONFIG_RTE_LIBRTE_AVP_PMD=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_DRIVER=y +CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n +# Compile architecture we compile for. TAP PMD +# It is enabled by default for Linux only. +CONFIG_RTE_LIBRTE_PMD_TAP=n +# Compile null PMD +CONFIG_RTE_LIBRTE_PMD_NULL=n +# Compile fail-safe PMD +CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y +# Do prefetch of packet data within PMD driver receive function +CONFIG_RTE_PMD_PACKET_PREFETCH=y +# Compile generic crypto device library +CONFIG_RTE_LIBRTE_CRYPTODEV=y +CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n +CONFIG_RTE_CRYPTO_MAX_DEVS=64 +CONFIG_RTE_CRYPTODEV_NAME_LEN=64 +# Compile PMD for ARMv8 Crypto device +CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n +# Compile NXP DPAA2 crypto sec driver for CAAM HW +CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n +# NXP DPAA caam - crypto driver +CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n +# Compile PMD for QuickAssist based devices +CONFIG_RTE_LIBRTE_PMD_QAT=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER=n +# Number of sessions to create in architecture we compile for. session memory pool +# on a single QuickAssist device. 
+CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048 +# Compile PMD for AESNI backed device +CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n +CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n +# Compile PMD for Software backed device +CONFIG_RTE_LIBRTE_PMD_OPENSSL=n +CONFIG_RTE_LIBRTE_PMD_OPENSSL_DEBUG=n +# Compile PMD for AESNI GCM device +CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n +CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n +# Compile PMD for SNOW 3G device +CONFIG_RTE_LIBRTE_PMD_SNOW3G=n +CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n +# Compile PMD for KASUMI device +CONFIG_RTE_LIBRTE_PMD_KASUMI=n +CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n +# Compile PMD for ZUC device +CONFIG_RTE_LIBRTE_PMD_ZUC=n +CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n +# Compile PMD for Crypto Scheduler device +CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n +CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n +# Compile PMD for NULL Crypto device +CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n +# Compile PMD for Marvell Crypto device +CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n +# Compile generic security library +CONFIG_RTE_LIBRTE_SECURITY=y +# Compile generic event device library +CONFIG_RTE_LIBRTE_EVENTDEV=y +CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n +CONFIG_RTE_EVENT_MAX_DEVS=16 +CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64 +# Compile PMD for skeleton event device +CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=n +CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n +# Compile PMD for software event device +CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n +CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV_DEBUG=n +# Compile PMD for octeontx sso event device +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG=n +# Compile librte_ring +CONFIG_RTE_LIBRTE_RING=y +# Compile librte_mempool +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n +# Compile Mempool drivers +CONFIG_RTE_DRIVER_MEMPOOL_RING=y +CONFIG_RTE_DRIVER_MEMPOOL_STACK=y +# Compile PMD for octeontx fpa mempool device +CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=y +CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG=n +# Compile librte_mbuf +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 +# Compile librte_timer +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n +# Compile librte_cfgfile +CONFIG_RTE_LIBRTE_CFGFILE=y +# Compile librte_cmdline +CONFIG_RTE_LIBRTE_CMDLINE=y +CONFIG_RTE_LIBRTE_CMDLINE_DEBUG=n +# Compile librte_hash +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n +# Compile librte_efd +CONFIG_RTE_LIBRTE_EFD=y +# Compile librte_member +CONFIG_RTE_LIBRTE_MEMBER=y +# Compile librte_jobstats +CONFIG_RTE_LIBRTE_JOBSTATS=y +# Compile architecture we compile for. device metrics library +CONFIG_RTE_LIBRTE_METRICS=y +# Compile architecture we compile for. bitrate statistics library +CONFIG_RTE_LIBRTE_BITRATE=y +# Compile architecture we compile for. 
latency statistics library +CONFIG_RTE_LIBRTE_LATENCY_STATS=y +# Compile librte_lpm +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n +# Compile librte_acl +CONFIG_RTE_LIBRTE_ACL=y +CONFIG_RTE_LIBRTE_ACL_DEBUG=n +# Compile librte_power +CONFIG_RTE_LIBRTE_POWER=y +CONFIG_RTE_LIBRTE_POWER_DEBUG=n +CONFIG_RTE_MAX_LCORE_FREQS=64 +# Compile librte_net +CONFIG_RTE_LIBRTE_NET=y +# Compile librte_ip_frag +CONFIG_RTE_LIBRTE_IP_FRAG=y +CONFIG_RTE_LIBRTE_IP_FRAG_DEBUG=n +CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4 +CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n +# Compile GRO library +CONFIG_RTE_LIBRTE_GRO=y +# Compile GSO library +CONFIG_RTE_LIBRTE_GSO=y +# Compile librte_meter +CONFIG_RTE_LIBRTE_METER=y +# Compile librte_classify +CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y +# Compile librte_sched +CONFIG_RTE_LIBRTE_SCHED=y +CONFIG_RTE_SCHED_DEBUG=n +CONFIG_RTE_SCHED_RED=n +CONFIG_RTE_SCHED_COLLECT_STATS=n +CONFIG_RTE_SCHED_SUBPORT_TC_OV=n +CONFIG_RTE_SCHED_PORT_N_GRINDERS=8 +CONFIG_RTE_SCHED_VECTOR=n +# Compile architecture we compile for. distributor library +CONFIG_RTE_LIBRTE_DISTRIBUTOR=y +# Compile architecture we compile for. reorder library +CONFIG_RTE_LIBRTE_REORDER=y +# Compile librte_port +CONFIG_RTE_LIBRTE_PORT=y +CONFIG_RTE_PORT_STATS_COLLECT=n +CONFIG_RTE_PORT_PCAP=n +# Compile librte_table +CONFIG_RTE_LIBRTE_TABLE=y +CONFIG_RTE_TABLE_STATS_COLLECT=n +# Compile librte_pipeline +CONFIG_RTE_LIBRTE_PIPELINE=y +CONFIG_RTE_PIPELINE_STATS_COLLECT=n +# Compile librte_kni +CONFIG_RTE_LIBRTE_KNI=n +CONFIG_RTE_LIBRTE_PMD_KNI=n +CONFIG_RTE_KNI_KMOD=n +CONFIG_RTE_KNI_KMOD_ETHTOOL=n +CONFIG_RTE_KNI_PREEMPT_DEFAULT=y +# Compile architecture we compile for. pdump library +CONFIG_RTE_LIBRTE_PDUMP=y +# Compile vhost user library +CONFIG_RTE_LIBRTE_VHOST=y +CONFIG_RTE_LIBRTE_VHOST_NUMA=y +CONFIG_RTE_LIBRTE_VHOST_DEBUG=n +# Compile vhost PMD +# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled. +CONFIG_RTE_LIBRTE_PMD_VHOST=y +# Compile architecture we compile for. test application +CONFIG_RTE_APP_TEST=y +CONFIG_RTE_APP_TEST_RESOURCE_TAR=n +# Compile architecture we compile for. PMD test application +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n +# Compile architecture we compile for. crypto performance application +CONFIG_RTE_APP_CRYPTO_PERF=y +# Compile architecture we compile for. eventdev application +CONFIG_RTE_APP_EVENTDEV=y +CONFIG_RTE_EXEC_ENV_LINUXAPP=y +CONFIG_RTE_ARCH_ARM64=y +CONFIG_RTE_ARCH_64=y +# Maximum available cache line size in arm64 implementations. +# Setting to maximum available cache line size in generic config +# to address minimum DMA alignment across all arm64 implementations. +CONFIG_RTE_TOOLCHAIN_GCC=y +CONFIG_RTE_LIBRTE_PMD_XENVIRT=n diff --git a/SOURCES/configlib.sh b/SOURCES/configlib.sh new file mode 100644 index 0000000..cf8d70d --- /dev/null +++ b/SOURCES/configlib.sh @@ -0,0 +1,104 @@ +# Copyright (C) 2017, Red Hat, Inc. +# +# Core configuration file library. + +# Configurations are determined by sha values. The way to determine is by +# the special text: +# $FILE_COMMENT_TYPE -*- cfg-sha: $SHA256 -*- + +export LC_ALL=C + +# check required binaries +__check_reqd_binaries() { + local BIN __binaries=("egrep" "sort" "sha256sum" "sed") + for BIN in $__binaries; do + if ! type -P $BIN >/dev/null 2>&1; then + echo "Binary $BIN not found. Please install." + exit 1 + fi + done +} + +# Calculates a sha from a file +# The algorithm for generating a sha from a config is thus: +# +# 1. Remove all comment lines and blank lines +# 2. 
Sort the content +# 3. generate the sha-256 sum +# +# From a script perspective, this means: +# egrep -v ^\# %file% | egrep -v ^$ | sort -u | sha256sum +# +# Params: +# $1 = output variable +# $2 = file to use to calculate the shasum +# $3 = file comment type (defaults to # if unspecified) +calc_sha() { + __check_reqd_binaries + + if [ "$1" == "" ]; then + echo "Please pass in a storage variable." + return 1 + fi + + local __resultvar=$1 + __retval=1 + shift + + local __file=$1 + local cmnt=${2:-#} + + if [ -f "$__file" ]; then + local __shasum=$(egrep -v ^"$cmnt" "$__file" | egrep -v ^$ | sort -u | sha256sum -t | cut -d" " -f1) + eval $__resultvar="'$__shasum'" + __retval=0 + fi + return $__retval +} + +# Retrieves a sha stored in a file +# Param: +# $1 = output variable +# $2 = file to use to calculate the shasum +# $3 = file comment type (defaults to # if unspecified) +retr_sha() { + __check_reqd_binaries + + if [ "$1" == "" ]; then + echo "Please pass in a storage variable." + return 1 + fi + + local __resultvar=$1 + __retval=1 + shift + + local __file=$1 + local cmnt=${2:-#} + + if [ -f "$__file" ]; then + if grep -q "$cmnt -\*- cfg-sha:" "$__file"; then + local __shasum=$(grep "$cmnt -\*- cfg-sha:" "$__file" | sed -e "s@$cmnt -\*- cfg-sha: @@" | cut -d" " -f1) + eval $__resultvar="'$__shasum'" + __retval=0 + fi + fi + return $__retval +} + + +# Set a config value +# set_conf dpdk_build_tree parameter value +# dpdk_build_tree is the directory where the .config lives +# parameter is the config parameter +# value is the value to set for the config parameter +set_conf() { + c="$1/.config" + shift + + if grep -q "$1" "$c"; then + sed -i "s:^$1=.*$:$1=$2:g" $c + else + echo $1=$2 >> "$c" + fi +} diff --git a/SOURCES/dpdk-dev-v2-1-4-net-virtio-fix-vector-Rx-break-caused-by-rxq-flushing.patch b/SOURCES/dpdk-dev-v2-1-4-net-virtio-fix-vector-Rx-break-caused-by-rxq-flushing.patch new file mode 100644 index 0000000..6bc7eed --- /dev/null +++ b/SOURCES/dpdk-dev-v2-1-4-net-virtio-fix-vector-Rx-break-caused-by-rxq-flushing.patch @@ -0,0 +1,84 @@ +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index e0328f61d..64a0cc608 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -1860,7 +1860,7 @@ virtio_dev_start(struct rte_eth_dev *dev) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxvq = dev->data->rx_queues[i]; + /* Flush the old packets */ +- virtqueue_flush(rxvq->vq); ++ virtqueue_rxvq_flush(rxvq->vq); + virtqueue_notify(rxvq->vq); + } + +diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c +index c3a536f8a..696d0e4a4 100644 +--- a/drivers/net/virtio/virtqueue.c ++++ b/drivers/net/virtio/virtqueue.c +@@ -37,6 +37,7 @@ + #include "virtqueue.h" + #include "virtio_logs.h" + #include "virtio_pci.h" ++#include "virtio_rxtx_simple.h" + + /* + * Two types of mbuf to be cleaned: +@@ -62,8 +63,10 @@ virtqueue_detatch_unused(struct virtqueue *vq) + + /* Flush the elements in the used ring. 
*/ + void +-virtqueue_flush(struct virtqueue *vq) ++virtqueue_rxvq_flush(struct virtqueue *vq) + { ++ struct virtnet_rx *rxq = &vq->rxq; ++ struct virtio_hw *hw = vq->hw; + struct vring_used_elem *uep; + struct vq_desc_extra *dxp; + uint16_t used_idx, desc_idx; +@@ -74,13 +77,27 @@ virtqueue_flush(struct virtqueue *vq) + for (i = 0; i < nb_used; i++) { + used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1); + uep = &vq->vq_ring.used->ring[used_idx]; +- desc_idx = (uint16_t)uep->id; +- dxp = &vq->vq_descx[desc_idx]; +- if (dxp->cookie != NULL) { +- rte_pktmbuf_free(dxp->cookie); +- dxp->cookie = NULL; ++ if (hw->use_simple_rx) { ++ desc_idx = used_idx; ++ rte_pktmbuf_free(vq->sw_ring[desc_idx]); ++ vq->vq_free_cnt++; ++ } else { ++ desc_idx = (uint16_t)uep->id; ++ dxp = &vq->vq_descx[desc_idx]; ++ if (dxp->cookie != NULL) { ++ rte_pktmbuf_free(dxp->cookie); ++ dxp->cookie = NULL; ++ } ++ vq_ring_free_chain(vq, desc_idx); + } + vq->vq_used_cons_idx++; +- vq_ring_free_chain(vq, desc_idx); ++ } ++ ++ if (hw->use_simple_rx) { ++ while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { ++ virtio_rxq_rearm_vec(rxq); ++ if (virtqueue_kick_prepare(vq)) ++ virtqueue_notify(vq); ++ } + } + } +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index 2305d91a4..ab466c2db 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -304,7 +304,7 @@ void virtqueue_dump(struct virtqueue *vq); + struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq); + + /* Flush the elements in the used ring. */ +-void virtqueue_flush(struct virtqueue *vq); ++void virtqueue_rxvq_flush(struct virtqueue *vq); + + static inline int + virtqueue_full(const struct virtqueue *vq) diff --git a/SOURCES/eal-ppc-fix-mmap-for-memory-initialization.patch b/SOURCES/eal-ppc-fix-mmap-for-memory-initialization.patch deleted file mode 100644 index b59a6d8..0000000 --- a/SOURCES/eal-ppc-fix-mmap-for-memory-initialization.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 284ae3e9ff9a92575c28c858efd2c85c8de6d440 Mon Sep 17 00:00:00 2001 -From: Chao Zhu -Date: Thu, 6 Apr 2017 15:36:09 +0530 -Subject: [PATCH] eal/ppc: fix mmap for memory initialization - -On IBM POWER platform, when mapping /dev/zero file to hugepage memory -space, mmap will not respect the requested address hint. This will cause -the memory initialization for the second process fails. This patch adds -the required mmap flags to make it work. Beside this, users need to set -the nr_overcommit_hugepages to expand the VA range. When -doing the initialization, users need to set both nr_hugepages and -nr_overcommit_hugepages to the same value, like 64, 128, etc. - -Signed-off-by: Chao Zhu -Acked-by: Sergio Gonzalez Monroy -Acked-by: John McNamara ---- - doc/guides/linux_gsg/sys_reqs.rst | 6 ++++++ - lib/librte_eal/linuxapp/eal/eal_memory.c | 16 ++++++++++++++-- - 2 files changed, 20 insertions(+), 2 deletions(-) - -diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst -index 61222c699b7d..3a28c9e51775 100644 ---- a/doc/guides/linux_gsg/sys_reqs.rst -+++ b/doc/guides/linux_gsg/sys_reqs.rst -@@ -200,6 +200,12 @@ On a NUMA machine, pages should be allocated explicitly on separate nodes:: - - For 1G pages, it is not possible to reserve the hugepage memory after the system has booted. - -+ On IBM POWER system, the nr_overcommit_hugepages should be set to the same value as nr_hugepages. 
-+ For example, if the required page number is 128, the following commands are used:: -+ -+ echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_hugepages -+ echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_overcommit_hugepages -+ - Using Hugepages with the DPDK - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c -index 90cc3224be85..618a09b429bc 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_memory.c -+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c -@@ -331,7 +331,13 @@ get_virtual_area(size_t *size, size_t hugepage_sz) - } - do { - addr = mmap(addr, -- (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0); -+ (*size) + hugepage_sz, PROT_READ, -+#ifdef RTE_ARCH_PPC_64 -+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -+#else -+ MAP_PRIVATE, -+#endif -+ fd, 0); - if (addr == MAP_FAILED) - *size -= hugepage_sz; - } while (addr == MAP_FAILED && *size > 0); -@@ -1359,7 +1365,13 @@ rte_eal_hugepage_attach(void) - * use mmap to get identical addresses as the primary process. - */ - base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len, -- PROT_READ, MAP_PRIVATE, fd_zero, 0); -+ PROT_READ, -+#ifdef RTE_ARCH_PPC_64 -+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -+#else -+ MAP_PRIVATE, -+#endif -+ fd_zero, 0); - if (base_addr == MAP_FAILED || - base_addr != mcfg->memseg[s].addr) { - max_seg = s; --- -2.9.4 - diff --git a/SOURCES/eal-ppc-support-sPAPR-IOMMU-for-vfio-pci.patch b/SOURCES/eal-ppc-support-sPAPR-IOMMU-for-vfio-pci.patch deleted file mode 100644 index ed46bfa..0000000 --- a/SOURCES/eal-ppc-support-sPAPR-IOMMU-for-vfio-pci.patch +++ /dev/null @@ -1,184 +0,0 @@ -From f4ce18acd42d27c8aaa090004989d81b40334715 Mon Sep 17 00:00:00 2001 -From: Gowrishankar Muthukrishnan -Date: Wed, 3 May 2017 12:00:27 +0530 -Subject: [PATCH 2/2] eal/ppc: support sPAPR IOMMU for vfio-pci - -Below changes adds pci probing support for vfio-pci devices in power8. - -Signed-off-by: Gowrishankar Muthukrishnan -Acked-by: Anatoly Burakov -Acked-by: Chao Zhu ---- - doc/guides/rel_notes/release_16_11.rst | 4 ++ - lib/librte_eal/linuxapp/eal/eal_vfio.c | 90 ++++++++++++++++++++++++++++++++++ - lib/librte_eal/linuxapp/eal/eal_vfio.h | 25 ++++++++++ - 3 files changed, 119 insertions(+) - -diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst -index 6186337..1e12002 100644 ---- a/doc/guides/rel_notes/release_16_11.rst -+++ b/doc/guides/rel_notes/release_16_11.rst -@@ -159,6 +159,10 @@ New Features - - i40e PMD and its vector PMD enabled by default in powerpc. - -+* **Added powerpc support in pci probing for vfio-pci devices.** -+ -+ sPAPR IOMMU based pci probing enabled for vfio-pci devices. 
-+ - Resolved Issues - --------------- - -diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c -index 702f7a2..9377a66 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_vfio.c -+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c -@@ -50,12 +50,15 @@ - static struct vfio_config vfio_cfg; - - static int vfio_type1_dma_map(int); -+static int vfio_spapr_dma_map(int); - static int vfio_noiommu_dma_map(int); - - /* IOMMU types we support */ - static const struct vfio_iommu_type iommu_types[] = { - /* x86 IOMMU, otherwise known as type 1 */ - { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map}, -+ /* ppc64 IOMMU, otherwise known as spapr */ -+ { RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map}, - /* IOMMU-less mode */ - { RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map}, - }; -@@ -540,6 +543,93 @@ int vfio_setup_device(const char *sysfs_base, const char *dev_addr, - } - - static int -+vfio_spapr_dma_map(int vfio_container_fd) -+{ -+ const struct rte_memseg *ms = rte_eal_get_physmem_layout(); -+ int i, ret; -+ -+ struct vfio_iommu_spapr_register_memory reg = { -+ .argsz = sizeof(reg), -+ .flags = 0 -+ }; -+ struct vfio_iommu_spapr_tce_info info = { -+ .argsz = sizeof(info), -+ }; -+ struct vfio_iommu_spapr_tce_create create = { -+ .argsz = sizeof(create), -+ }; -+ struct vfio_iommu_spapr_tce_remove remove = { -+ .argsz = sizeof(remove), -+ }; -+ -+ /* query spapr iommu info */ -+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); -+ if (ret) { -+ RTE_LOG(ERR, EAL, " cannot get iommu info, " -+ "error %i (%s)\n", errno, strerror(errno)); -+ return -1; -+ } -+ -+ /* remove default DMA of 32 bit window */ -+ remove.start_addr = info.dma32_window_start; -+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove); -+ if (ret) { -+ RTE_LOG(ERR, EAL, " cannot remove default DMA window, " -+ "error %i (%s)\n", errno, strerror(errno)); -+ return -1; -+ } -+ -+ /* calculate window size based on number of hugepages configured */ -+ create.window_size = rte_eal_get_physmem_size(); -+ create.page_shift = __builtin_ctzll(ms->hugepage_sz); -+ create.levels = 2; -+ -+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create); -+ if (ret) { -+ RTE_LOG(ERR, EAL, " cannot create new DMA window, " -+ "error %i (%s)\n", errno, strerror(errno)); -+ return -1; -+ } -+ -+ /* map all DPDK segments for DMA. 
use 1:1 PA to IOVA mapping */ -+ for (i = 0; i < RTE_MAX_MEMSEG; i++) { -+ struct vfio_iommu_type1_dma_map dma_map; -+ -+ if (ms[i].addr == NULL) -+ break; -+ -+ reg.vaddr = (uintptr_t) ms[i].addr; -+ reg.size = ms[i].len; -+ ret = ioctl(vfio_container_fd, -+ VFIO_IOMMU_SPAPR_REGISTER_MEMORY, ®); -+ if (ret) { -+ RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, " -+ "error %i (%s)\n", errno, strerror(errno)); -+ return -1; -+ } -+ -+ memset(&dma_map, 0, sizeof(dma_map)); -+ dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map); -+ dma_map.vaddr = ms[i].addr_64; -+ dma_map.size = ms[i].len; -+ dma_map.iova = ms[i].phys_addr; -+ dma_map.flags = VFIO_DMA_MAP_FLAG_READ | -+ VFIO_DMA_MAP_FLAG_WRITE; -+ -+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map); -+ -+ if (ret) { -+ RTE_LOG(ERR, EAL, " cannot set up DMA remapping, " -+ "error %i (%s)\n", errno, strerror(errno)); -+ return -1; -+ } -+ -+ } -+ -+ return 0; -+} -+ -+static int - vfio_noiommu_dma_map(int __rte_unused vfio_container_fd) - { - /* No-IOMMU mode does not need DMA mapping */ -diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h -index 29f7f3e..ac31a4f 100644 ---- a/lib/librte_eal/linuxapp/eal/eal_vfio.h -+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h -@@ -54,6 +54,31 @@ - - #define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU - -+#ifndef VFIO_SPAPR_TCE_v2_IOMMU -+#define RTE_VFIO_SPAPR 7 -+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17) -+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19) -+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20) -+struct vfio_iommu_spapr_register_memory { -+ uint32_t argsz; -+ uint32_t flags; -+ uint64_t vaddr; -+ uint64_t size; -+}; -+struct vfio_iommu_spapr_tce_create { -+ uint32_t argsz; -+ uint32_t page_shift; -+ uint64_t window_size; -+ uint32_t levels; -+}; -+struct vfio_iommu_spapr_tce_remove { -+ uint32_t argsz; -+ uint64_t start_addr; -+}; -+#else -+#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU -+#endif -+ - #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) - #define RTE_VFIO_NOIOMMU 8 - #else --- -1.9.1 - diff --git a/SOURCES/gen_config_group.sh b/SOURCES/gen_config_group.sh new file mode 100755 index 0000000..eac8692 --- /dev/null +++ b/SOURCES/gen_config_group.sh @@ -0,0 +1,142 @@ +#!/bin/bash + +source configlib.sh + +# Generates arch configurations in the current directory based on +# 1. an dpdk.spec file +# 2. an expanded dpdk tree + +if (( $# != 2 )); then + echo "$0: dpdk.spec dpdk_tree" >&2 + exit 1 +fi + +DPDKSPEC="$1" +DPDKDIR="$2" + +# accumulate all arch + name triples +DPDK_CONF_MACH_ARCH=() +for arch in $(grep %define\ machine_arch "$DPDKSPEC" | sed 's@%define machine_arch @@') +do + DPDK_CONF_MACH_ARCH+=($arch) +done + +DPDK_CONF_MACH_TMPL=() +for tmpl in $(grep %define\ machine_tmpl "$DPDKSPEC" | sed 's@%define machine_tmpl @@') +do + DPDK_CONF_MACH_TMPL+=($tmpl) +done + +DPDK_CONF_MACH=() +for mach in $(grep %define\ machine\ "$DPDKSPEC" | sed 's@%define machine @@') +do + DPDK_CONF_MACH+=($mach) +done + +DPDK_TARGETS=() +for ((i=0; i < ${#DPDK_CONF_MACH[@]}; i++)); +do + DPDK_TARGETS+=("${DPDK_CONF_MACH_ARCH[$i]}-${DPDK_CONF_MACH_TMPL[$i]}-linuxapp-gcc") + echo "DPDK-target: ${DPDK_TARGETS[$i]}" +done + +OUTPUT_DIR=$(pwd) +pushd "$DPDKDIR" +for ((i=0; i < ${#DPDK_TARGETS[@]}; i++)); +do + echo "For ${DPDK_TARGETS[$i]}:" + + echo " a. 
Generating initial config" + echo " make V=1 T=${DPDK_TARGETS[$i]} O=${DPDK_TARGETS[$i]}" + make V=1 T=${DPDK_TARGETS[$i]} O=${DPDK_TARGETS[$i]} -j8 config + ORIG_SHA="" + OUTDIR="${DPDK_TARGETS[$i]}" + + echo " b. calculating and applying sha" + calc_sha ORIG_SHA "${OUTDIR}/.config" + if [ "$ORIG_SHA" == "" ]; then + echo "ERROR: Unable to get sha for arch ${DPDK_TARGETS[$i]}" + exit 1 + fi + echo "# -*- cfg-sha: ${ORIG_SHA}" > ${OUTDIR}/.config.new + cat "${OUTDIR}/.config" >> "${OUTDIR}/.config.new" + cp "${OUTDIR}/.config" "${OUTDIR}/.config.orig" + mv -f "${OUTDIR}/.config.new" "${OUTDIR}/.config" + + echo " c. setting initial configurations" + # these are the original setconf values from dpdk.spec + set_conf "${OUTDIR}" CONFIG_RTE_MACHINE "\\\"${DPDK_CONF_MACH[$i]}\\\"" + + # Enable automatic driver loading from this path + set_conf "${OUTDIR}" CONFIG_RTE_EAL_PMD_PATH '"/usr/lib64/dpdk-pmds"' + + # start by disabling ALL PMDs + for pmd in $(grep _PMD= "${OUTDIR}/.config" | sed 's@=\(y\|n\)@@g') + do + set_conf "${OUTDIR}" $pmd n + done + + # PMDs which have their own naming scheme + # the default for this was 'n' at one point. Make sure we keep it + # as such + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_QAT n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_VHOST n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_KNI n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_XENVIRT n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NULL n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_TAP n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_PCAP n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_BOND n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_AF_PACKET n + + # whitelist of enabled PMDs + # Soft PMDs to enable + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_RING y + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_VHOST y + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VIRTIO_PMD y + + # HW PMDs + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_I40E_PMD y + case "${DPDK_CONF_MACH_ARCH[i]}" in + x86_64) + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_ENIC_PMD y + ;& + arm64) + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_IXGBE_PMD y + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_IGB_PMD y + ;; + esac + + # Compile the PMD test application + set_conf "${OUTDIR}" CONFIG_RTE_TEST_PMD y + + # Enable vhost-numa build, the added deps are ok for us + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VHOST_NUMA y + + # Disable kernel modules + set_conf "${OUTDIR}" CONFIG_RTE_EAL_IGB_UIO n + set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_KNI n + set_conf "${OUTDIR}" CONFIG_RTE_KNI_KMOD n + + # Disable experimental stuff + set_conf "${OUTDIR}" CONFIG_RTE_NEXT_ABI n + + # Build DPDK as shared library + set_conf "${OUTDIR}" CONFIG_RTE_BUILD_SHARED_LIB y + + cp "${OUTDIR}/.config" "${OUTPUT_DIR}/${DPDK_TARGETS[$i]}-config" +done +popd >/dev/null + +echo -n "For each arch ( " +for ((i=0; i < ${#DPDK_CONF_MACH_ARCH[@]}; i++)); +do + echo -n "${DPDK_CONF_MACH_ARCH[i]} " +done +echo "):" +echo "1. 
ensure you enable the requisite hw" diff --git a/SOURCES/mk-move-PMD-libraries-linking-to-applications.patch b/SOURCES/mk-move-PMD-libraries-linking-to-applications.patch deleted file mode 100644 index 88aef67..0000000 --- a/SOURCES/mk-move-PMD-libraries-linking-to-applications.patch +++ /dev/null @@ -1,143 +0,0 @@ -From ab12f71b310c344a4903a7105ea79ff5b2c25a15 Mon Sep 17 00:00:00 2001 -From: Ferruh Yigit -Date: Tue, 31 Jan 2017 15:04:48 +0000 -Subject: [PATCH] mk: move PMD libraries linking to applications - -Some PMDs provide device specific APIs. Bond and xenvirt are existing -samples for this. - -And since these are PMD libraries, there are two options on how to link -them for shared library build: - -1- They can be linked to all applications by default, using common -rte.app.mk file. - -2- They can be explicitly linked to applications that use device -specific API. - -Currently option one is in use, this patch switches to the option two. - -Moves library linking to the Makefile of application Makefile that uses -device specific API. - -This prevent these PMD libraries to be a dependency to applications -that don't use these device specific APIs. - -Signed-off-by: Ferruh Yigit ---- -JWL -- backported this to 16.11.2 base. Most differences from upstream -are contextual, but references to CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER -were removed due to lack of relevance to 16.11.2 base. - - app/test-pmd/Makefile | 9 +++++++++ - app/test/Makefile | 21 +++++++++++++++++---- - examples/bond/Makefile | 4 ++++ - mk/rte.app.mk | 5 ++--- - 4 files changed, 32 insertions(+), 7 deletions(-) - -diff -up dpdk-stable-16.11.2/app/test-pmd/Makefile.pmd_ring dpdk-stable-16.11.2/app/test-pmd/Makefile ---- dpdk-stable-16.11.2/app/test-pmd/Makefile.pmd_ring 2017-05-31 03:20:05.000000000 -0400 -+++ dpdk-stable-16.11.2/app/test-pmd/Makefile 2017-06-08 12:19:58.681959006 -0400 -@@ -59,6 +59,11 @@ SRCS-y += icmpecho.c - SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c - - ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) -+ -+ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) -+LDLIBS += -lrte_pmd_bond -+endif -+ - _LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += -lrte_pmd_ixgbe - endif - -@@ -69,4 +74,8 @@ DEPDIRS-y += lib drivers - - include $(RTE_SDK)/mk/rte.app.mk - -+ifeq ($(CONFIG_RTE_LIBRTE_PMD_XENVIRT),y) -+LDLIBS += -lrte_pmd_xenvirt -+endif -+ - endif -diff -up dpdk-stable-16.11.2/app/test/Makefile.pmd_ring dpdk-stable-16.11.2/app/test/Makefile ---- dpdk-stable-16.11.2/app/test/Makefile.pmd_ring 2017-05-31 03:20:05.000000000 -0400 -+++ dpdk-stable-16.11.2/app/test/Makefile 2017-06-08 12:18:25.439330508 -0400 -@@ -185,9 +185,6 @@ endif - - ifeq ($(CONFIG_RTE_LIBRTE_PMD_NULL),y) - SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += test_link_bonding_rssconf.c --ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) --LDLIBS += -lrte_pmd_null --endif - endif - - SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring.c -@@ -221,11 +218,27 @@ DEPDIRS-y += lib drivers - ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) - ifneq ($(CONFIG_RTE_LIBRTE_PMD_RING),y) - $(error Link bonding tests require CONFIG_RTE_LIBRTE_PMD_RING=y) --else -+endif -+endif -+ - ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) -+ -+ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) -+LDLIBS += -lrte_pmd_bond -+endif -+ -+ifeq ($(CONFIG_RTE_LIBRTE_PMD_NULL),y) -+LDLIBS += -lrte_pmd_null -+endif -+ -+ifeq ($(CONFIG_RTE_LIBRTE_PMD_RING),y) - LDLIBS += -lrte_pmd_ring - endif -+ -+ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y) -+LDLIBS += -lrte_pmd_crypto_scheduler - endif -+ - endif - - ifeq 
($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y) -diff -up dpdk-stable-16.11.2/examples/bond/Makefile.pmd_ring dpdk-stable-16.11.2/examples/bond/Makefile ---- dpdk-stable-16.11.2/examples/bond/Makefile.pmd_ring 2017-05-31 03:20:05.000000000 -0400 -+++ dpdk-stable-16.11.2/examples/bond/Makefile 2017-06-08 12:18:25.440330526 -0400 -@@ -54,4 +54,8 @@ endif - - CFLAGS += -O3 - -+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) -+LDLIBS += -lrte_pmd_bond -+endif -+ - include $(RTE_SDK)/mk/rte.extapp.mk -diff -up dpdk-stable-16.11.2/mk/rte.app.mk.pmd_ring dpdk-stable-16.11.2/mk/rte.app.mk ---- dpdk-stable-16.11.2/mk/rte.app.mk.pmd_ring 2017-05-31 03:20:05.000000000 -0400 -+++ dpdk-stable-16.11.2/mk/rte.app.mk 2017-06-08 12:22:48.458924188 -0400 -@@ -99,15 +99,13 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) - _LDLIBS-$(CONFIG_RTE_LIBRTE_CMDLINE) += -lrte_cmdline - _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile - --_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond --_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore -- - ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n) - # plugins (link only if static libraries) - - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet - _LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz - _LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt -+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond - _LDLIBS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += -lrte_pmd_cxgbe - _LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000 - _LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena -@@ -130,6 +128,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y) - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost - endif # $(CONFIG_RTE_LIBRTE_VHOST) - _LDLIBS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += -lrte_pmd_vmxnet3_uio -+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore - - ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y) - _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb diff --git a/SOURCES/net-i40e-implement-vector-PMD-for-altivec.patch b/SOURCES/net-i40e-implement-vector-PMD-for-altivec.patch deleted file mode 100644 index 0820da1..0000000 --- a/SOURCES/net-i40e-implement-vector-PMD-for-altivec.patch +++ /dev/null @@ -1,753 +0,0 @@ -From 6fd9dc71febe68f4033a6330e6c9bb03610ba1ac Mon Sep 17 00:00:00 2001 -From: Gowrishankar Muthukrishnan -Date: Wed, 3 May 2017 11:55:58 +0530 -Subject: [PATCH 1/2] net/i40e: implement vector PMD for altivec - -This patch enables i40e driver in PowerPC along with its altivec -intrinsic support. 
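
A minimal usage sketch, not part of the patch itself: on a stock DPDK tree of this era with the patch applied, the altivec Rx/Tx path is selected entirely by the build target and configuration, with no source edits needed; CONFIG_RTE_ARCH_PPC_64 and CONFIG_RTE_LIBRTE_I40E_INC_VECTOR are both set to y in the ppc_64-power8-linuxapp-gcc-config shipped later in this commit.

    # Generate the Power8 build configuration, then build. The Makefile hunk
    # below compiles i40e_rxtx_vec_altivec.c when RTE_ARCH_PPC_64 and
    # RTE_LIBRTE_I40E_INC_VECTOR are enabled. "build-ppc" is an arbitrary
    # output directory name.
    make config T=ppc_64-power8-linuxapp-gcc O=build-ppc
    make O=build-ppc -j"$(nproc)"
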
- -Signed-off-by: Gowrishankar Muthukrishnan -Acked-by: Chao Zhu ---- - MAINTAINERS | 1 + - config/defconfig_ppc_64-power8-linuxapp-gcc | 1 - - doc/guides/nics/features/i40e.ini | 1 + - doc/guides/nics/features/i40e_vec.ini | 1 + - doc/guides/rel_notes/release_16_11.rst | 3 + - drivers/net/i40e/Makefile | 2 + - drivers/net/i40e/i40e_rxtx_vec_altivec.c | 654 ++++++++++++++++++++++++++++ - 7 files changed, 662 insertions(+), 1 deletion(-) - create mode 100644 drivers/net/i40e/i40e_rxtx_vec_altivec.c - -diff --git a/MAINTAINERS b/MAINTAINERS -index 065397b..a380b5d 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -166,6 +166,7 @@ IBM POWER - M: Chao Zhu - F: lib/librte_eal/common/arch/ppc_64/ - F: lib/librte_eal/common/include/arch/ppc_64/ -+F: drivers/net/i40e/i40e_rxtx_vec_altivec.c - - Intel x86 - M: Bruce Richardson -diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc b/config/defconfig_ppc_64-power8-linuxapp-gcc -index f953e61..5f160bc 100644 ---- a/config/defconfig_ppc_64-power8-linuxapp-gcc -+++ b/config/defconfig_ppc_64-power8-linuxapp-gcc -@@ -49,7 +49,6 @@ CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n - # Note: Initially, all of the PMD drivers compilation are turned off on Power - # Will turn on them only after the successful testing on Power - CONFIG_RTE_LIBRTE_IXGBE_PMD=n --CONFIG_RTE_LIBRTE_I40E_PMD=n - CONFIG_RTE_LIBRTE_VIRTIO_PMD=y - CONFIG_RTE_LIBRTE_VMXNET3_PMD=n - CONFIG_RTE_LIBRTE_PMD_BOND=n -diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini -index 0d143bc..36ac337 100644 ---- a/doc/guides/nics/features/i40e.ini -+++ b/doc/guides/nics/features/i40e.ini -@@ -46,3 +46,4 @@ Linux VFIO = Y - x86-32 = Y - x86-64 = Y - ARMv8 = Y -+Power8 = Y -diff --git a/doc/guides/nics/features/i40e_vec.ini b/doc/guides/nics/features/i40e_vec.ini -index edd6b71..5ec4088 100644 ---- a/doc/guides/nics/features/i40e_vec.ini -+++ b/doc/guides/nics/features/i40e_vec.ini -@@ -38,3 +38,4 @@ Linux VFIO = Y - x86-32 = Y - x86-64 = Y - ARMv8 = Y -+Power8 = Y -diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst -index 8c9ec65..6186337 100644 ---- a/doc/guides/rel_notes/release_16_11.rst -+++ b/doc/guides/rel_notes/release_16_11.rst -@@ -155,6 +155,9 @@ New Features - The GCC 4.9 ``-march`` option supports the Intel processor code names. - The config option ``RTE_MACHINE`` can be used to pass code names to the compiler via the ``-march`` flag. - -+* **Added powerpc support for i40e and its vector PMD .** -+ -+ i40e PMD and its vector PMD enabled by default in powerpc. - - Resolved Issues - --------------- -diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile -index 13085fb..9c9a867 100644 ---- a/drivers/net/i40e/Makefile -+++ b/drivers/net/i40e/Makefile -@@ -99,6 +99,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c - SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c - ifeq ($(CONFIG_RTE_ARCH_ARM64),y) - SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c -+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y) -+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c - else - SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c - endif -diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c -new file mode 100644 -index 0000000..40d1929 ---- /dev/null -+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c -@@ -0,0 +1,654 @@ -+/*- -+ * BSD LICENSE -+ * -+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. 
-+ * Copyright(c) 2017 IBM Corporation. -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in -+ * the documentation and/or other materials provided with the -+ * distribution. -+ * * Neither the name of Intel Corporation nor the names of its -+ * contributors may be used to endorse or promote products derived -+ * from this software without specific prior written permission. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include -+#include -+#include -+ -+#include "base/i40e_prototype.h" -+#include "base/i40e_type.h" -+#include "i40e_ethdev.h" -+#include "i40e_rxtx.h" -+#include "i40e_rxtx_vec_common.h" -+ -+#include -+ -+#pragma GCC diagnostic ignored "-Wcast-qual" -+ -+static inline void -+i40e_rxq_rearm(struct i40e_rx_queue *rxq) -+{ -+ int i; -+ uint16_t rx_id; -+ volatile union i40e_rx_desc *rxdp; -+ -+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; -+ struct rte_mbuf *mb0, *mb1; -+ -+ vector unsigned long hdr_room = (vector unsigned long){ -+ RTE_PKTMBUF_HEADROOM, -+ RTE_PKTMBUF_HEADROOM}; -+ vector unsigned long dma_addr0, dma_addr1; -+ -+ rxdp = rxq->rx_ring + rxq->rxrearm_start; -+ -+ /* Pull 'n' more MBUFs into the software ring */ -+ if (rte_mempool_get_bulk(rxq->mp, -+ (void *)rxep, -+ RTE_I40E_RXQ_REARM_THRESH) < 0) { -+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= -+ rxq->nb_rx_desc) { -+ dma_addr0 = (vector unsigned long){}; -+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { -+ rxep[i].mbuf = &rxq->fake_mbuf; -+ vec_st(dma_addr0, 0, -+ (vector unsigned long *)&rxdp[i].read); -+ } -+ } -+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += -+ RTE_I40E_RXQ_REARM_THRESH; -+ return; -+ } -+ -+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */ -+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { -+ vector unsigned long vaddr0, vaddr1; -+ uintptr_t p0, p1; -+ -+ mb0 = rxep[0].mbuf; -+ mb1 = rxep[1].mbuf; -+ -+ /* Flush mbuf with pkt template. -+ * Data to be rearmed is 6 bytes long. -+ * Though, RX will overwrite ol_flags that are coming next -+ * anyway. So overwrite whole 8 bytes with one load: -+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags. 
-+ */ -+ p0 = (uintptr_t)&mb0->rearm_data; -+ *(uint64_t *)p0 = rxq->mbuf_initializer; -+ p1 = (uintptr_t)&mb1->rearm_data; -+ *(uint64_t *)p1 = rxq->mbuf_initializer; -+ -+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ -+ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr); -+ vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr); -+ -+ /* convert pa to dma_addr hdr/data */ -+ dma_addr0 = vec_mergel(vaddr0, vaddr0); -+ dma_addr1 = vec_mergel(vaddr1, vaddr1); -+ -+ /* add headroom to pa values */ -+ dma_addr0 = vec_add(dma_addr0, hdr_room); -+ dma_addr1 = vec_add(dma_addr1, hdr_room); -+ -+ /* flush desc with pa dma_addr */ -+ vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read); -+ vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read); -+ } -+ -+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; -+ if (rxq->rxrearm_start >= rxq->nb_rx_desc) -+ rxq->rxrearm_start = 0; -+ -+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; -+ -+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? -+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); -+ -+ /* Update the tail pointer on the NIC */ -+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); -+} -+ -+/* Handling the offload flags (olflags) field takes computation -+ * time when receiving packets. Therefore we provide a flag to disable -+ * the processing of the olflags field when they are not needed. This -+ * gives improved performance, at the cost of losing the offload info -+ * in the received packet -+ */ -+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE -+ -+static inline void -+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts) -+{ -+ vector unsigned int vlan0, vlan1, rss, l3_l4e; -+ -+ /* mask everything except RSS, flow director and VLAN flags -+ * bit2 is for VLAN tag, bit11 for flow director indication -+ * bit13:12 for RSS indication. 
-+ */ -+ const vector unsigned int rss_vlan_msk = (vector unsigned int){ -+ (int32_t)0x1c03804, (int32_t)0x1c03804, -+ (int32_t)0x1c03804, (int32_t)0x1c03804}; -+ -+ /* map rss and vlan type to rss hash and vlan flag */ -+ const vector unsigned char vlan_flags = (vector unsigned char){ -+ 0, 0, 0, 0, -+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0, -+ 0, 0, 0, 0, -+ 0, 0, 0, 0}; -+ -+ const vector unsigned char rss_flags = (vector unsigned char){ -+ 0, PKT_RX_FDIR, 0, 0, -+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR, -+ 0, 0, 0, 0, -+ 0, 0, 0, 0}; -+ -+ const vector unsigned char l3_l4e_flags = (vector unsigned char){ -+ 0, -+ PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD, -+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD -+ | PKT_RX_IP_CKSUM_BAD, -+ 0, 0, 0, 0, 0, 0, 0, 0}; -+ -+ vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]); -+ vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]); -+ vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1); -+ -+ vlan1 = vec_and(vlan0, rss_vlan_msk); -+ vlan0 = (vector unsigned int)vec_perm(vlan_flags, -+ (vector unsigned char){}, -+ *(vector unsigned char *)&vlan1); -+ -+ rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11}); -+ rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){}, -+ *(vector unsigned char *)&rss); -+ -+ l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22}); -+ l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags, -+ (vector unsigned char){}, -+ *(vector unsigned char *)&l3_l4e); -+ -+ vlan0 = vec_or(vlan0, rss); -+ vlan0 = vec_or(vlan0, l3_l4e); -+ -+ rx_pkts[0]->ol_flags = (uint64_t)vlan0[2]; -+ rx_pkts[1]->ol_flags = (uint64_t)vlan0[3]; -+ rx_pkts[2]->ol_flags = (uint64_t)vlan0[0]; -+ rx_pkts[3]->ol_flags = (uint64_t)vlan0[1]; -+} -+#else -+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0) -+#endif -+ -+#define PKTLEN_SHIFT 10 -+ -+static inline void -+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts) -+{ -+ vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]); -+ vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]); -+ -+ ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30}); -+ ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30}); -+ -+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping( -+ (*(vector unsigned char *)&ptype0)[0]); -+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping( -+ (*(vector unsigned char *)&ptype0)[8]); -+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping( -+ (*(vector unsigned char *)&ptype1)[0]); -+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping( -+ (*(vector unsigned char *)&ptype1)[8]); -+} -+ -+ /* Notice: -+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet -+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST -+ * numbers of DD bits -+ */ -+static inline uint16_t -+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts, uint8_t *split_packet) -+{ -+ volatile union i40e_rx_desc *rxdp; -+ struct i40e_rx_entry *sw_ring; -+ uint16_t nb_pkts_recd; -+ int pos; -+ uint64_t var; -+ vector unsigned char shuf_msk; -+ -+ vector unsigned short crc_adjust = (vector unsigned short){ -+ 0, 0, /* ignore pkt_type field */ -+ rxq->crc_len, /* sub crc on pkt_len */ -+ 0, /* ignore high-16bits of pkt_len */ -+ rxq->crc_len, /* sub crc on data_len */ -+ 0, 0, 0 /* ignore 
non-length fields */ -+ }; -+ vector unsigned long dd_check, eop_check; -+ -+ /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */ -+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST); -+ -+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */ -+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP); -+ -+ /* Just the act of getting into the function from the application is -+ * going to cost about 7 cycles -+ */ -+ rxdp = rxq->rx_ring + rxq->rx_tail; -+ -+ rte_prefetch0(rxdp); -+ -+ /* See if we need to rearm the RX queue - gives the prefetch a bit -+ * of time to act -+ */ -+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) -+ i40e_rxq_rearm(rxq); -+ -+ /* Before we start moving massive data around, check to see if -+ * there is actually a packet available -+ */ -+ if (!(rxdp->wb.qword1.status_error_len & -+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) -+ return 0; -+ -+ /* 4 packets DD mask */ -+ dd_check = (vector unsigned long){0x0000000100000001ULL, -+ 0x0000000100000001ULL}; -+ -+ /* 4 packets EOP mask */ -+ eop_check = (vector unsigned long){0x0000000200000002ULL, -+ 0x0000000200000002ULL}; -+ -+ /* mask to shuffle from desc. to mbuf */ -+ shuf_msk = (vector unsigned char){ -+ 0xFF, 0xFF, /* pkt_type set as unknown */ -+ 0xFF, 0xFF, /* pkt_type set as unknown */ -+ 14, 15, /* octet 15~14, low 16 bits pkt_len */ -+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ -+ 14, 15, /* octet 15~14, 16 bits data_len */ -+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */ -+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */ -+ }; -+ -+ /* Cache is empty -> need to scan the buffer rings, but first move -+ * the next 'n' mbufs into the cache -+ */ -+ sw_ring = &rxq->sw_ring[rxq->rx_tail]; -+ -+ /* A. load 4 packet in one loop -+ * [A*. mask out 4 unused dirty field in desc] -+ * B. copy 4 mbuf point from swring to rx_pkts -+ * C. calc the number of DD bits among the 4 packets -+ * [C*. extract the end-of-packet bit, if requested] -+ * D. fill info. from desc to mbuf -+ */ -+ -+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; -+ pos += RTE_I40E_DESCS_PER_LOOP, -+ rxdp += RTE_I40E_DESCS_PER_LOOP) { -+ vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP]; -+ vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; -+ vector unsigned short staterr, sterr_tmp1, sterr_tmp2; -+ vector unsigned long mbp1, mbp2; /* two mbuf pointer -+ * in one XMM reg. 
-+ */ -+ -+ /* B.1 load 1 mbuf point */ -+ mbp1 = *(vector unsigned long *)&sw_ring[pos]; -+ /* Read desc statuses backwards to avoid race condition */ -+ /* A.1 load 4 pkts desc */ -+ descs[3] = *(vector unsigned long *)(rxdp + 3); -+ rte_compiler_barrier(); -+ -+ /* B.2 copy 2 mbuf point into rx_pkts */ -+ *(vector unsigned long *)&rx_pkts[pos] = mbp1; -+ -+ /* B.1 load 1 mbuf point */ -+ mbp2 = *(vector unsigned long *)&sw_ring[pos + 2]; -+ -+ descs[2] = *(vector unsigned long *)(rxdp + 2); -+ rte_compiler_barrier(); -+ /* B.1 load 2 mbuf point */ -+ descs[1] = *(vector unsigned long *)(rxdp + 1); -+ rte_compiler_barrier(); -+ descs[0] = *(vector unsigned long *)(rxdp); -+ -+ /* B.2 copy 2 mbuf point into rx_pkts */ -+ *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2; -+ -+ if (split_packet) { -+ rte_mbuf_prefetch_part2(rx_pkts[pos]); -+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); -+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); -+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); -+ } -+ -+ /* avoid compiler reorder optimization */ -+ rte_compiler_barrier(); -+ -+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/ -+ const vector unsigned int len3 = vec_sl( -+ vec_ld(0, (vector unsigned int *)&descs[3]), -+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); -+ -+ const vector unsigned int len2 = vec_sl( -+ vec_ld(0, (vector unsigned int *)&descs[2]), -+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); -+ -+ /* merge the now-aligned packet length fields back in */ -+ descs[3] = (vector unsigned long)len3; -+ descs[2] = (vector unsigned long)len2; -+ -+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */ -+ pkt_mb4 = vec_perm((vector unsigned char)descs[3], -+ (vector unsigned char){}, shuf_msk); -+ pkt_mb3 = vec_perm((vector unsigned char)descs[2], -+ (vector unsigned char){}, shuf_msk); -+ -+ /* C.1 4=>2 filter staterr info only */ -+ sterr_tmp2 = vec_mergel((vector unsigned short)descs[3], -+ (vector unsigned short)descs[2]); -+ /* C.1 4=>2 filter staterr info only */ -+ sterr_tmp1 = vec_mergel((vector unsigned short)descs[1], -+ (vector unsigned short)descs[0]); -+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ -+ pkt_mb4 = (vector unsigned char)vec_sub( -+ (vector unsigned short)pkt_mb4, crc_adjust); -+ pkt_mb3 = (vector unsigned char)vec_sub( -+ (vector unsigned short)pkt_mb3, crc_adjust); -+ -+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/ -+ const vector unsigned int len1 = vec_sl( -+ vec_ld(0, (vector unsigned int *)&descs[1]), -+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); -+ const vector unsigned int len0 = vec_sl( -+ vec_ld(0, (vector unsigned int *)&descs[0]), -+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT}); -+ -+ /* merge the now-aligned packet length fields back in */ -+ descs[1] = (vector unsigned long)len1; -+ descs[0] = (vector unsigned long)len0; -+ -+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */ -+ pkt_mb2 = vec_perm((vector unsigned char)descs[1], -+ (vector unsigned char){}, shuf_msk); -+ pkt_mb1 = vec_perm((vector unsigned char)descs[0], -+ (vector unsigned char){}, shuf_msk); -+ -+ /* C.2 get 4 pkts staterr value */ -+ staterr = (vector unsigned short)vec_mergeh( -+ sterr_tmp1, sterr_tmp2); -+ -+ /* D.3 copy final 3,4 data to rx_pkts */ -+ vec_st(pkt_mb4, 0, -+ (vector unsigned char *)&rx_pkts[pos + 3] -+ ->rx_descriptor_fields1 -+ ); -+ vec_st(pkt_mb3, 0, -+ (vector unsigned char *)&rx_pkts[pos + 2] -+ ->rx_descriptor_fields1 -+ ); -+ -+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ -+ pkt_mb2 = (vector unsigned char)vec_sub( -+ (vector 
unsigned short)pkt_mb2, crc_adjust); -+ pkt_mb1 = (vector unsigned char)vec_sub( -+ (vector unsigned short)pkt_mb1, crc_adjust); -+ -+ /* C* extract and record EOP bit */ -+ if (split_packet) { -+ vector unsigned char eop_shuf_mask = -+ (vector unsigned char){ -+ 0xFF, 0xFF, 0xFF, 0xFF, -+ 0xFF, 0xFF, 0xFF, 0xFF, -+ 0xFF, 0xFF, 0xFF, 0xFF, -+ 0x04, 0x0C, 0x00, 0x08 -+ }; -+ -+ /* and with mask to extract bits, flipping 1-0 */ -+ vector unsigned char eop_bits = vec_and( -+ (vector unsigned char)vec_nor(staterr, staterr), -+ (vector unsigned char)eop_check); -+ /* the staterr values are not in order, as the count -+ * count of dd bits doesn't care. However, for end of -+ * packet tracking, we do care, so shuffle. This also -+ * compresses the 32-bit values to 8-bit -+ */ -+ eop_bits = vec_perm(eop_bits, (vector unsigned char){}, -+ eop_shuf_mask); -+ /* store the resulting 32-bit value */ -+ *split_packet = (vec_ld(0, -+ (vector unsigned int *)&eop_bits))[0]; -+ split_packet += RTE_I40E_DESCS_PER_LOOP; -+ -+ /* zero-out next pointers */ -+ rx_pkts[pos]->next = NULL; -+ rx_pkts[pos + 1]->next = NULL; -+ rx_pkts[pos + 2]->next = NULL; -+ rx_pkts[pos + 3]->next = NULL; -+ } -+ -+ /* C.3 calc available number of desc */ -+ staterr = vec_and(staterr, (vector unsigned short)dd_check); -+ -+ /* D.3 copy final 1,2 data to rx_pkts */ -+ vec_st(pkt_mb2, 0, -+ (vector unsigned char *)&rx_pkts[pos + 1] -+ ->rx_descriptor_fields1 -+ ); -+ vec_st(pkt_mb1, 0, -+ (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1 -+ ); -+ desc_to_ptype_v(descs, &rx_pkts[pos]); -+ desc_to_olflags_v(descs, &rx_pkts[pos]); -+ -+ /* C.4 calc avaialbe number of desc */ -+ var = __builtin_popcountll((vec_ld(0, -+ (vector unsigned long *)&staterr)[0])); -+ nb_pkts_recd += var; -+ if (likely(var != RTE_I40E_DESCS_PER_LOOP)) -+ break; -+ } -+ -+ /* Update our internal tail pointer */ -+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); -+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); -+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); -+ -+ return nb_pkts_recd; -+} -+ -+ /* Notice: -+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet -+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST -+ * numbers of DD bits -+ */ -+uint16_t -+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts) -+{ -+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); -+} -+ -+ /* vPMD receive routine that reassembles scattered packets -+ * Notice: -+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet -+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST -+ * numbers of DD bits -+ */ -+uint16_t -+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, -+ uint16_t nb_pkts) -+{ -+ struct i40e_rx_queue *rxq = rx_queue; -+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; -+ -+ /* get some new buffers */ -+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, -+ split_flags); -+ if (nb_bufs == 0) -+ return 0; -+ -+ /* happy day case, full burst + no packets to be joined */ -+ const uint64_t *split_fl64 = (uint64_t *)split_flags; -+ -+ if (rxq->pkt_first_seg == NULL && -+ split_fl64[0] == 0 && split_fl64[1] == 0 && -+ split_fl64[2] == 0 && split_fl64[3] == 0) -+ return nb_bufs; -+ -+ /* reassemble any packets that need reassembly*/ -+ unsigned int i = 0; -+ -+ if (!rxq->pkt_first_seg) { -+ /* find the first split flag, and only reassemble then*/ -+ while (i < nb_bufs && !split_flags[i]) -+ i++; -+ 
if (i == nb_bufs) -+ return nb_bufs; -+ } -+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, -+ &split_flags[i]); -+} -+ -+static inline void -+vtx1(volatile struct i40e_tx_desc *txdp, -+ struct rte_mbuf *pkt, uint64_t flags) -+{ -+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | -+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | -+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); -+ -+ vector unsigned long descriptor = (vector unsigned long){ -+ pkt->buf_physaddr + pkt->data_off, high_qw}; -+ *(vector unsigned long *)txdp = descriptor; -+} -+ -+static inline void -+vtx(volatile struct i40e_tx_desc *txdp, -+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) -+{ -+ int i; -+ -+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) -+ vtx1(txdp, *pkt, flags); -+} -+ -+uint16_t -+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts) -+{ -+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; -+ volatile struct i40e_tx_desc *txdp; -+ struct i40e_tx_entry *txep; -+ uint16_t n, nb_commit, tx_id; -+ uint64_t flags = I40E_TD_CMD; -+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; -+ int i; -+ -+ /* cross rx_thresh boundary is not allowed */ -+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); -+ -+ if (txq->nb_tx_free < txq->tx_free_thresh) -+ i40e_tx_free_bufs(txq); -+ -+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); -+ nb_commit = nb_pkts; -+ if (unlikely(nb_pkts == 0)) -+ return 0; -+ -+ tx_id = txq->tx_tail; -+ txdp = &txq->tx_ring[tx_id]; -+ txep = &txq->sw_ring[tx_id]; -+ -+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); -+ -+ n = (uint16_t)(txq->nb_tx_desc - tx_id); -+ if (nb_commit >= n) { -+ tx_backlog_entry(txep, tx_pkts, n); -+ -+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) -+ vtx1(txdp, *tx_pkts, flags); -+ -+ vtx1(txdp, *tx_pkts++, rs); -+ -+ nb_commit = (uint16_t)(nb_commit - n); -+ -+ tx_id = 0; -+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); -+ -+ /* avoid reach the end of ring */ -+ txdp = &txq->tx_ring[tx_id]; -+ txep = &txq->sw_ring[tx_id]; -+ } -+ -+ tx_backlog_entry(txep, tx_pkts, nb_commit); -+ -+ vtx(txdp, tx_pkts, nb_commit, flags); -+ -+ tx_id = (uint16_t)(tx_id + nb_commit); -+ if (tx_id > txq->tx_next_rs) { -+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= -+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << -+ I40E_TXD_QW1_CMD_SHIFT); -+ txq->tx_next_rs = -+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); -+ } -+ -+ txq->tx_tail = tx_id; -+ -+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); -+ -+ return nb_pkts; -+} -+ -+void __attribute__((cold)) -+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq) -+{ -+ _i40e_rx_queue_release_mbufs_vec(rxq); -+} -+ -+int __attribute__((cold)) -+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq) -+{ -+ return i40e_rxq_vec_setup_default(rxq); -+} -+ -+int __attribute__((cold)) -+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq) -+{ -+ return 0; -+} -+ -+int __attribute__((cold)) -+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) -+{ -+ return i40e_rx_vec_dev_conf_condition_check_default(dev); -+} --- -1.9.1 - diff --git a/SOURCES/ppc_64-power8-linuxapp-gcc-config b/SOURCES/ppc_64-power8-linuxapp-gcc-config new file mode 100644 index 0000000..a8dcb9a --- /dev/null +++ b/SOURCES/ppc_64-power8-linuxapp-gcc-config @@ -0,0 +1,534 @@ +# -*- cfg-sha: 4d1578565c23e449d8e5c1c18e88181f05769b5132b7f22dcbed6bce900e9d0c +# BSD LICENSE +# Copyright (C) IBM Corporation 2014. 
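
The file beginning above, SOURCES/ppc_64-power8-linuxapp-gcc-config, is one of the per-target configs produced by gen_config_group.sh earlier in this commit; its leading cfg-sha comment records the hash of the pristine .config it was derived from. As a quick illustration of how the target name and CONFIG_RTE_MACHINE are derived, here is a sketch using placeholder values, since dpdk.spec itself is not shown in this hunk:

    #!/bin/bash
    # Placeholder values standing in for the "%define machine_arch ...",
    # "%define machine_tmpl ..." and "%define machine ..." lines that
    # gen_config_group.sh greps out of dpdk.spec (real spec values not shown).
    arch=(x86_64 ppc_64)
    tmpl=(native power8)
    mach=(default power8)
    for i in "${!arch[@]}"; do
        # Positional pairing, as in gen_config_group.sh: <arch>-<template>-linuxapp-gcc
        # names both the build target and the shipped SOURCES/<target>-config file,
        # while mach[i] is written into CONFIG_RTE_MACHINE via set_conf.
        echo "${arch[$i]}-${tmpl[$i]}-linuxapp-gcc (CONFIG_RTE_MACHINE=\"${mach[$i]}\")"
    done
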
+# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of IBM Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright(c) 2010-2016 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright(c) 2010-2017 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# RTE_EXEC_ENV values are the directories in mk/exec-env/ +CONFIG_RTE_EXEC_ENV="linuxapp" +# RTE_ARCH values are architecture we compile for. directories in mk/arch/ +CONFIG_RTE_ARCH="ppc_64" +# machine can define specific variables or action for a specific board +# RTE_MACHINE values are architecture we compile for. directories in mk/machine/ +CONFIG_RTE_MACHINE="power8" +# The compiler we use. +# RTE_TOOLCHAIN values are architecture we compile for. directories in mk/toolchain/ +CONFIG_RTE_TOOLCHAIN="gcc" +# Use intrinsics or assembly code for key routines +CONFIG_RTE_FORCE_INTRINSICS=n +# Machine forces strict alignment constraints. +CONFIG_RTE_ARCH_STRICT_ALIGN=n +# Compile to share library +CONFIG_RTE_BUILD_SHARED_LIB=y +# Use newest code breaking previous ABI +CONFIG_RTE_NEXT_ABI=n +# Major ABI to overwrite library specific LIBABIVER +CONFIG_RTE_MAJOR_ABI= +# Machine's cache line size +CONFIG_RTE_CACHE_LINE_SIZE=128 +# Compile Environment Abstraction Layer +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=256 +CONFIG_RTE_MAX_NUMA_NODES=32 +CONFIG_RTE_MAX_MEMSEG=256 +CONFIG_RTE_MAX_MEMZONE=2560 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_ENABLE_ASSERT=n +CONFIG_RTE_LOG_LEVEL=RTE_LOG_INFO +CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_BACKTRACE=y +CONFIG_RTE_LIBEAL_USE_HPET=n +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n +CONFIG_RTE_EAL_IGB_UIO=n +CONFIG_RTE_EAL_VFIO=y +CONFIG_RTE_MALLOC_DEBUG=n +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y +# Recognize/ignore architecture we compile for. AVX/AVX512 CPU flags for performance/power testing. +# AVX512 is marked as experimental for now, will enable it after enough +# field test and possible optimization. +CONFIG_RTE_ENABLE_AVX=y +CONFIG_RTE_ENABLE_AVX512=n +# Default driver path (or "" to disable) +CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds" +# Compile Environment Abstraction Layer to support Vmware TSC map +CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n +# Compile architecture we compile for. PCI library +CONFIG_RTE_LIBRTE_PCI=y +# Compile architecture we compile for. 
argument parser library +CONFIG_RTE_LIBRTE_KVARGS=y +# Compile generic ethernet library +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_MAX_QUEUES_PER_PORT=1024 +CONFIG_RTE_LIBRTE_IEEE1588=n +CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 +CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y +CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS=n +# Turn off Tx preparation stage +# Warning: rte_eth_tx_prepare() can be safely disabled only if using a +# driver which do not implement any Tx preparation. +CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n +# Compile PCI bus driver +CONFIG_RTE_LIBRTE_PCI_BUS=y +# Compile architecture we compile for. vdev bus +CONFIG_RTE_LIBRTE_VDEV_BUS=y +# Compile burst-oriented Amazon ENA PMD driver +CONFIG_RTE_LIBRTE_ENA_PMD=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n +# Compile burst-oriented IGB & EM PMD drivers +CONFIG_RTE_LIBRTE_EM_PMD=n +CONFIG_RTE_LIBRTE_IGB_PMD=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n +# Compile burst-oriented IXGBE PMD driver +CONFIG_RTE_LIBRTE_IXGBE_PMD=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n +CONFIG_RTE_IXGBE_INC_VECTOR=y +CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n +# Compile burst-oriented I40E PMD driver +CONFIG_RTE_LIBRTE_I40E_PMD=y +CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n +CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n +CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y +CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y +CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64 +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4 +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4 +# interval up to 8160 us, aligned to 2 (or default value) +CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1 +# Compile burst-oriented FM10K PMD +CONFIG_RTE_LIBRTE_FM10K_PMD=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_RX=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y +CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y +# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD +CONFIG_RTE_LIBRTE_MLX4_PMD=n +CONFIG_RTE_LIBRTE_MLX4_DEBUG=n +CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n +CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 +# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD +CONFIG_RTE_LIBRTE_MLX5_PMD=n +CONFIG_RTE_LIBRTE_MLX5_DEBUG=n +CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 +# Compile burst-oriented Broadcom PMD driver +CONFIG_RTE_LIBRTE_BNX2X_PMD=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_RX=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_TX=n +CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n +# Compile burst-oriented Chelsio Terminator (CXGBE) PMD +CONFIG_RTE_LIBRTE_CXGBE_PMD=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_CXGBE_TPUT=y +# Compile burst-oriented 
Cisco ENIC PMD driver +CONFIG_RTE_LIBRTE_ENIC_PMD=n +CONFIG_RTE_LIBRTE_ENIC_DEBUG=n +CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW=n +# Compile burst-oriented Netronome NFP PMD driver +CONFIG_RTE_LIBRTE_NFP_PMD=n +CONFIG_RTE_LIBRTE_NFP_DEBUG=n +# Compile Marvell PMD driver +CONFIG_RTE_LIBRTE_MRVL_PMD=n +# Compile burst-oriented Broadcom BNXT PMD driver +CONFIG_RTE_LIBRTE_BNXT_PMD=n +# Compile burst-oriented Solarflare libefx-based PMD +CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n +CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n +# Compile SOFTNIC PMD +CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y +# Compile software PMD backed by SZEDATA2 device +CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n +# Defines firmware type address space. +# See documentation for supported values. +# Other values raise compile time error. +CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0 +# Compile burst-oriented Cavium Thunderx NICVF PMD driver +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n +# Compile burst-oriented Cavium LiquidIO PMD driver +CONFIG_RTE_LIBRTE_LIO_PMD=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n +# NXP DPAA Bus +CONFIG_RTE_LIBRTE_DPAA_BUS=n +CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n +CONFIG_RTE_LIBRTE_DPAA_PMD=n +# Compile burst-oriented Cavium OCTEONTX network PMD driver +CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_RX=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_TX=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_MBOX=n +# Compile NXP DPAA2 FSL-MC Bus +CONFIG_RTE_LIBRTE_FSLMC_BUS=n +# Compile Support Libraries for NXP DPAA2 +CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n +CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y +# Compile burst-oriented NXP DPAA2 PMD driver +CONFIG_RTE_LIBRTE_DPAA2_PMD=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n +# Compile burst-oriented VIRTIO PMD driver +CONFIG_RTE_LIBRTE_VIRTIO_PMD=y +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_RX=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_TX=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n +# Compile virtio device emulation inside virtio PMD driver +CONFIG_RTE_VIRTIO_USER=y +# Compile burst-oriented VMXNET3 PMD driver +CONFIG_RTE_LIBRTE_VMXNET3_PMD=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_RX=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_DRIVER=n +# Compile example software rings based PMD +CONFIG_RTE_LIBRTE_PMD_RING=y +CONFIG_RTE_PMD_RING_MAX_RX_RINGS=16 +CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16 +# Compile software PMD backed by PCAP files +CONFIG_RTE_LIBRTE_PMD_PCAP=n +# Compile link bonding PMD library +CONFIG_RTE_LIBRTE_PMD_BOND=n +CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n +CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n +# QLogic 10G/25G/40G/50G/100G PMD +CONFIG_RTE_LIBRTE_QEDE_PMD=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n 
+CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH=y +#Provides abs path/name of architecture we compile for. firmware file. +#Empty string denotes driver will use default firmware +CONFIG_RTE_LIBRTE_QEDE_FW="" +# Compile software PMD backed by AF_PACKET sockets (Linux only) +CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n +# Compile ARK PMD +CONFIG_RTE_LIBRTE_ARK_PMD=n +CONFIG_RTE_LIBRTE_ARK_PAD_TX=y +CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n +# Compile WRS accelerated virtual port (AVP) guest PMD driver +CONFIG_RTE_LIBRTE_AVP_PMD=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_DRIVER=y +CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n +# Compile architecture we compile for. TAP PMD +# It is enabled by default for Linux only. +CONFIG_RTE_LIBRTE_PMD_TAP=n +# Compile null PMD +CONFIG_RTE_LIBRTE_PMD_NULL=n +# Compile fail-safe PMD +CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y +# Do prefetch of packet data within PMD driver receive function +CONFIG_RTE_PMD_PACKET_PREFETCH=y +# Compile generic crypto device library +CONFIG_RTE_LIBRTE_CRYPTODEV=y +CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n +CONFIG_RTE_CRYPTO_MAX_DEVS=64 +CONFIG_RTE_CRYPTODEV_NAME_LEN=64 +# Compile PMD for ARMv8 Crypto device +CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n +# Compile NXP DPAA2 crypto sec driver for CAAM HW +CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n +# NXP DPAA caam - crypto driver +CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n +# Compile PMD for QuickAssist based devices +CONFIG_RTE_LIBRTE_PMD_QAT=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER=n +# Number of sessions to create in architecture we compile for. session memory pool +# on a single QuickAssist device. 
+CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048 +# Compile PMD for AESNI backed device +CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n +CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n +# Compile PMD for Software backed device +CONFIG_RTE_LIBRTE_PMD_OPENSSL=n +CONFIG_RTE_LIBRTE_PMD_OPENSSL_DEBUG=n +# Compile PMD for AESNI GCM device +CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n +CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n +# Compile PMD for SNOW 3G device +CONFIG_RTE_LIBRTE_PMD_SNOW3G=n +CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n +# Compile PMD for KASUMI device +CONFIG_RTE_LIBRTE_PMD_KASUMI=n +CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n +# Compile PMD for ZUC device +CONFIG_RTE_LIBRTE_PMD_ZUC=n +CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n +# Compile PMD for Crypto Scheduler device +CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n +CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n +# Compile PMD for NULL Crypto device +CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n +# Compile PMD for Marvell Crypto device +CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n +# Compile generic security library +CONFIG_RTE_LIBRTE_SECURITY=y +# Compile generic event device library +CONFIG_RTE_LIBRTE_EVENTDEV=y +CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n +CONFIG_RTE_EVENT_MAX_DEVS=16 +CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64 +# Compile PMD for skeleton event device +CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=n +CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n +# Compile PMD for software event device +CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n +CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV_DEBUG=n +# Compile PMD for octeontx sso event device +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG=n +# Compile librte_ring +CONFIG_RTE_LIBRTE_RING=y +# Compile librte_mempool +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n +# Compile Mempool drivers +CONFIG_RTE_DRIVER_MEMPOOL_RING=y +CONFIG_RTE_DRIVER_MEMPOOL_STACK=y +# Compile PMD for octeontx fpa mempool device +CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=y +CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG=n +# Compile librte_mbuf +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 +# Compile librte_timer +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n +# Compile librte_cfgfile +CONFIG_RTE_LIBRTE_CFGFILE=y +# Compile librte_cmdline +CONFIG_RTE_LIBRTE_CMDLINE=y +CONFIG_RTE_LIBRTE_CMDLINE_DEBUG=n +# Compile librte_hash +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n +# Compile librte_efd +CONFIG_RTE_LIBRTE_EFD=y +# Compile librte_member +CONFIG_RTE_LIBRTE_MEMBER=y +# Compile librte_jobstats +CONFIG_RTE_LIBRTE_JOBSTATS=y +# Compile architecture we compile for. device metrics library +CONFIG_RTE_LIBRTE_METRICS=y +# Compile architecture we compile for. bitrate statistics library +CONFIG_RTE_LIBRTE_BITRATE=y +# Compile architecture we compile for. 
latency statistics library +CONFIG_RTE_LIBRTE_LATENCY_STATS=y +# Compile librte_lpm +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n +# Compile librte_acl +CONFIG_RTE_LIBRTE_ACL=y +CONFIG_RTE_LIBRTE_ACL_DEBUG=n +# Compile librte_power +CONFIG_RTE_LIBRTE_POWER=y +CONFIG_RTE_LIBRTE_POWER_DEBUG=n +CONFIG_RTE_MAX_LCORE_FREQS=64 +# Compile librte_net +CONFIG_RTE_LIBRTE_NET=y +# Compile librte_ip_frag +CONFIG_RTE_LIBRTE_IP_FRAG=y +CONFIG_RTE_LIBRTE_IP_FRAG_DEBUG=n +CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4 +CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n +# Compile GRO library +CONFIG_RTE_LIBRTE_GRO=y +# Compile GSO library +CONFIG_RTE_LIBRTE_GSO=y +# Compile librte_meter +CONFIG_RTE_LIBRTE_METER=y +# Compile librte_classify +CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y +# Compile librte_sched +CONFIG_RTE_LIBRTE_SCHED=y +CONFIG_RTE_SCHED_DEBUG=n +CONFIG_RTE_SCHED_RED=n +CONFIG_RTE_SCHED_COLLECT_STATS=n +CONFIG_RTE_SCHED_SUBPORT_TC_OV=n +CONFIG_RTE_SCHED_PORT_N_GRINDERS=8 +CONFIG_RTE_SCHED_VECTOR=n +# Compile architecture we compile for. distributor library +CONFIG_RTE_LIBRTE_DISTRIBUTOR=y +# Compile architecture we compile for. reorder library +CONFIG_RTE_LIBRTE_REORDER=y +# Compile librte_port +CONFIG_RTE_LIBRTE_PORT=y +CONFIG_RTE_PORT_STATS_COLLECT=n +CONFIG_RTE_PORT_PCAP=n +# Compile librte_table +CONFIG_RTE_LIBRTE_TABLE=y +CONFIG_RTE_TABLE_STATS_COLLECT=n +# Compile librte_pipeline +CONFIG_RTE_LIBRTE_PIPELINE=y +CONFIG_RTE_PIPELINE_STATS_COLLECT=n +# Compile librte_kni +CONFIG_RTE_LIBRTE_KNI=n +CONFIG_RTE_LIBRTE_PMD_KNI=n +CONFIG_RTE_KNI_KMOD=n +CONFIG_RTE_KNI_KMOD_ETHTOOL=n +CONFIG_RTE_KNI_PREEMPT_DEFAULT=y +# Compile architecture we compile for. pdump library +CONFIG_RTE_LIBRTE_PDUMP=y +# Compile vhost user library +CONFIG_RTE_LIBRTE_VHOST=y +CONFIG_RTE_LIBRTE_VHOST_NUMA=y +CONFIG_RTE_LIBRTE_VHOST_DEBUG=n +# Compile vhost PMD +# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled. +CONFIG_RTE_LIBRTE_PMD_VHOST=y +# Compile architecture we compile for. test application +CONFIG_RTE_APP_TEST=y +CONFIG_RTE_APP_TEST_RESOURCE_TAR=n +# Compile architecture we compile for. PMD test application +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n +# Compile architecture we compile for. crypto performance application +CONFIG_RTE_APP_CRYPTO_PERF=y +# Compile architecture we compile for. eventdev application +CONFIG_RTE_APP_EVENTDEV=y +CONFIG_RTE_EXEC_ENV_LINUXAPP=y +CONFIG_RTE_ARCH_PPC_64=y +CONFIG_RTE_ARCH_64=y +CONFIG_RTE_TOOLCHAIN_GCC=y +# Note: Power doesn't have this support +# Note: Initially, all of architecture we compile for. PMD drivers compilation are turned off on Power +# Will turn on them only after architecture we compile for. successful testing on Power +CONFIG_RTE_LIBRTE_PMD_XENVIRT=n diff --git a/SOURCES/set_config.sh b/SOURCES/set_config.sh new file mode 100755 index 0000000..002386b --- /dev/null +++ b/SOURCES/set_config.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright (C) 2017, Red Hat, Inc. +# +# set_config.sh will copy a configuration from $1 to $2, in the process +# checking that the sha header for $1 matches the header in $2 + +source configlib.sh + +if (( $# < 2 )); then + echo "$0: source dest [comment-marker]" + exit 1 +fi + +if [ ! -f "$1" ]; then + echo "Source file $1 must exist." + exit 1 +fi +src_file=$1 +shift + +if [ ! -f "$1" ]; then + echo "Dest file $1 must exist." 
+ exit 1 +fi +dst_file=$1 +shift + +comment_sep=${1:-#} + +export LANG=en_US.utf8 + +DEST_FILE_SHA="" +SRC_FILE_SHA="" + +calc_sha DEST_FILE_SHA "$dst_file" "$comment_sep" || echo "Failed to calc sha" +retr_sha SRC_FILE_SHA "$src_file" "$comment_sep" || echo "Failed to retrieve sha" + +if [ "$DEST_FILE_SHA" != "$SRC_FILE_SHA" ]; then + echo "ERROR: The requisite starting sha from $dst_file does not match the" + echo " specified sha in $src_file." + echo "[ $DEST_FILE_SHA ] vs [ $SRC_FILE_SHA ]" + exit 1 +fi + +mv "$dst_file" "$dst_file".OLD +cp "$src_file" "$dst_file" +echo "copied 1 config file." +exit 0 diff --git a/SOURCES/x86_64-native-linuxapp-gcc-config b/SOURCES/x86_64-native-linuxapp-gcc-config new file mode 100644 index 0000000..9e18711 --- /dev/null +++ b/SOURCES/x86_64-native-linuxapp-gcc-config @@ -0,0 +1,533 @@ +# -*- cfg-sha: 56176386deef83f9f1fd9d1c143a20be1294c8ed5e720aaef37e4b007ccbbde3 +# BSD LICENSE +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright(c) 2010-2016 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
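
set_config.sh above refuses to copy a shipped config over a generated .config whose hash does not match the cfg-sha recorded in the shipped file, which flags drift in the upstream defaults at rebase time instead of silently overwriting it. The helpers it calls live in configlib.sh, which is not part of this hunk, so the bodies below are only an assumed sketch of their behaviour; the call signatures are taken from the scripts above.

    #!/bin/bash
    # Assumed sketch of the configlib.sh helpers (configlib.sh is not shown here).
    calc_sha() {
        # calc_sha VAR FILE [MARKER]: sha256 of FILE, ignoring any cfg-sha header line.
        local file=$2 marker=${3:-#}
        printf -v "$1" '%s' "$(grep -v "^${marker} -\\*- cfg-sha:" "$file" | sha256sum | cut -d' ' -f1)"
    }
    retr_sha() {
        # retr_sha VAR FILE [MARKER]: the hash recorded in FILE's cfg-sha header line.
        local file=$2 marker=${3:-#}
        printf -v "$1" '%s' "$(sed -n "s/^${marker} -\\*- cfg-sha: //p" "$file" | head -n1)"
    }
    # Example mirroring set_config.sh: only install the shipped config if it was
    # derived from an identical base .config ("build/.config" is hypothetical).
    calc_sha GEN_SHA build/.config
    retr_sha REC_SHA SOURCES/ppc_64-power8-linuxapp-gcc-config
    [ "$GEN_SHA" = "$REC_SHA" ] && echo "base config matches, safe to copy"
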
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# BSD LICENSE +# Copyright(c) 2010-2017 Intel Corporation. All rights reserved. +# All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# RTE_EXEC_ENV values are the directories in mk/exec-env/ +CONFIG_RTE_EXEC_ENV="linuxapp" +# RTE_ARCH values are architecture we compile for. directories in mk/arch/ +CONFIG_RTE_ARCH="x86_64" +# machine can define specific variables or action for a specific board +# RTE_MACHINE values are architecture we compile for. directories in mk/machine/ +CONFIG_RTE_MACHINE="default" +# The compiler we use. +# RTE_TOOLCHAIN values are architecture we compile for. directories in mk/toolchain/ +CONFIG_RTE_TOOLCHAIN="gcc" +# Use intrinsics or assembly code for key routines +CONFIG_RTE_FORCE_INTRINSICS=n +# Machine forces strict alignment constraints. 
+CONFIG_RTE_ARCH_STRICT_ALIGN=n +# Compile to share library +CONFIG_RTE_BUILD_SHARED_LIB=y +# Use newest code breaking previous ABI +CONFIG_RTE_NEXT_ABI=n +# Major ABI to overwrite library specific LIBABIVER +CONFIG_RTE_MAJOR_ABI= +# Machine's cache line size +CONFIG_RTE_CACHE_LINE_SIZE=64 +# Compile Environment Abstraction Layer +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=128 +CONFIG_RTE_MAX_NUMA_NODES=8 +CONFIG_RTE_MAX_MEMSEG=256 +CONFIG_RTE_MAX_MEMZONE=2560 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_ENABLE_ASSERT=n +CONFIG_RTE_LOG_LEVEL=RTE_LOG_INFO +CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_BACKTRACE=y +CONFIG_RTE_LIBEAL_USE_HPET=n +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n +CONFIG_RTE_EAL_IGB_UIO=n +CONFIG_RTE_EAL_VFIO=y +CONFIG_RTE_MALLOC_DEBUG=n +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y +# Recognize/ignore architecture we compile for. AVX/AVX512 CPU flags for performance/power testing. +# AVX512 is marked as experimental for now, will enable it after enough +# field test and possible optimization. +CONFIG_RTE_ENABLE_AVX=y +CONFIG_RTE_ENABLE_AVX512=n +# Default driver path (or "" to disable) +CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds" +# Compile Environment Abstraction Layer to support Vmware TSC map +CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=y +# Compile architecture we compile for. PCI library +CONFIG_RTE_LIBRTE_PCI=y +# Compile architecture we compile for. argument parser library +CONFIG_RTE_LIBRTE_KVARGS=y +# Compile generic ethernet library +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_MAX_QUEUES_PER_PORT=1024 +CONFIG_RTE_LIBRTE_IEEE1588=n +CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16 +CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y +CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS=n +# Turn off Tx preparation stage +# Warning: rte_eth_tx_prepare() can be safely disabled only if using a +# driver which do not implement any Tx preparation. +CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n +# Compile PCI bus driver +CONFIG_RTE_LIBRTE_PCI_BUS=y +# Compile architecture we compile for. 
vdev bus +CONFIG_RTE_LIBRTE_VDEV_BUS=y +# Compile burst-oriented Amazon ENA PMD driver +CONFIG_RTE_LIBRTE_ENA_PMD=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_ENA_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n +# Compile burst-oriented IGB & EM PMD drivers +CONFIG_RTE_LIBRTE_EM_PMD=n +CONFIG_RTE_LIBRTE_IGB_PMD=y +CONFIG_RTE_LIBRTE_E1000_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_E1000_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n +# Compile burst-oriented IXGBE PMD driver +CONFIG_RTE_LIBRTE_IXGBE_PMD=y +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n +CONFIG_RTE_IXGBE_INC_VECTOR=y +CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n +# Compile burst-oriented I40E PMD driver +CONFIG_RTE_LIBRTE_I40E_PMD=y +CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n +CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n +CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y +CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y +CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64 +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4 +CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4 +# interval up to 8160 us, aligned to 2 (or default value) +CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1 +# Compile burst-oriented FM10K PMD +CONFIG_RTE_LIBRTE_FM10K_PMD=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_RX=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_FM10K_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y +CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y +# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD +CONFIG_RTE_LIBRTE_MLX4_PMD=n +CONFIG_RTE_LIBRTE_MLX4_DEBUG=n +CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n +CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 +# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD +CONFIG_RTE_LIBRTE_MLX5_PMD=n +CONFIG_RTE_LIBRTE_MLX5_DEBUG=n +CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 +# Compile burst-oriented Broadcom PMD driver +CONFIG_RTE_LIBRTE_BNX2X_PMD=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_RX=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_TX=n +CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n +CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n +# Compile burst-oriented Chelsio Terminator (CXGBE) PMD +CONFIG_RTE_LIBRTE_CXGBE_PMD=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_CXGBE_TPUT=y +# Compile burst-oriented Cisco ENIC PMD driver +CONFIG_RTE_LIBRTE_ENIC_PMD=y +CONFIG_RTE_LIBRTE_ENIC_DEBUG=n +CONFIG_RTE_LIBRTE_ENIC_DEBUG_FLOW=n +# Compile burst-oriented Netronome NFP PMD driver +CONFIG_RTE_LIBRTE_NFP_PMD=n +CONFIG_RTE_LIBRTE_NFP_DEBUG=n +# Compile Marvell PMD driver +CONFIG_RTE_LIBRTE_MRVL_PMD=n +# Compile burst-oriented Broadcom BNXT PMD driver +CONFIG_RTE_LIBRTE_BNXT_PMD=n +# Compile burst-oriented Solarflare libefx-based PMD +CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n +CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n +# Compile SOFTNIC PMD +CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y +# Compile software PMD backed by SZEDATA2 device +CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n +# Defines firmware type 
address space. +# See documentation for supported values. +# Other values raise compile time error. +CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0 +# Compile burst-oriented Cavium Thunderx NICVF PMD driver +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n +# Compile burst-oriented Cavium LiquidIO PMD driver +CONFIG_RTE_LIBRTE_LIO_PMD=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n +CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n +# NXP DPAA Bus +CONFIG_RTE_LIBRTE_DPAA_BUS=n +CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n +CONFIG_RTE_LIBRTE_DPAA_PMD=n +# Compile burst-oriented Cavium OCTEONTX network PMD driver +CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_RX=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_TX=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_OCTEONTX_DEBUG_MBOX=n +# Compile NXP DPAA2 FSL-MC Bus +CONFIG_RTE_LIBRTE_FSLMC_BUS=n +# Compile Support Libraries for NXP DPAA2 +CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n +CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y +# Compile burst-oriented NXP DPAA2 PMD driver +CONFIG_RTE_LIBRTE_DPAA2_PMD=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n +CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n +# Compile burst-oriented VIRTIO PMD driver +CONFIG_RTE_LIBRTE_VIRTIO_PMD=y +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_RX=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_TX=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n +# Compile virtio device emulation inside virtio PMD driver +CONFIG_RTE_VIRTIO_USER=y +# Compile burst-oriented VMXNET3 PMD driver +CONFIG_RTE_LIBRTE_VMXNET3_PMD=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_RX=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_DRIVER=n +# Compile example software rings based PMD +CONFIG_RTE_LIBRTE_PMD_RING=y +CONFIG_RTE_PMD_RING_MAX_RX_RINGS=16 +CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16 +# Compile software PMD backed by PCAP files +CONFIG_RTE_LIBRTE_PMD_PCAP=n +# Compile link bonding PMD library +CONFIG_RTE_LIBRTE_PMD_BOND=n +CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n +CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n +# QLogic 10G/25G/40G/50G/100G PMD +CONFIG_RTE_LIBRTE_QEDE_PMD=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_QEDE_VF_TX_SWITCH=y +#Provides abs path/name of architecture we compile for. firmware file. 
+#Empty string denotes driver will use default firmware +CONFIG_RTE_LIBRTE_QEDE_FW="" +# Compile software PMD backed by AF_PACKET sockets (Linux only) +CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n +# Compile ARK PMD +CONFIG_RTE_LIBRTE_ARK_PMD=n +CONFIG_RTE_LIBRTE_ARK_PAD_TX=y +CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n +CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n +# Compile WRS accelerated virtual port (AVP) guest PMD driver +CONFIG_RTE_LIBRTE_AVP_PMD=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n +CONFIG_RTE_LIBRTE_AVP_DEBUG_DRIVER=y +CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n +# Compile architecture we compile for. TAP PMD +# It is enabled by default for Linux only. +CONFIG_RTE_LIBRTE_PMD_TAP=n +# Compile null PMD +CONFIG_RTE_LIBRTE_PMD_NULL=n +# Compile fail-safe PMD +CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y +# Do prefetch of packet data within PMD driver receive function +CONFIG_RTE_PMD_PACKET_PREFETCH=y +# Compile generic crypto device library +CONFIG_RTE_LIBRTE_CRYPTODEV=y +CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n +CONFIG_RTE_CRYPTO_MAX_DEVS=64 +CONFIG_RTE_CRYPTODEV_NAME_LEN=64 +# Compile PMD for ARMv8 Crypto device +CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n +# Compile NXP DPAA2 crypto sec driver for CAAM HW +CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n +# NXP DPAA caam - crypto driver +CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n +CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n +# Compile PMD for QuickAssist based devices +CONFIG_RTE_LIBRTE_PMD_QAT=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n +CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER=n +# Number of sessions to create in architecture we compile for. session memory pool +# on a single QuickAssist device. 
+CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048 +# Compile PMD for AESNI backed device +CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n +CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n +# Compile PMD for Software backed device +CONFIG_RTE_LIBRTE_PMD_OPENSSL=n +CONFIG_RTE_LIBRTE_PMD_OPENSSL_DEBUG=n +# Compile PMD for AESNI GCM device +CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n +CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n +# Compile PMD for SNOW 3G device +CONFIG_RTE_LIBRTE_PMD_SNOW3G=n +CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n +# Compile PMD for KASUMI device +CONFIG_RTE_LIBRTE_PMD_KASUMI=n +CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n +# Compile PMD for ZUC device +CONFIG_RTE_LIBRTE_PMD_ZUC=n +CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n +# Compile PMD for Crypto Scheduler device +CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n +CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n +# Compile PMD for NULL Crypto device +CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n +# Compile PMD for Marvell Crypto device +CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n +CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n +# Compile generic security library +CONFIG_RTE_LIBRTE_SECURITY=y +# Compile generic event device library +CONFIG_RTE_LIBRTE_EVENTDEV=y +CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n +CONFIG_RTE_EVENT_MAX_DEVS=16 +CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64 +# Compile PMD for skeleton event device +CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=n +CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n +# Compile PMD for software event device +CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n +CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV_DEBUG=n +# Compile PMD for octeontx sso event device +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n +CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG=n +# Compile librte_ring +CONFIG_RTE_LIBRTE_RING=y +# Compile librte_mempool +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n +# Compile Mempool drivers +CONFIG_RTE_DRIVER_MEMPOOL_RING=y +CONFIG_RTE_DRIVER_MEMPOOL_STACK=y +# Compile PMD for octeontx fpa mempool device +CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=y +CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG=n +# Compile librte_mbuf +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc" +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 +# Compile librte_timer +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n +# Compile librte_cfgfile +CONFIG_RTE_LIBRTE_CFGFILE=y +# Compile librte_cmdline +CONFIG_RTE_LIBRTE_CMDLINE=y +CONFIG_RTE_LIBRTE_CMDLINE_DEBUG=n +# Compile librte_hash +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n +# Compile librte_efd +CONFIG_RTE_LIBRTE_EFD=y +# Compile librte_member +CONFIG_RTE_LIBRTE_MEMBER=y +# Compile librte_jobstats +CONFIG_RTE_LIBRTE_JOBSTATS=y +# Compile architecture we compile for. device metrics library +CONFIG_RTE_LIBRTE_METRICS=y +# Compile architecture we compile for. bitrate statistics library +CONFIG_RTE_LIBRTE_BITRATE=y +# Compile architecture we compile for. 
latency statistics library +CONFIG_RTE_LIBRTE_LATENCY_STATS=y +# Compile librte_lpm +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n +# Compile librte_acl +CONFIG_RTE_LIBRTE_ACL=y +CONFIG_RTE_LIBRTE_ACL_DEBUG=n +# Compile librte_power +CONFIG_RTE_LIBRTE_POWER=y +CONFIG_RTE_LIBRTE_POWER_DEBUG=n +CONFIG_RTE_MAX_LCORE_FREQS=64 +# Compile librte_net +CONFIG_RTE_LIBRTE_NET=y +# Compile librte_ip_frag +CONFIG_RTE_LIBRTE_IP_FRAG=y +CONFIG_RTE_LIBRTE_IP_FRAG_DEBUG=n +CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4 +CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n +# Compile GRO library +CONFIG_RTE_LIBRTE_GRO=y +# Compile GSO library +CONFIG_RTE_LIBRTE_GSO=y +# Compile librte_meter +CONFIG_RTE_LIBRTE_METER=y +# Compile librte_classify +CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=y +# Compile librte_sched +CONFIG_RTE_LIBRTE_SCHED=y +CONFIG_RTE_SCHED_DEBUG=n +CONFIG_RTE_SCHED_RED=n +CONFIG_RTE_SCHED_COLLECT_STATS=n +CONFIG_RTE_SCHED_SUBPORT_TC_OV=n +CONFIG_RTE_SCHED_PORT_N_GRINDERS=8 +CONFIG_RTE_SCHED_VECTOR=n +# Compile architecture we compile for. distributor library +CONFIG_RTE_LIBRTE_DISTRIBUTOR=y +# Compile architecture we compile for. reorder library +CONFIG_RTE_LIBRTE_REORDER=y +# Compile librte_port +CONFIG_RTE_LIBRTE_PORT=y +CONFIG_RTE_PORT_STATS_COLLECT=n +CONFIG_RTE_PORT_PCAP=n +# Compile librte_table +CONFIG_RTE_LIBRTE_TABLE=y +CONFIG_RTE_TABLE_STATS_COLLECT=n +# Compile librte_pipeline +CONFIG_RTE_LIBRTE_PIPELINE=y +CONFIG_RTE_PIPELINE_STATS_COLLECT=n +# Compile librte_kni +CONFIG_RTE_LIBRTE_KNI=n +CONFIG_RTE_LIBRTE_PMD_KNI=n +CONFIG_RTE_KNI_KMOD=n +CONFIG_RTE_KNI_KMOD_ETHTOOL=n +CONFIG_RTE_KNI_PREEMPT_DEFAULT=y +# Compile architecture we compile for. pdump library +CONFIG_RTE_LIBRTE_PDUMP=y +# Compile vhost user library +CONFIG_RTE_LIBRTE_VHOST=y +CONFIG_RTE_LIBRTE_VHOST_NUMA=y +CONFIG_RTE_LIBRTE_VHOST_DEBUG=n +# Compile vhost PMD +# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled. +CONFIG_RTE_LIBRTE_PMD_VHOST=y +# Compile architecture we compile for. test application +CONFIG_RTE_APP_TEST=y +CONFIG_RTE_APP_TEST_RESOURCE_TAR=n +# Compile architecture we compile for. PMD test application +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n +# Compile architecture we compile for. crypto performance application +CONFIG_RTE_APP_CRYPTO_PERF=y +# Compile architecture we compile for. 
eventdev application +CONFIG_RTE_APP_EVENTDEV=y +CONFIG_RTE_EXEC_ENV_LINUXAPP=y +CONFIG_RTE_ARCH_X86_64=y +CONFIG_RTE_ARCH_X86=y +CONFIG_RTE_ARCH_64=y +CONFIG_RTE_TOOLCHAIN_GCC=y +CONFIG_RTE_LIBRTE_PMD_XENVIRT=n diff --git a/SPECS/dpdk.spec b/SPECS/dpdk.spec index 92a8af5..7f769d3 100644 --- a/SPECS/dpdk.spec +++ b/SPECS/dpdk.spec @@ -1,15 +1,13 @@ -# Add option to build as static libraries (--without shared) -%bcond_without shared # Add option to build with examples %bcond_with examples # Add option to build without tools %bcond_without tools # Dont edit Version: and Release: directly, only these: -%define ver 16.11.2 -%define rel 4 +%define ver 17.11 +%define rel 7 -%define srcname dpdk-stable +%define srcname dpdk # Define when building git snapshots #define snapver 2086.git263333bb @@ -19,15 +17,25 @@ Name: dpdk Version: %{ver} Release: %{?snapver:0.%{snapver}.}%{rel}%{?dist} URL: http://dpdk.org -Source: http://dpdk.org/browse/%{srcname}/snapshot/%{srcname}-%{srcver}.tar.xz -Patch0: mk-move-PMD-libraries-linking-to-applications.patch -Patch1: net-i40e-implement-vector-PMD-for-altivec.patch -Patch2: eal-ppc-support-sPAPR-IOMMU-for-vfio-pci.patch -Patch3: eal-ppc-fix-mmap-for-memory-initialization.patch +Source: http://fast.dpdk.org/rel/dpdk-%{srcver}.tar.xz # Only needed for creating snapshot tarballs, not used in build itself Source100: dpdk-snapshot.sh +Source500: configlib.sh +Source501: gen_config_group.sh +Source502: set_config.sh + +# Important: source503 is used as the actual copy file +# @TODO: this causes a warning - fix it? +Source504: arm64-armv8a-linuxapp-gcc-config +Source505: ppc_64-power8-linuxapp-gcc-config +Source506: x86_64-native-linuxapp-gcc-config + +Patch0: dpdk-dev-v2-1-4-net-virtio-fix-vector-Rx-break-caused-by-rxq-flushing.patch +Patch1: 0001-vhost_user_protect_active_rings_from_async_ring_changes.patch +Patch2: 0001-bus-pci-forbid-IOVA-mode-if-IOMMU-address-width-too-.patch + Summary: Set of libraries and drivers for fast packet processing # @@ -41,7 +49,7 @@ License: BSD and LGPLv2 and GPLv2 # The DPDK is designed to optimize througput of network traffic using, among # other techniques, carefully crafted assembly instructions. As such it # needs extensive work to port it to other architectures. -ExclusiveArch: x86_64 i686 aarch64 ppc64le +ExclusiveArch: x86_64 aarch64 ppc64le # machine_arch maps between rpm and dpdk arch name, often same as _target_cpu # machine_tmpl is the config template machine name, often "native" @@ -51,11 +59,6 @@ ExclusiveArch: x86_64 i686 aarch64 ppc64le %define machine_tmpl native %define machine default %endif -%ifarch i686 -%define machine_arch i686 -%define machine_tmpl native -%define machine atm -%endif %ifarch aarch64 %define machine_arch arm64 %define machine_tmpl armv8a @@ -74,7 +77,7 @@ ExclusiveArch: x86_64 i686 aarch64 ppc64le %define incdir %{_includedir}/%{name} %define pmddir %{_libdir}/%{name}-pmds -BuildRequires: kernel-headers, libpcap-devel, zlib-devel, numactl-devel +BuildRequires: kernel-headers, zlib-devel, numactl-devel BuildRequires: doxygen, python-sphinx %description @@ -84,9 +87,6 @@ fast packet processing in the user space. %package devel Summary: Data Plane Development Kit development files Requires: %{name}%{?_isa} = %{version}-%{release} -%if ! %{with shared} -Provides: %{name}-static = %{version}-%{release} -%endif %description devel This package contains the headers and other files needed for developing @@ -102,7 +102,8 @@ API programming documentation for the Data Plane Development Kit. 
%if %{with tools} %package tools Summary: Tools for setting up Data Plane Development Kit environment -Requires: kmod pciutils findutils iproute +Requires: %{name} = %{version}-%{release} +Requires: kmod pciutils findutils iproute python %description tools %{summary} @@ -123,18 +124,8 @@ as L2 and L3 forwarding. %patch0 -p1 %patch1 -p1 %patch2 -p1 -%patch3 -p1 %build -function setconf() -{ - cf=%{target}/.config - if grep -q ^$1= $cf; then - sed -i "s:^$1=.*$:$1=$2:g" $cf - else - echo $1=$2 >> $cf - fi -} # In case dpdk-devel is installed unset RTE_SDK RTE_INCLUDE RTE_TARGET @@ -151,34 +142,8 @@ export EXTRA_CFLAGS="$(echo %{optflags} | sed -e 's:-Wall::g' -e 's:-march=[[:al make V=1 O=%{target} T=%{target} %{?_smp_mflags} config -setconf CONFIG_RTE_MACHINE '"%{machine}"' - -# Enable automatic driver loading from this path -setconf CONFIG_RTE_EAL_PMD_PATH '"%{pmddir}"' - -# Enable pcap and vhost-numa build, the added deps are ok for us -setconf CONFIG_RTE_LIBRTE_PMD_PCAP y -setconf CONFIG_RTE_LIBRTE_VHOST_NUMA y -# Disable unstable driver(s) -setconf CONFIG_RTE_LIBRTE_BNX2X_PMD n - -%if %{with shared} -setconf CONFIG_RTE_BUILD_SHARED_LIB y -%endif - -# Disable kernel modules -setconf CONFIG_RTE_EAL_IGB_UIO n -setconf CONFIG_RTE_LIBRTE_KNI n -setconf CONFIG_RTE_KNI_KMOD n - -# Disable experimental and ABI-breaking code -setconf CONFIG_RTE_NEXT_ABI n - -# Disable some PMDs on fdProd -setconf CONFIG_RTE_LIBRTE_BNXT_PMD n -setconf CONFIG_RTE_LIBRTE_ENA_PMD n -setconf CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO n -setconf CONFIG_RTE_LIBRTE_QEDE_PMD n +cp -f %{SOURCE500} %{SOURCE502} "%{_sourcedir}/%{target}-config" . +%{SOURCE502} %{target}-config "%{target}/.config" make V=1 O=%{target} %{?_smp_mflags} @@ -197,18 +162,18 @@ unset RTE_SDK RTE_INCLUDE RTE_TARGET # Create a driver directory with symlinks to all pmds mkdir -p %{buildroot}/%{pmddir} -%if %{with shared} for f in %{buildroot}/%{_libdir}/*_pmd_*.so.*; do bn=$(basename ${f}) ln -s ../${bn} %{buildroot}%{pmddir}/${bn} done -%endif %if ! 
%{with tools} -rm -rf %{buildroot}%{sdkdir}/tools +rm -rf %{buildroot}%{sdkdir}/usertools rm -rf %{buildroot}%{_sbindir}/dpdk-devbind %endif -rm -f %{buildroot}%{sdkdir}/tools/setup.sh +rm -f %{buildroot}%{sdkdir}/usertools/dpdk-setup.sh +rm -f %{buildroot}%{_bindir}/dpdk-test-crypto-perf +rm -rf %{buildroot}%{_bindir}/dpdk-test-eventdev %if %{with examples} find %{target}/examples/ -name "*.map" | xargs rm -f @@ -248,10 +213,8 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi %{_bindir}/dpdk-procinfo %{_bindir}/dpdk-pdump %dir %{pmddir} -%if %{with shared} %{_libdir}/*.so.* %{pmddir}/*.so.* -%endif %files doc #BSD @@ -262,17 +225,13 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi %{incdir}/ %{sdkdir}/ %if %{with tools} -%exclude %{sdkdir}/tools/ +%exclude %{sdkdir}/usertools/ %endif %if %{with examples} %exclude %{sdkdir}/examples/ %endif %{_sysconfdir}/profile.d/dpdk-sdk-*.* -%if %{with shared} %{_libdir}/*.so -%else -%{_libdir}/*.a -%endif %if %{with examples} %files examples %exclude %{_bindir}/dpdk-procinfo @@ -284,12 +243,43 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi %if %{with tools} %files tools -%{sdkdir}/tools/ +%{sdkdir}/usertools/ %{_sbindir}/dpdk-devbind %{_bindir}/dpdk-pmdinfo %endif %changelog +* Wed Jan 31 2018 Kevin Traynor - 17.11-7 +- Backport to forbid IOVA mode if IOMMU address width too small (#1530957) + +* Wed Jan 31 2018 Aaron Conole - 17.11-6 +- Backport to protect active vhost_user rings (#1525446) + +* Tue Jan 09 2018 Timothy Redaelli - 17.11-5 +- Real backport of "net/virtio: fix vector Rx break caused by rxq flushing" + +* Thu Dec 14 2017 Timothy Redaelli - 17.11-4 +- Backport "net/virtio: fix vector Rx break caused by rxq flushing" + +* Wed Dec 06 2017 Timothy Redaelli - 17.11-3 +- Enable ENIC only for x86_64 + +* Wed Dec 06 2017 Timothy Redaelli - 17.11-2 +- Re-add main package dependency from dpdk-tools +- Add explicit python dependency to dpdk-tools + +* Tue Nov 28 2017 Timothy Redaelli - 17.11-1 +- Update to DPDK 17.11 (#1522700) +- Use a static configuration file +- Remove i686 from ExclusiveArch since it's not supported on RHEL7 +- Remove "--without shared" support + +* Fri Oct 13 2017 Josh Boyer - 16.11.2-6 +- Rebuild to pick up all arches + +* Fri Oct 13 2017 Timothy Redaelli - 16.11.2-5 +- Enable only supported PMDs (#1497384) + * Fri Jun 23 2017 John W. Linville - 16.11.2-4 - Backport "eal/ppc: fix mmap for memory initialization"
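As context for the "Use a static configuration file" change noted in the 17.11-1 changelog entry above: outside rpmbuild, the new %build logic reduces to roughly the following sequence for the x86_64 target. This is illustrative only; rpmbuild expands %{SOURCE500}, %{SOURCE502} and %{target} itself, and the target name x86_64-native-linuxapp-gcc is assumed from the Source506 file name.

# Illustrative manual equivalent of the new %build configuration step.
target=x86_64-native-linuxapp-gcc
make V=1 O=$target T=$target config                    # generate a default $target/.config
cp -f configlib.sh set_config.sh ${target}-config .    # stage the helper scripts and the shipped static config from SOURCES
./set_config.sh ${target}-config "$target/.config"     # sha-checked copy over the generated file
make V=1 O=$target

If the cfg-sha header in ${target}-config does not match the hash of the generated .config, set_config.sh exits with an error rather than silently building against an unexpected base configuration.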