From 371f6ca7f5cbad70f6e5fafc12d5448d7b6f0750 Mon Sep 17 00:00:00 2001
From: Arthur Kiyanovski <akiyano@amazon.com>
Date: Thu, 11 Oct 2018 11:26:16 +0300
Subject: [PATCH 02/16] net: ena: minor performance improvement

Reduce fastpath overhead by making ena_com_tx_comp_req_id_get() inline.
Also move it to the ena_eth_com.h file with its dependency function
ena_com_cq_inc_head().

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
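
Note: a minimal standalone sketch of the pattern applied here, with
simplified stand-in names (not the ENA API). Defining the hot-path
helper as static inline in a header lets the compiler inline it at
every call site, removing a function call per TX completion:

	#include <stdint.h>

	struct cq {
		uint16_t head;
		uint16_t q_depth;	/* always a power of two */
		uint8_t phase;
	};

	/* Lives in a header, so each caller gets an inlined copy. */
	static inline void cq_inc_head(struct cq *cq)
	{
		cq->head++;
		/* flip the expected phase bit on wrap-around */
		if ((cq->head & (cq->q_depth - 1)) == 0)
			cq->phase ^= 1;
	}
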
 drivers/net/ethernet/amazon/ena/ena_eth_com.c | 43 -----------------
 drivers/net/ethernet/amazon/ena/ena_eth_com.h | 46 ++++++++++++++++++-
 2 files changed, 44 insertions(+), 45 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 2b3ff0c20155..9c0511e9f9a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,15 +59,6 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	return cdesc;
 }

-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
-	io_cq->head++;
-
-	/* Switch phase bit in case of wrap around */
-	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
-		io_cq->phase ^= 1;
-}
-
 static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
 {
 	u16 tail_masked;
@@ -477,40 +468,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 	return 0;
 }

-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
-	u8 expected_phase, cdesc_phase;
-	struct ena_eth_io_tx_cdesc *cdesc;
-	u16 masked_head;
-
-	masked_head = io_cq->head & (io_cq->q_depth - 1);
-	expected_phase = io_cq->phase;
-
-	cdesc = (struct ena_eth_io_tx_cdesc *)
-		((uintptr_t)io_cq->cdesc_addr.virt_addr +
-		(masked_head * io_cq->cdesc_entry_size_in_bytes));
-
-	/* When the current completion descriptor phase isn't the same as the
-	 * expected, it mean that the device still didn't update
-	 * this completion.
-	 */
-	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-	if (cdesc_phase != expected_phase)
-		return -EAGAIN;
-
-	dma_rmb();
-	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-		pr_err("Invalid req id %d\n", cdesc->req_id);
-		return -EINVAL;
-	}
-
-	ena_com_cq_inc_head(io_cq);
-
-	*req_id = READ_ONCE(cdesc->req_id);
-
-	return 0;
-}
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
 {
 	struct ena_eth_io_rx_cdesc_base *cdesc;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..4930324e9d8d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -86,8 +86,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 			       struct ena_com_buf *ena_buf,
 			       u16 req_id);

-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);

 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -159,4 +157,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
 	io_sq->next_to_comp += elem;
 }

+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+	io_cq->head++;
+
+	/* Switch phase bit in case of wrap around */
+	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+		io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+					     u16 *req_id)
+{
+	u8 expected_phase, cdesc_phase;
+	struct ena_eth_io_tx_cdesc *cdesc;
+	u16 masked_head;
+
+	masked_head = io_cq->head & (io_cq->q_depth - 1);
+	expected_phase = io_cq->phase;
+
+	cdesc = (struct ena_eth_io_tx_cdesc *)
+		((uintptr_t)io_cq->cdesc_addr.virt_addr +
+		(masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+	/* When the current completion descriptor phase isn't the same as the
+	 * expected, it mean that the device still didn't update
+	 * this completion.
+	 */
+	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+	if (cdesc_phase != expected_phase)
+		return -EAGAIN;
+
+	dma_rmb();
+
+	*req_id = READ_ONCE(cdesc->req_id);
+	if (unlikely(*req_id >= io_cq->q_depth)) {
+		pr_err("Invalid req id %d\n", cdesc->req_id);
+		return -EINVAL;
+	}
+
+	ena_com_cq_inc_head(io_cq);
+
+	return 0;
+}
+
 #endif /* ENA_ETH_COM_H_ */
--
2.19.1

From df44a6755f48dfc5c94d878e80807931460c3846 Mon Sep 17 00:00:00 2001
From: Arthur Kiyanovski <akiyano@amazon.com>
Date: Thu, 11 Oct 2018 11:26:17 +0300
Subject: [PATCH 03/16] net: ena: complete host info to match latest ENA spec

Add new fields and definitions to host info and fill them
according to the latest ENA spec version.

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
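
Note: a minimal standalone sketch of the bus/device/function packing
behind the new host_info->bdf field (bits 15:8 bus, 7:3 device,
2:0 function, matching the GENMASK definitions added below). The
example values are assumptions; the driver takes them from
pdev->bus->number and pdev->devfn instead:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t bus = 0x3a, dev = 0x00, fn = 0x1;	/* assumed example values */
		uint8_t devfn = (uint8_t)((dev << 3) | fn);	/* same layout as pdev->devfn */
		uint16_t bdf = (uint16_t)((bus << 8) | devfn);	/* as in ena_config_host_info() */

		printf("bdf = 0x%04x (bus %u, device %u, function %u)\n",
		       bdf, bdf >> 8, (bdf >> 3) & 0x1f, bdf & 0x7);
		return 0;
	}
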
 .../net/ethernet/amazon/ena/ena_admin_defs.h | 31 ++++++++++++++++++-
 drivers/net/ethernet/amazon/ena/ena_com.c | 12 +++----
 .../net/ethernet/amazon/ena/ena_common_defs.h | 4 +--
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 ++++--
 4 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4532e574ebcd..d735164efea3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -63,6 +63,8 @@ enum ena_admin_aq_completion_status {
 	ENA_ADMIN_ILLEGAL_PARAMETER = 5,

 	ENA_ADMIN_UNKNOWN_ERROR = 6,
+
+	ENA_ADMIN_RESOURCE_BUSY = 7,
 };

 enum ena_admin_aq_feature_id {
@@ -702,6 +704,10 @@ enum ena_admin_os_type {
 	ENA_ADMIN_OS_FREEBSD = 4,

 	ENA_ADMIN_OS_IPXE = 5,
+
+	ENA_ADMIN_OS_ESXI = 6,
+
+	ENA_ADMIN_OS_GROUPS_NUM = 6,
 };

 struct ena_admin_host_info {
@@ -723,11 +729,27 @@ struct ena_admin_host_info {
 	/* 7:0 : major
 	 * 15:8 : minor
 	 * 23:16 : sub_minor
+	 * 31:24 : module_type
 	 */
 	u32 driver_version;

 	/* features bitmap */
-	u32 supported_network_features[4];
+	u32 supported_network_features[2];
+
+	/* ENA spec version of driver */
+	u16 ena_spec_version;
+
+	/* ENA device's Bus, Device and Function
+	 * 2:0 : function
+	 * 7:3 : device
+	 * 15:8 : bus
+	 */
+	u16 bdf;
+
+	/* Number of CPUs */
+	u16 num_cpus;
+
+	u16 reserved;
 };

 struct ena_admin_rss_ind_table_entry {
@@ -1008,6 +1030,13 @@ struct ena_admin_ena_mmio_req_read_less_resp {
 #define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
 #define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
 #define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24)
+#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)

 /* aenq_common_desc */
 #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7635c38e77dd..b6e6a4721931 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
 #define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32

-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
-		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
-		| (ENA_COMMON_SPEC_VERSION_MINOR))

 #define ENA_CTRL_MAJOR 0
 #define ENA_CTRL_MINOR 0
@@ -1400,11 +1397,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
 		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
 		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

-	if (ver < MIN_ENA_VER) {
-		pr_err("ENA version is lower than the minimal version the driver supports\n");
-		return -1;
-	}
-
 	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
 		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
 		ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -2441,6 +2433,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
 	if (unlikely(!host_attr->host_info))
 		return -ENOMEM;

+	host_attr->host_info->ena_spec_version =
+		((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+		(ENA_COMMON_SPEC_VERSION_MINOR));
+
 	return 0;
 }

diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index bb8d73676eab..23beb7e7ed7b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -32,8 +32,8 @@
 #ifndef _ENA_COMMON_H_
 #define _ENA_COMMON_H_

-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /*  */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /*  */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 2
+#define ENA_COMMON_SPEC_VERSION_MINOR 0

 /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
 struct ena_common_mem_addr {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 69a49784b204..0c9c0d3ce856 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2206,7 +2206,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return qid;
 }

-static void ena_config_host_info(struct ena_com_dev *ena_dev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev,
+				 struct pci_dev *pdev)
 {
 	struct ena_admin_host_info *host_info;
 	int rc;
@@ -2220,6 +2221,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)

 	host_info = ena_dev->host_attr.host_info;

+	host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
 	host_info->os_type = ENA_ADMIN_OS_LINUX;
 	host_info->kernel_ver = LINUX_VERSION_CODE;
 	strlcpy(host_info->kernel_ver_str, utsname()->version,
@@ -2230,7 +2232,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 	host_info->driver_version =
 		(DRV_MODULE_VER_MAJOR) |
 		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
-		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
+	host_info->num_cpus = num_online_cpus();

 	rc = ena_com_set_host_attributes(ena_dev);
 	if (rc) {
@@ -2454,7 +2458,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	 */
 	ena_com_set_admin_polling_mode(ena_dev, true);

-	ena_config_host_info(ena_dev);
+	ena_config_host_info(ena_dev, pdev);

 	/* Get Device Attributes*/
 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
--
2.19.1

From 0a66e6d1fe86cb3d49fcd76057b4f7a50e0fe49a Mon Sep 17 00:00:00 2001
From: Arthur Kiyanovski <akiyano@amazon.com>
Date: Thu, 11 Oct 2018 11:26:18 +0300
Subject: [PATCH 04/16] net: ena: introduce Low Latency Queues data structures
 according to ENA spec

Low Latency Queues (LLQ) allow usage of the device's memory for
descriptors and headers. Such queues decrease processing time since the
data is already located on the device when the driver rings the doorbell.

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
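
Note: a worked example of the space budget inside one LLQ descriptor
list entry, as standalone C. The sizes are assumptions for illustration
(a 128-byte entry and a 16-byte TX descriptor, with two descriptors
placed before the inline header); the real values are negotiated with
the device through the feature structures added below:

	#include <stdio.h>

	int main(void)
	{
		int entry_size = 128;		/* ENA_ADMIN_LIST_ENTRY_SIZE_128B */
		int desc_size = 16;		/* assumed TX descriptor size */
		int descs_before_header = 2;	/* ..._NUM_DESCS_BEFORE_HEADER_2 */

		/* header bytes that still fit in the same entry */
		int max_inline_header = entry_size - descs_before_header * desc_size;

		printf("max inline header: %d bytes\n", max_inline_header);	/* 96 */
		return 0;
	}
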
 .../net/ethernet/amazon/ena/ena_admin_defs.h | 90 ++++++++++++++++++-
 drivers/net/ethernet/amazon/ena/ena_com.h | 38 ++++++++
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 +-
 3 files changed, 128 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index d735164efea3..b439ec1b3edb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -74,6 +74,8 @@ enum ena_admin_aq_feature_id {

 	ENA_ADMIN_HW_HINTS = 3,

+	ENA_ADMIN_LLQ = 4,
+
 	ENA_ADMIN_RSS_HASH_FUNCTION = 10,

 	ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
@@ -485,8 +487,85 @@ struct ena_admin_device_attr_feature_desc {
 	u32 max_mtu;
 };

+enum ena_admin_llq_header_location {
+	/* header is in descriptor list */
+	ENA_ADMIN_INLINE_HEADER = 1,
+	/* header in a separate ring, implies 16B descriptor list entry */
+	ENA_ADMIN_HEADER_RING = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+	ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1,
+	ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2,
+	ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4,
+	ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8,
+};
+
+/* packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located in the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode
+ */
+enum ena_admin_llq_stride_ctrl {
+	ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1,
+	ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+	u32 max_llq_num;
+
+	u32 max_llq_depth;
+
+	/* specify the header locations the device supports. bitfield of
+	 * enum ena_admin_llq_header_location.
+	 */
+	u16 header_location_ctrl_supported;
+
+	/* the header location the driver selected to use. */
+	u16 header_location_ctrl_enabled;
+
+	/* if inline header is specified - this is the size of descriptor
+	 * list entry. If header in a separate ring is specified - this is
+	 * the size of header ring entry. bitfield of enum
+	 * ena_admin_llq_ring_entry_size. specify the entry sizes the device
+	 * supports
+	 */
+	u16 entry_size_ctrl_supported;
+
+	/* the entry size the driver selected to use. */
+	u16 entry_size_ctrl_enabled;
+
+	/* valid only if inline header is specified. First entry associated
+	 * with the packet includes descriptors and header. Rest of the
+	 * entries occupied by descriptors. This parameter defines the max
+	 * number of descriptors precedding the header in the first entry.
+	 * The field is bitfield of enum
+	 * ena_admin_llq_num_descs_before_header and specify the values the
+	 * device supports
+	 */
+	u16 desc_num_before_header_supported;
+
+	/* the desire field the driver selected to use */
+	u16 desc_num_before_header_enabled;
+
+	/* valid only if inline was chosen. bitfield of enum
+	 * ena_admin_llq_stride_ctrl
+	 */
+	u16 descriptors_stride_ctrl_supported;
+
+	/* the stride control the driver selected to use */
+	u16 descriptors_stride_ctrl_enabled;
+};
+
 struct ena_admin_queue_feature_desc {
-	/* including LLQs */
 	u32 max_sq_num;

 	u32 max_sq_depth;
@@ -495,9 +574,9 @@ struct ena_admin_queue_feature_desc {

 	u32 max_cq_depth;

-	u32 max_llq_num;
+	u32 max_legacy_llq_num;

-	u32 max_llq_depth;
+	u32 max_legacy_llq_depth;

 	u32 max_header_size;

@@ -822,6 +901,8 @@ struct ena_admin_get_feat_resp {

 	struct ena_admin_device_attr_feature_desc dev_attr;

+	struct ena_admin_feature_llq_desc llq;
+
 	struct ena_admin_queue_feature_desc max_queue;

 	struct ena_admin_feature_aenq_desc aenq;
@@ -869,6 +950,9 @@ struct ena_admin_set_feat_cmd {

 		/* rss indirection table */
 		struct ena_admin_feature_rss_ind_table ind_table;
+
+		/* LLQ configuration */
+		struct ena_admin_feature_llq_desc llq;
 	} u;
 };

diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7b784f8a06a6..50e6c8f6f138 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -108,6 +108,14 @@ enum ena_intr_moder_level {
 	ENA_INTR_MAX_NUM_OF_LEVELS,
 };

+struct ena_llq_configurations {
+	enum ena_admin_llq_header_location llq_header_location;
+	enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
+	enum ena_admin_llq_stride_ctrl llq_stride_ctrl;
+	enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
+	u16 llq_ring_entry_size_value;
+};
+
 struct ena_intr_moder_entry {
 	unsigned int intr_moder_interval;
 	unsigned int pkts_per_interval;
@@ -142,6 +150,15 @@ struct ena_com_tx_meta {
 	u16 l4_hdr_len; /* In words */
 };

+struct ena_com_llq_info {
+	u16 header_location_ctrl;
+	u16 desc_stride_ctrl;
+	u16 desc_list_entry_size_ctrl;
+	u16 desc_list_entry_size;
+	u16 descs_num_before_header;
+	u16 descs_per_entry;
+};
+
 struct ena_com_io_cq {
 	struct ena_com_io_desc_addr cdesc_addr;

@@ -179,6 +196,20 @@ struct ena_com_io_cq {

 } ____cacheline_aligned;

+struct ena_com_io_bounce_buffer_control {
+	u8 *base_buffer;
+	u16 next_to_use;
+	u16 buffer_size;
+	u16 buffers_num;  /* Must be a power of 2 */
+};
+
+/* This struct is to keep tracking the current location of the next llq entry */
+struct ena_com_llq_pkt_ctrl {
+	u8 *curr_bounce_buf;
+	u16 idx;
+	u16 descs_left_in_line;
+};
+
 struct ena_com_io_sq {
 	struct ena_com_io_desc_addr desc_addr;

@@ -190,6 +221,9 @@ struct ena_com_io_sq {

 	u32 msix_vector;
 	struct ena_com_tx_meta cached_tx_meta;
+	struct ena_com_llq_info llq_info;
+	struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+	struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;

 	u16 q_depth;
 	u16 qid;
@@ -197,6 +231,7 @@ struct ena_com_io_sq {
 	u16 idx;
 	u16 tail;
 	u16 next_to_comp;
+	u16 llq_last_copy_tail;
 	u32 tx_max_header_size;
 	u8 phase;
 	u8 desc_entry_size;
@@ -334,6 +369,8 @@ struct ena_com_dev {
 	u16 intr_delay_resolution;
 	u32 intr_moder_tx_interval;
 	struct ena_intr_moder_entry *intr_moder_tbl;
+
+	struct ena_com_llq_info llq_info;
 };

 struct ena_com_dev_get_features_ctx {
@@ -342,6 +379,7 @@ struct ena_com_dev_get_features_ctx {
 	struct ena_admin_feature_aenq_desc aenq;
 	struct ena_admin_feature_offload_desc offload;
 	struct ena_admin_ena_hw_hints hw_hints;
+	struct ena_admin_feature_llq_desc llq;
 };

 struct ena_com_create_io_ctx {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0c9c0d3ce856..789556960b8e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2959,7 +2959,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,

 	/* In case of LLQ use the llq number in the get feature cmd */
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		io_sq_num = get_feat_ctx->max_queues.max_llq_num;
+		io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num;

 		if (io_sq_num == 0) {
 			dev_err(&pdev->dev,
@@ -2995,7 +2995,7 @@ static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
 	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

 	/* Enable push mode if device supports LLQ */
-	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
+	if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0)
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
 	else
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -3131,7 +3131,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,

 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
 		queue_size = min_t(u32, queue_size,
-				   get_feat_ctx->max_queues.max_llq_depth);
+				   get_feat_ctx->max_queues.max_legacy_llq_depth);

 	queue_size = rounddown_pow_of_two(queue_size);

--
2.19.1

From 8e9ebea20ab8db4f3a993e815e0b6b84ce98bbfb Mon Sep 17 00:00:00 2001
From: Arthur Kiyanovski <akiyano@amazon.com>
Date: Thu, 11 Oct 2018 11:26:19 +0300
Subject: [PATCH 05/16] net: ena: add functions for handling Low Latency Queues
 in ena_com

This patch introduces APIs for detection, initialization, configuration
and actual usage of low latency queues (LLQ). It extends the transmit
API with creation of LLQ descriptors in device memory (which includes
host buffer descriptors as well as the packet header).

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
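
Note: the transmit path below builds each LLQ entry in a host-side
bounce buffer and only then copies the finished line into device
memory. A minimal standalone sketch of the power-of-two ring rotation
used by ena_com_get_next_bounce_buffer() (simplified names, assumed
sizes):

	#include <stdint.h>
	#include <stdio.h>

	struct bounce_ctrl {
		uint8_t *base_buffer;
		uint16_t next_to_use;
		uint16_t buffer_size;
		uint16_t buffers_num;	/* must be a power of two */
	};

	static uint8_t *next_bounce_buffer(struct bounce_ctrl *c)
	{
		/* masked post-increment wraps without a modulo */
		return c->base_buffer +
		       (c->next_to_use++ & (c->buffers_num - 1)) * c->buffer_size;
	}

	int main(void)
	{
		static uint8_t storage[4 * 128];	/* 4 buffers of 128 B, assumed */
		struct bounce_ctrl c = { storage, 0, 128, 4 };

		for (int i = 0; i < 6; i++)	/* offsets 0,128,256,384,0,128 */
			printf("offset %td\n", next_bounce_buffer(&c) - storage);
		return 0;
	}
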
|
|
|
e336be |
drivers/net/ethernet/amazon/ena/ena_com.c | 249 +++++++++++++++++-
|
|
|
e336be |
drivers/net/ethernet/amazon/ena/ena_com.h | 28 ++
|
|
|
e336be |
drivers/net/ethernet/amazon/ena/ena_eth_com.c | 231 ++++++++++++----
|
|
|
e336be |
drivers/net/ethernet/amazon/ena/ena_eth_com.h | 25 +-
|
|
|
e336be |
drivers/net/ethernet/amazon/ena/ena_netdev.c | 21 +-
|
|
|
e336be |
5 files changed, 474 insertions(+), 80 deletions(-)
|
|
|
e336be |
|
|
|
e336be |
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
|
|
|
e336be |
index b6e6a4721931..5220c7578d6b 100644
|
|
|
e336be |
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
|
|
|
e336be |
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
|
|
|
e336be |
@@ -58,6 +58,8 @@
|
|
|
e336be |
|
|
|
e336be |
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
|
|
|
e336be |
|
|
|
e336be |
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
|
|
|
e336be |
+
|
|
|
e336be |
#define ENA_REGS_ADMIN_INTR_MASK 1
|
|
|
e336be |
|
|
|
e336be |
#define ENA_POLL_MS 5
|
|
|
e336be |
@@ -352,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
|
|
|
e336be |
&io_sq->desc_addr.phys_addr,
|
|
|
e336be |
GFP_KERNEL);
|
|
|
e336be |
}
|
|
|
e336be |
- } else {
|
|
|
e336be |
+
|
|
|
e336be |
+ if (!io_sq->desc_addr.virt_addr) {
|
|
|
e336be |
+ pr_err("memory allocation failed");
|
|
|
e336be |
+ return -ENOMEM;
|
|
|
e336be |
+ }
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
|
|
|
e336be |
+ /* Allocate bounce buffers */
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.buffer_size =
|
|
|
e336be |
+ ena_dev->llq_info.desc_list_entry_size;
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.buffers_num =
|
|
|
e336be |
+ ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.next_to_use = 0;
|
|
|
e336be |
+
|
|
|
e336be |
+ size = io_sq->bounce_buf_ctrl.buffer_size *
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.buffers_num;
|
|
|
e336be |
+
|
|
|
e336be |
dev_node = dev_to_node(ena_dev->dmadev);
|
|
|
e336be |
set_dev_node(ena_dev->dmadev, ctx->numa_node);
|
|
|
e336be |
- io_sq->desc_addr.virt_addr =
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.base_buffer =
|
|
|
e336be |
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
|
|
|
e336be |
set_dev_node(ena_dev->dmadev, dev_node);
|
|
|
e336be |
- if (!io_sq->desc_addr.virt_addr) {
|
|
|
e336be |
- io_sq->desc_addr.virt_addr =
|
|
|
e336be |
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.base_buffer =
|
|
|
e336be |
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
|
|
|
e336be |
+
|
|
|
e336be |
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
|
|
|
e336be |
+ pr_err("bounce buffer memory allocation failed");
|
|
|
e336be |
+ return -ENOMEM;
|
|
|
e336be |
}
|
|
|
e336be |
- }
|
|
|
e336be |
|
|
|
e336be |
- if (!io_sq->desc_addr.virt_addr) {
|
|
|
e336be |
- pr_err("memory allocation failed");
|
|
|
e336be |
- return -ENOMEM;
|
|
|
e336be |
+ memcpy(&io_sq->llq_info, &ena_dev->llq_info,
|
|
|
e336be |
+ sizeof(io_sq->llq_info));
|
|
|
e336be |
+
|
|
|
e336be |
+ /* Initiate the first bounce buffer */
|
|
|
e336be |
+ io_sq->llq_buf_ctrl.curr_bounce_buf =
|
|
|
e336be |
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
|
|
|
e336be |
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
|
|
|
e336be |
+ 0x0, io_sq->llq_info.desc_list_entry_size);
|
|
|
e336be |
+ io_sq->llq_buf_ctrl.descs_left_in_line =
|
|
|
e336be |
+ io_sq->llq_info.descs_num_before_header;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
io_sq->tail = 0;
|
|
|
e336be |
@@ -554,6 +583,156 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
|
|
|
e336be |
return ret;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
+/**
|
|
|
e336be |
+ * Set the LLQ configurations of the firmware
|
|
|
e336be |
+ *
|
|
|
e336be |
+ * The driver provides only the enabled feature values to the device,
|
|
|
e336be |
+ * which in turn, checks if they are supported.
|
|
|
e336be |
+ */
|
|
|
e336be |
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
|
|
|
e336be |
+{
|
|
|
e336be |
+ struct ena_com_admin_queue *admin_queue;
|
|
|
e336be |
+ struct ena_admin_set_feat_cmd cmd;
|
|
|
e336be |
+ struct ena_admin_set_feat_resp resp;
|
|
|
e336be |
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
|
|
|
e336be |
+ int ret;
|
|
|
e336be |
+
|
|
|
e336be |
+ memset(&cmd, 0x0, sizeof(cmd));
|
|
|
e336be |
+ admin_queue = &ena_dev->admin_queue;
|
|
|
e336be |
+
|
|
|
e336be |
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
|
|
|
e336be |
+ cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
|
|
|
e336be |
+
|
|
|
e336be |
+ cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
|
|
|
e336be |
+ cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
|
|
|
e336be |
+ cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
|
|
|
e336be |
+ cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
|
|
|
e336be |
+
|
|
|
e336be |
+ ret = ena_com_execute_admin_command(admin_queue,
|
|
|
e336be |
+ (struct ena_admin_aq_entry *)&cmd,
|
|
|
e336be |
+ sizeof(cmd),
|
|
|
e336be |
+ (struct ena_admin_acq_entry *)&resp,
|
|
|
e336be |
+ sizeof(resp));
|
|
|
e336be |
+
|
|
|
e336be |
+ if (unlikely(ret))
|
|
|
e336be |
+ pr_err("Failed to set LLQ configurations: %d\n", ret);
|
|
|
e336be |
+
|
|
|
e336be |
+ return ret;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
|
|
|
e336be |
+ struct ena_admin_feature_llq_desc *llq_features,
|
|
|
e336be |
+ struct ena_llq_configurations *llq_default_cfg)
|
|
|
e336be |
+{
|
|
|
e336be |
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
|
|
|
e336be |
+ u16 supported_feat;
|
|
|
e336be |
+ int rc;
|
|
|
e336be |
+
|
|
|
e336be |
+ memset(llq_info, 0, sizeof(*llq_info));
|
|
|
e336be |
+
|
|
|
e336be |
+ supported_feat = llq_features->header_location_ctrl_supported;
|
|
|
e336be |
+
|
|
|
e336be |
+ if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
|
|
|
e336be |
+ llq_info->header_location_ctrl =
|
|
|
e336be |
+ llq_default_cfg->llq_header_location;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ pr_err("Invalid header location control, supported: 0x%x\n",
|
|
|
e336be |
+ supported_feat);
|
|
|
e336be |
+ return -EINVAL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
|
|
|
e336be |
+ supported_feat = llq_features->descriptors_stride_ctrl_supported;
|
|
|
e336be |
+ if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
|
|
|
e336be |
+ llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
|
|
|
e336be |
+ llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
|
|
|
e336be |
+ } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
|
|
|
e336be |
+ llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
|
|
|
e336be |
+ supported_feat);
|
|
|
e336be |
+ return -EINVAL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
|
|
|
e336be |
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
|
|
|
e336be |
+ llq_info->desc_stride_ctrl);
|
|
|
e336be |
+ }
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ llq_info->desc_stride_ctrl = 0;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ supported_feat = llq_features->entry_size_ctrl_supported;
|
|
|
e336be |
+ if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
|
|
|
e336be |
+ llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
|
|
|
e336be |
+ llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
|
|
|
e336be |
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
|
|
|
e336be |
+ llq_info->desc_list_entry_size = 128;
|
|
|
e336be |
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
|
|
|
e336be |
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
|
|
|
e336be |
+ llq_info->desc_list_entry_size = 192;
|
|
|
e336be |
+ } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
|
|
|
e336be |
+ llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
|
|
|
e336be |
+ llq_info->desc_list_entry_size = 256;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
|
|
|
e336be |
+ supported_feat);
|
|
|
e336be |
+ return -EINVAL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
|
|
|
e336be |
+ llq_default_cfg->llq_ring_entry_size, supported_feat,
|
|
|
e336be |
+ llq_info->desc_list_entry_size);
|
|
|
e336be |
+ }
|
|
|
e336be |
+ if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
|
|
|
e336be |
+ /* The desc list entry size should be whole multiply of 8
|
|
|
e336be |
+ * This requirement comes from __iowrite64_copy()
|
|
|
e336be |
+ */
|
|
|
e336be |
+ pr_err("illegal entry size %d\n",
|
|
|
e336be |
+ llq_info->desc_list_entry_size);
|
|
|
e336be |
+ return -EINVAL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
|
|
|
e336be |
+ llq_info->descs_per_entry = llq_info->desc_list_entry_size /
|
|
|
e336be |
+ sizeof(struct ena_eth_io_tx_desc);
|
|
|
e336be |
+ else
|
|
|
e336be |
+ llq_info->descs_per_entry = 1;
|
|
|
e336be |
+
|
|
|
e336be |
+ supported_feat = llq_features->desc_num_before_header_supported;
|
|
|
e336be |
+ if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
|
|
|
e336be |
+ llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
|
|
|
e336be |
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
|
|
|
e336be |
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
|
|
|
e336be |
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
|
|
|
e336be |
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
|
|
|
e336be |
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
|
|
|
e336be |
+ } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
|
|
|
e336be |
+ llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
|
|
|
e336be |
+ } else {
|
|
|
e336be |
+ pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
|
|
|
e336be |
+ supported_feat);
|
|
|
e336be |
+ return -EINVAL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
|
|
|
e336be |
+ llq_default_cfg->llq_num_decs_before_header,
|
|
|
e336be |
+ supported_feat, llq_info->descs_num_before_header);
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ rc = ena_com_set_llq(ena_dev);
|
|
|
e336be |
+ if (rc)
|
|
|
e336be |
+ pr_err("Cannot set LLQ configuration: %d\n", rc);
|
|
|
e336be |
+
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
|
|
|
e336be |
struct ena_com_admin_queue *admin_queue)
|
|
|
e336be |
{
|
|
|
e336be |
@@ -725,15 +904,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
|
|
|
e336be |
if (io_sq->desc_addr.virt_addr) {
|
|
|
e336be |
size = io_sq->desc_entry_size * io_sq->q_depth;
|
|
|
e336be |
|
|
|
e336be |
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
|
|
|
e336be |
- dma_free_coherent(ena_dev->dmadev, size,
|
|
|
e336be |
- io_sq->desc_addr.virt_addr,
|
|
|
e336be |
- io_sq->desc_addr.phys_addr);
|
|
|
e336be |
- else
|
|
|
e336be |
- devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
|
|
|
e336be |
+ dma_free_coherent(ena_dev->dmadev, size,
|
|
|
e336be |
+ io_sq->desc_addr.virt_addr,
|
|
|
e336be |
+ io_sq->desc_addr.phys_addr);
|
|
|
e336be |
|
|
|
e336be |
io_sq->desc_addr.virt_addr = NULL;
|
|
|
e336be |
}
|
|
|
e336be |
+
|
|
|
e336be |
+ if (io_sq->bounce_buf_ctrl.base_buffer) {
|
|
|
e336be |
+ devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
|
|
|
e336be |
+ io_sq->bounce_buf_ctrl.base_buffer = NULL;
|
|
|
e336be |
+ }
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
|
|
|
e336be |
@@ -1740,6 +1921,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
|
|
|
e336be |
else
|
|
|
e336be |
return rc;
|
|
|
e336be |
|
|
|
e336be |
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
|
|
|
e336be |
+ if (!rc)
|
|
|
e336be |
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
|
|
|
e336be |
+ sizeof(get_resp.u.llq));
|
|
|
e336be |
+ else if (rc == -EOPNOTSUPP)
|
|
|
e336be |
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
|
|
|
e336be |
+ else
|
|
|
e336be |
+ return rc;
|
|
|
e336be |
+
|
|
|
e336be |
return 0;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
@@ -2708,3 +2898,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
|
|
|
e336be |
intr_moder_tbl[level].pkts_per_interval;
|
|
|
e336be |
entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
|
|
|
e336be |
}
|
|
|
e336be |
+
|
|
|
e336be |
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
|
|
|
e336be |
+ struct ena_admin_feature_llq_desc *llq_features,
|
|
|
e336be |
+ struct ena_llq_configurations *llq_default_cfg)
|
|
|
e336be |
+{
|
|
|
e336be |
+ int rc;
|
|
|
e336be |
+ int size;
|
|
|
e336be |
+
|
|
|
e336be |
+ if (!llq_features->max_llq_num) {
|
|
|
e336be |
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
|
|
|
e336be |
+ if (rc)
|
|
|
e336be |
+ return rc;
|
|
|
e336be |
+
|
|
|
e336be |
+ /* Validate the descriptor is not too big */
|
|
|
e336be |
+ size = ena_dev->tx_max_header_size;
|
|
|
e336be |
+ size += ena_dev->llq_info.descs_num_before_header *
|
|
|
e336be |
+ sizeof(struct ena_eth_io_tx_desc);
|
|
|
e336be |
+
|
|
|
e336be |
+ if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
|
|
|
e336be |
+ pr_err("the size of the LLQ entry is smaller than needed\n");
|
|
|
e336be |
+ return -EINVAL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
|
|
|
e336be |
+
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+}
|
|
|
e336be |
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
|
|
|
e336be |
index 50e6c8f6f138..25af8d025919 100644
|
|
|
e336be |
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
|
|
|
e336be |
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
|
|
|
e336be |
@@ -37,6 +37,7 @@
|
|
|
e336be |
#include <linux/delay.h>
|
|
|
e336be |
#include <linux/dma-mapping.h>
|
|
|
e336be |
#include <linux/gfp.h>
|
|
|
e336be |
+#include <linux/io.h>
|
|
|
e336be |
#include <linux/sched.h>
|
|
|
e336be |
#include <linux/sizes.h>
|
|
|
e336be |
#include <linux/spinlock.h>
|
|
|
e336be |
@@ -973,6 +974,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
|
|
|
e336be |
enum ena_intr_moder_level level,
|
|
|
e336be |
struct ena_intr_moder_entry *entry);
|
|
|
e336be |
|
|
|
e336be |
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
|
|
|
e336be |
+ * @ena_dev: ENA communication layer struct
|
|
|
e336be |
+ * @llq_features: LLQ feature descriptor, retrieve via
|
|
|
e336be |
+ * ena_com_get_dev_attr_feat.
|
|
|
e336be |
+ * @ena_llq_config: The default driver LLQ parameters configurations
|
|
|
e336be |
+ */
|
|
|
e336be |
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
|
|
|
e336be |
+ struct ena_admin_feature_llq_desc *llq_features,
|
|
|
e336be |
+ struct ena_llq_configurations *llq_default_config);
|
|
|
e336be |
+
|
|
|
e336be |
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
|
|
|
e336be |
{
|
|
|
e336be |
return ena_dev->adaptive_coalescing;
|
|
|
e336be |
@@ -1082,4 +1093,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
|
|
|
e336be |
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
|
|
|
e336be |
+{
|
|
|
e336be |
+ u16 size, buffers_num;
|
|
|
e336be |
+ u8 *buf;
|
|
|
e336be |
+
|
|
|
e336be |
+ size = bounce_buf_ctrl->buffer_size;
|
|
|
e336be |
+ buffers_num = bounce_buf_ctrl->buffers_num;
|
|
|
e336be |
+
|
|
|
e336be |
+ buf = bounce_buf_ctrl->base_buffer +
|
|
|
e336be |
+ (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
|
|
|
e336be |
+
|
|
|
e336be |
+ prefetchw(bounce_buf_ctrl->base_buffer +
|
|
|
e336be |
+ (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
|
|
|
e336be |
+
|
|
|
e336be |
+ return buf;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
#endif /* !(ENA_COM) */
|
|
|
e336be |
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
|
|
|
e336be |
index 9c0511e9f9a2..17107ca107e3 100644
|
|
|
e336be |
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
|
|
|
e336be |
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
|
|
|
e336be |
@@ -59,7 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
|
|
|
e336be |
return cdesc;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
{
|
|
|
e336be |
u16 tail_masked;
|
|
|
e336be |
u32 offset;
|
|
|
e336be |
@@ -71,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
|
|
|
e336be |
+ u8 *bounce_buffer)
|
|
|
e336be |
{
|
|
|
e336be |
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
|
|
|
e336be |
- u32 offset = tail_masked * io_sq->desc_entry_size;
|
|
|
e336be |
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
|
|
|
e336be |
|
|
|
e336be |
- /* In case this queue isn't a LLQ */
|
|
|
e336be |
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
|
|
|
e336be |
- return;
|
|
|
e336be |
+ u16 dst_tail_mask;
|
|
|
e336be |
+ u32 dst_offset;
|
|
|
e336be |
|
|
|
e336be |
- memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
|
|
|
e336be |
- io_sq->desc_addr.virt_addr + offset,
|
|
|
e336be |
- io_sq->desc_entry_size);
|
|
|
e336be |
-}
|
|
|
e336be |
+ dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
|
|
|
e336be |
+ dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
|
|
|
e336be |
+
|
|
|
e336be |
+ /* Make sure everything was written into the bounce buffer before
|
|
|
e336be |
+ * writing the bounce buffer to the device
|
|
|
e336be |
+ */
|
|
|
e336be |
+ wmb();
|
|
|
e336be |
+
|
|
|
e336be |
+ /* The line is completed. Copy it to dev */
|
|
|
e336be |
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
|
|
|
e336be |
+ bounce_buffer, (llq_info->desc_list_entry_size) / 8);
|
|
|
e336be |
|
|
|
e336be |
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
-{
|
|
|
e336be |
io_sq->tail++;
|
|
|
e336be |
|
|
|
e336be |
/* Switch phase bit in case of wrap around */
|
|
|
e336be |
if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
|
|
|
e336be |
io_sq->phase ^= 1;
|
|
|
e336be |
+
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
|
|
|
e336be |
- u8 *head_src, u16 header_len)
|
|
|
e336be |
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
|
|
|
e336be |
+ u8 *header_src,
|
|
|
e336be |
+ u16 header_len)
|
|
|
e336be |
{
|
|
|
e336be |
- u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
|
|
|
e336be |
- u8 __iomem *dev_head_addr =
|
|
|
e336be |
- io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
|
|
|
e336be |
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
|
|
|
e336be |
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
|
|
|
e336be |
+ u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
|
|
|
e336be |
+ u16 header_offset;
|
|
|
e336be |
|
|
|
e336be |
- if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
|
|
|
e336be |
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
|
|
|
e336be |
return 0;
|
|
|
e336be |
|
|
|
e336be |
- if (unlikely(!io_sq->header_addr)) {
|
|
|
e336be |
- pr_err("Push buffer header ptr is NULL\n");
|
|
|
e336be |
- return -EINVAL;
|
|
|
e336be |
+ header_offset =
|
|
|
e336be |
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
|
|
|
e336be |
+
|
|
|
e336be |
+ if (unlikely((header_offset + header_len) >
|
|
|
e336be |
+ llq_info->desc_list_entry_size)) {
|
|
|
e336be |
+ pr_err("trying to write header larger than llq entry can accommodate\n");
|
|
|
e336be |
+ return -EFAULT;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ if (unlikely(!bounce_buffer)) {
|
|
|
e336be |
+ pr_err("bounce buffer is NULL\n");
|
|
|
e336be |
+ return -EFAULT;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ memcpy(bounce_buffer + header_offset, header_src, header_len);
|
|
|
e336be |
+
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+{
|
|
|
e336be |
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
|
|
|
e336be |
+ u8 *bounce_buffer;
|
|
|
e336be |
+ void *sq_desc;
|
|
|
e336be |
+
|
|
|
e336be |
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
|
|
|
e336be |
+
|
|
|
e336be |
+ if (unlikely(!bounce_buffer)) {
|
|
|
e336be |
+ pr_err("bounce buffer is NULL\n");
|
|
|
e336be |
+ return NULL;
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
|
|
|
e336be |
+ pkt_ctrl->idx++;
|
|
|
e336be |
+ pkt_ctrl->descs_left_in_line--;
|
|
|
e336be |
+
|
|
|
e336be |
+ return sq_desc;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+{
|
|
|
e336be |
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
|
|
|
e336be |
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
|
|
|
e336be |
+ int rc;
|
|
|
e336be |
+
|
|
|
e336be |
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+
|
|
|
e336be |
+ /* bounce buffer was used, so write it and get a new one */
|
|
|
e336be |
+ if (pkt_ctrl->idx) {
|
|
|
e336be |
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
|
|
|
e336be |
+ pkt_ctrl->curr_bounce_buf);
|
|
|
e336be |
+ if (unlikely(rc))
|
|
|
e336be |
+ return rc;
|
|
|
e336be |
+
|
|
|
e336be |
+ pkt_ctrl->curr_bounce_buf =
|
|
|
e336be |
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
|
|
|
e336be |
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
|
|
|
e336be |
+ 0x0, llq_info->desc_list_entry_size);
|
|
|
e336be |
+ }
|
|
|
e336be |
+
|
|
|
e336be |
+ pkt_ctrl->idx = 0;
|
|
|
e336be |
+ pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+{
|
|
|
e336be |
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
|
|
|
e336be |
+ return get_sq_desc_llq(io_sq);
|
|
|
e336be |
+
|
|
|
e336be |
+ return get_sq_desc_regular_queue(io_sq);
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+{
|
|
|
e336be |
+ struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
|
|
|
e336be |
+ struct ena_com_llq_info *llq_info = &io_sq->llq_info;
|
|
|
e336be |
+ int rc;
|
|
|
e336be |
+
|
|
|
e336be |
+ if (!pkt_ctrl->descs_left_in_line) {
|
|
|
e336be |
+ rc = ena_com_write_bounce_buffer_to_dev(io_sq,
|
|
|
e336be |
+ pkt_ctrl->curr_bounce_buf);
|
|
|
e336be |
+ if (unlikely(rc))
|
|
|
e336be |
+ return rc;
|
|
|
e336be |
+
|
|
|
e336be |
+ pkt_ctrl->curr_bounce_buf =
|
|
|
e336be |
+ ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
|
|
|
e336be |
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
|
|
|
e336be |
+ 0x0, llq_info->desc_list_entry_size);
|
|
|
e336be |
+
|
|
|
e336be |
+ pkt_ctrl->idx = 0;
|
|
|
e336be |
+ if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
|
|
|
e336be |
+ pkt_ctrl->descs_left_in_line = 1;
|
|
|
e336be |
+ else
|
|
|
e336be |
+ pkt_ctrl->descs_left_in_line =
|
|
|
e336be |
+ llq_info->desc_list_entry_size / io_sq->desc_entry_size;
|
|
|
e336be |
}
|
|
|
e336be |
|
|
|
e336be |
- memcpy_toio(dev_head_addr, head_src, header_len);
|
|
|
e336be |
+ return 0;
|
|
|
e336be |
+}
|
|
|
e336be |
+
|
|
|
e336be |
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
|
|
|
e336be |
+{
|
|
|
e336be |
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
|
|
|
e336be |
+ return ena_com_sq_update_llq_tail(io_sq);
|
|
|
e336be |
+
|
|
|
e336be |
+ io_sq->tail++;
|
|
|
e336be |
+
|
|
|
e336be |
+ /* Switch phase bit in case of wrap around */
|
|
|
e336be |
+ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
|
|
|
e336be |
+ io_sq->phase ^= 1;
|
|
|
e336be |
|
|
|
e336be |
return 0;
|
|
|
e336be |
}
@@ -177,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}

-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -223,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
memcpy(&io_sq->cached_tx_meta, ena_meta,
sizeof(struct ena_com_tx_meta));

- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ return ena_com_sq_update_tail(io_sq);
}

static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -262,18 +375,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_tx_desc *desc = NULL;
struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
- void *push_header = ena_tx_ctx->push_header;
+ void *buffer_to_push = ena_tx_ctx->push_header;
u16 header_len = ena_tx_ctx->header_len;
u16 num_bufs = ena_tx_ctx->num_bufs;
- int total_desc, i, rc;
+ u16 start_tail = io_sq->tail;
+ int i, rc;
bool have_meta;
u64 addr_hi;

WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

/* num_bufs +1 for potential meta desc */
- if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
- pr_err("Not enough space in the tx queue\n");
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+ pr_debug("Not enough space in the tx queue\n");
return -ENOMEM;
}

@@ -283,23 +397,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
return -EINVAL;
}

- /* start with pushing the header (if needed) */
- rc = ena_com_write_header(io_sq, push_header, header_len);
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+ !buffer_to_push))
+ return -EINVAL;
+
+ rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;

have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
ena_tx_ctx);
- if (have_meta)
- ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (have_meta) {
+ rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+ if (unlikely(rc))
+ return rc;
+ }

- /* If the caller doesn't want send packets */
+ /* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
- *nb_hw_desc = have_meta ? 0 : 1;
- return 0;
+ rc = ena_com_close_bounce_buffer(io_sq);
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}

desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

/* Set first desc when we don't have meta descriptor */
@@ -351,10 +474,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
for (i = 0; i < num_bufs; i++) {
/* The first desc share the same desc as the header */
if (likely(i != 0)) {
- ena_com_copy_curr_sq_desc_to_dev(io_sq);
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;

desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

desc->len_ctrl |= (io_sq->phase <<
@@ -377,15 +504,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* set the last desc indicator */
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

- ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
- ena_com_sq_update_tail(io_sq);
+ rc = ena_com_sq_update_tail(io_sq);
+ if (unlikely(rc))
+ return rc;

- total_desc = max_t(u16, num_bufs, 1);
- total_desc += have_meta ? 1 : 0;
+ rc = ena_com_close_bounce_buffer(io_sq);

- *nb_hw_desc = total_desc;
- return 0;
+ *nb_hw_desc = io_sq->tail - start_tail;
+ return rc;
}

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -444,15 +570,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,

WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

- if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+ if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
return -ENOSPC;

desc = get_sq_desc(io_sq);
+ if (unlikely(!desc))
+ return -EFAULT;
+
memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

desc->length = ena_buf->len;

- desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -463,9 +592,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

- ena_com_sq_update_tail(io_sq);
-
- return 0;
+ return ena_com_sq_update_tail(io_sq);
}

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
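Note how the rewritten ena_com_prepare_tx() changes descriptor accounting: instead of re-deriving total_desc from num_bufs and have_meta, it snapshots io_sq->tail on entry and reports the distance on return, which also covers any extra descriptors the LLQ bounce path emits. The u16 arithmetic stays correct across wrap-around, as this small sketch shows (descs_used is a hypothetical helper, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* modulo-2^16 distance between two tail snapshots */
    static uint16_t descs_used(uint16_t start_tail, uint16_t end_tail)
    {
            return (uint16_t)(end_tail - start_tail);
    }

    int main(void)
    {
            assert(descs_used(100, 103) == 3);        /* plain case */
            assert(descs_used(0xfffe, 0x0001) == 3);  /* across the wrap */
            return 0;
    }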
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 4930324e9d8d..bcc84072367d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -94,7 +94,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
writel(intr_reg->intr_control, io_cq->unmask_reg);
}

-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;

@@ -105,11 +105,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}

-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+ u16 required_buffers)
{
- u16 tail;
+ int temp;

- tail = io_sq->tail;
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return ena_com_free_desc(io_sq) >= required_buffers;
+
+ /* This calculation doesn't need to be 100% accurate. So to reduce
+ * the calculation overhead just subtract 2 lines from the free descs
+ * (one for the header line and one to compensate for the rounded-down
+ * division).
+ */
+ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+ return ena_com_free_desc(io_sq) > temp;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+ u16 tail = io_sq->tail;

pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
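The two helpers above split the old ena_com_sq_empty_space() into a raw free-descriptor count and a placement-aware predicate. For LLQ the check is deliberately pessimistic: required_buffers is divided by descs_per_entry (rounding down) and two extra entries are reserved, one for the header line and one to absorb the rounding. A user-space model of the arithmetic (illustrative only; the struct and function names are stand-ins for the driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sq {
            uint16_t tail, next_to_comp, q_depth; /* q_depth: power of two */
            bool is_llq;
            uint16_t descs_per_entry;  /* LLQ descriptors per list entry */
    };

    static int free_desc(const struct sq *sq)
    {
            uint16_t cnt = sq->tail - sq->next_to_comp; /* outstanding descs */
            return sq->q_depth - 1 - cnt;
    }

    static bool have_enough_space(const struct sq *sq, uint16_t required)
    {
            if (!sq->is_llq)
                    return free_desc(sq) >= required;
            /* deliberately pessimistic: +2 entries of slack */
            return free_desc(sq) > required / sq->descs_per_entry + 2;
    }

    int main(void)
    {
            struct sq sq = { .tail = 1030, .next_to_comp = 1000, .q_depth = 1024,
                             .is_llq = true, .descs_per_entry = 2 };
            /* 1023 - 30 = 993 free entries; 17 descs need 17/2 + 2 = 10 */
            printf("ok=%d\n", have_enough_space(&sq, 17));
            return 0;
    }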
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 789556960b8e..e732bd2ddd32 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -804,12 +804,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
*/
smp_mb();

- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
- above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
- ENA_TX_WAKEUP_THRESH;
+ above_thresh =
+ ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH);
if (netif_tx_queue_stopped(txq) && above_thresh) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
@@ -1101,7 +1102,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,

rx_ring->next_to_clean = next_to_clean;

- refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

/* Optimization, try to batch new rx buffers */
@@ -2115,8 +2116,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
*/
- if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
- (tx_ring->sgl_size + 2))) {
+ if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ tx_ring->sgl_size + 2))) {
netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
__func__, qid);

@@ -2135,8 +2136,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_mb();

- if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
- > ENA_TX_WAKEUP_THRESH) {
+ if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+ ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.queue_wakeup++;
@@ -2813,7 +2814,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring = &adapter->rx_ring[i];

refill_required =
- ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+ ena_com_free_desc(rx_ring->ena_com_io_sq);
if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
rx_ring->empty_rx_queue++;

--
2.19.1
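In the ena_netdev.c hunks of the patch above, every ena_com_sq_empty_space() threshold test becomes an ena_com_sq_have_enough_space() call, but the surrounding lockless stop/wake protocol is unchanged: the sender stops the queue when space runs out, issues a full barrier, and re-checks so a completion racing in between cannot strand the queue. A compact user-space model of that protocol (illustrative only; atomic_thread_fence() stands in for smp_mb(), and all names are invented):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define DEPTH 64
    static atomic_int in_flight;  /* descriptors owned by the device */
    static atomic_int stopped;    /* models netif_tx_queue_stopped() */

    static int have_space(int need)
    {
            return DEPTH - atomic_load(&in_flight) >= need;
    }

    static void *producer(void *arg)   /* models ena_start_xmit() */
    {
            (void)arg;
            for (int sent = 0; sent < 100000;) {
                    if (atomic_load(&stopped))
                            continue;
                    if (!have_space(4)) {
                            atomic_store(&stopped, 1);
                            atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
                            if (have_space(4))  /* recheck: a completion may have raced */
                                    atomic_store(&stopped, 0);
                            continue;
                    }
                    atomic_fetch_add(&in_flight, 4);
                    sent++;
            }
            return NULL;
    }

    static void *completer(void *arg)  /* models ena_clean_tx_irq() */
    {
            (void)arg;
            for (;;) {
                    if (atomic_load(&in_flight) >= 4)
                            atomic_fetch_sub(&in_flight, 4);
                    atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
                    if (atomic_load(&stopped) && have_space(4))
                            atomic_store(&stopped, 0);  /* wake the queue */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t p, c;
            pthread_create(&c, NULL, completer, NULL);
            pthread_create(&p, NULL, producer, NULL);
            pthread_join(p, NULL);
            puts("producer finished: the queue was never left stopped");
            return 0;
    }

Without the barrier and the recheck on both sides, a completion landing between the space test and the stop could leave the queue stopped forever.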

From e4729991ed2e7e26e4b061369d7dee054ca4710f Mon Sep 17 00:00:00 2001
From: Arthur Kiyanovski <akiyano@amazon.com>
Date: Thu, 11 Oct 2018 11:26:20 +0300
Subject: [PATCH 06/16] net: ena: add functions for handling Low Latency Queues
 in ena_netdev

This patch includes all code changes necessary in ena_netdev to enable
packet sending via the LLQ placement mode.

Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 +
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 387 +++++++++++-------
 drivers/net/ethernet/amazon/ena/ena_netdev.h | 6 +
 3 files changed, 251 insertions(+), 143 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 521607bc4393..fd28bd0d1c1e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(doorbells),
ENA_STAT_TX_ENTRY(prepare_ctx_err),
ENA_STAT_TX_ENTRY(bad_req_id),
+ ENA_STAT_TX_ENTRY(llq_buffer_copy),
ENA_STAT_TX_ENTRY(missed_tx),
};

diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e732bd2ddd32..fcdfaf0ab8a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -237,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
}
}

+ size = tx_ring->tx_max_header_size;
+ tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ tx_ring->push_buf_intermediate_buf = vzalloc(size);
+ if (!tx_ring->push_buf_intermediate_buf) {
+ vfree(tx_ring->tx_buffer_info);
+ vfree(tx_ring->free_tx_ids);
+ return -ENOMEM;
+ }
+ }
+
/* Req id ring for TX out of order completions */
for (i = 0; i < tx_ring->ring_size; i++)
tx_ring->free_tx_ids[i] = i;
@@ -265,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)

vfree(tx_ring->free_tx_ids);
tx_ring->free_tx_ids = NULL;
+
+ vfree(tx_ring->push_buf_intermediate_buf);
+ tx_ring->push_buf_intermediate_buf = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -602,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
ena_free_rx_bufs(adapter, i);
}

+static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info)
+{
+ struct ena_com_buf *ena_buf;
+ u32 cnt;
+ int i;
+
+ ena_buf = tx_info->bufs;
+ cnt = tx_info->num_of_bufs;
+
+ if (unlikely(!cnt))
+ return;
+
+ if (tx_info->map_linear_data) {
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len),
+ DMA_TO_DEVICE);
+ ena_buf++;
+ cnt--;
+ }
+
+ /* unmap remaining mapped pages */
+ for (i = 0; i < cnt; i++) {
+ dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+ dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+ ena_buf++;
+ }
+}
+
/* ena_free_tx_bufs - Free Tx Buffers per Queue
* @tx_ring: TX ring for which buffers be freed
*/
@@ -612,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)

for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
- struct ena_com_buf *ena_buf;
- int nr_frags;
- int j;

if (!tx_info->skb)
continue;
@@ -630,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
tx_ring->qid, i);
}

- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (j = 0; j < nr_frags; j++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- ena_buf->paddr,
- ena_buf->len,
- DMA_TO_DEVICE);
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);

dev_kfree_skb_any(tx_info->skb);
}
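All the DMA-unmap sequences in this file now funnel through the new ena_unmap_tx_skb() helper, whose shape is: unmap the linear part first, but only if map_linear_data says it was mapped at all (an LLQ push of the whole head may make that unnecessary), then walk the remaining buffers as page fragments. A reduced sketch of that walk (illustrative; dma_unmap_one is a placeholder for dma_unmap_single()/dma_unmap_page()):

    #include <stdio.h>

    struct buf { unsigned long paddr; unsigned int len; };

    static void unmap_all(struct buf *bufs, unsigned int cnt,
                          int map_linear_data,
                          void (*dma_unmap_one)(struct buf *))
    {
            if (!cnt)
                    return;
            if (map_linear_data) {  /* linear (headlen) part, if mapped */
                    dma_unmap_one(bufs++);
                    cnt--;
            }
            while (cnt--)           /* remaining page fragments */
                    dma_unmap_one(bufs++);
    }

    static int unmapped;
    static void count_unmap(struct buf *b) { (void)b; unmapped++; }

    int main(void)
    {
            struct buf bufs[3] = { { 0, 0 } };
            unmap_all(bufs, 3, 1, count_unmap);
            printf("unmapped %d buffers\n", unmapped);  /* prints 3 */
            return 0;
    }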
@@ -735,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
while (tx_pkts < budget) {
struct ena_tx_buffer *tx_info;
struct sk_buff *skb;
- struct ena_com_buf *ena_buf;
- int i, nr_frags;

rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
@@ -756,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_info->skb = NULL;
tx_info->last_jiffies = 0;

- if (likely(tx_info->num_of_bufs != 0)) {
- ena_buf = tx_info->bufs;
-
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- nr_frags = tx_info->num_of_bufs - 1;
- for (i = 0; i < nr_frags; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev,
- dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len),
- DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);

netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -1300,7 +1308,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)

/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
-
netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs);

@@ -1591,7 +1598,7 @@ static int ena_up_complete(struct ena_adapter *adapter)

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_com_dev *ena_dev;
struct ena_ring *tx_ring;
u32 msix_vector;
@@ -1604,6 +1611,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_TXQ_IDX(qid);

+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
ctx.qid = ena_qid;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1657,7 +1666,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
struct ena_com_dev *ena_dev;
- struct ena_com_create_io_ctx ctx = { 0 };
+ struct ena_com_create_io_ctx ctx;
struct ena_ring *rx_ring;
u32 msix_vector;
u16 ena_qid;
@@ -1669,6 +1678,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
msix_vector = ENA_IO_IRQ_IDX(qid);
ena_qid = ENA_IO_RXQ_IDX(qid);

+ memset(&ctx, 0x0, sizeof(ctx));
+
ctx.qid = ena_qid;
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1986,73 +1997,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
return rc;
}

-/* Called with netif_tx_lock. */
-static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ena_tx_map_skb(struct ena_ring *tx_ring,
+ struct ena_tx_buffer *tx_info,
+ struct sk_buff *skb,
+ void **push_hdr,
+ u16 *header_len)
{
- struct ena_adapter *adapter = netdev_priv(dev);
- struct ena_tx_buffer *tx_info;
- struct ena_com_tx_ctx ena_tx_ctx;
- struct ena_ring *tx_ring;
- struct netdev_queue *txq;
+ struct ena_adapter *adapter = tx_ring->adapter;
struct ena_com_buf *ena_buf;
- void *push_hdr;
- u32 len, last_frag;
- u16 next_to_use;
- u16 req_id;
- u16 push_len;
- u16 header_len;
dma_addr_t dma;
- int qid, rc, nb_hw_desc;
- int i = -1;
-
- netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
- /* Determine which tx ring we will be placed on */
- qid = skb_get_queue_mapping(skb);
- tx_ring = &adapter->tx_ring[qid];
- txq = netdev_get_tx_queue(dev, qid);
-
- rc = ena_check_and_linearize_skb(tx_ring, skb);
- if (unlikely(rc))
- goto error_drop_packet;
-
- skb_tx_timestamp(skb);
- len = skb_headlen(skb);
+ u32 skb_head_len, frag_len, last_frag;
+ u16 push_len = 0;
+ u16 delta = 0;
+ int i = 0;

- next_to_use = tx_ring->next_to_use;
- req_id = tx_ring->free_tx_ids[next_to_use];
- tx_info = &tx_ring->tx_buffer_info[req_id];
- tx_info->num_of_bufs = 0;
-
- WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
- ena_buf = tx_info->bufs;
+ skb_head_len = skb_headlen(skb);
tx_info->skb = skb;
+ ena_buf = tx_info->bufs;

if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- /* prepared the push buffer */
- push_len = min_t(u32, len, tx_ring->tx_max_header_size);
- header_len = push_len;
- push_hdr = skb->data;
+ /* When the device is in LLQ mode, the driver copies
+ * the header into the device memory space.
+ * The ena_com layer assumes the header is in a linear
+ * memory space.
+ * This assumption might be wrong since part of the header
+ * can be in the fragmented buffers.
+ * Use skb_header_pointer to make sure the header is in a
+ * linear memory space.
+ */
+
+ push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
+ *push_hdr = skb_header_pointer(skb, 0, push_len,
+ tx_ring->push_buf_intermediate_buf);
+ *header_len = push_len;
+ if (unlikely(skb->data != *push_hdr)) {
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.llq_buffer_copy++;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ delta = push_len - skb_head_len;
+ }
} else {
- push_len = 0;
- header_len = min_t(u32, len, tx_ring->tx_max_header_size);
- push_hdr = NULL;
+ *push_hdr = NULL;
+ *header_len = min_t(u32, skb_head_len,
+ tx_ring->tx_max_header_size);
}

- netif_dbg(adapter, tx_queued, dev,
+ netif_dbg(adapter, tx_queued, adapter->netdev,
"skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
- push_hdr, push_len);
+ *push_hdr, push_len);

- if (len > push_len) {
+ if (skb_head_len > push_len) {
dma = dma_map_single(tx_ring->dev, skb->data + push_len,
- len - push_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ skb_head_len - push_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;

ena_buf->paddr = dma;
- ena_buf->len = len - push_len;
+ ena_buf->len = skb_head_len - push_len;

ena_buf++;
tx_info->num_of_bufs++;
+ tx_info->map_linear_data = 1;
+ } else {
+ tx_info->map_linear_data = 0;
}

last_frag = skb_shinfo(skb)->nr_frags;
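The LLQ branch above must hand ena_com a linear copy of the first push_len bytes, which skb_header_pointer() guarantees: it returns skb->data directly when the bytes are already contiguous, and otherwise assembles them into push_buf_intermediate_buf (counted by the new llq_buffer_copy stat). The delta it leaves behind, push_len minus skb_headlen(), is exactly how many frag bytes the mapping loop in the next hunk must skip. A toy model of that decision (hypothetical names; not the kernel implementation):

    #include <stdio.h>
    #include <string.h>

    /* mimics skb_header_pointer(): in-place when linear, else bounce copy */
    static const void *header_pointer(const char *linear, size_t head_len,
                                      const char *frag, size_t push_len,
                                      char *bounce)
    {
            if (push_len <= head_len)
                    return linear;            /* fast path: already linear */
            memcpy(bounce, linear, head_len); /* slow path: assemble a copy */
            memcpy(bounce + head_len, frag, push_len - head_len);
            return bounce;
    }

    int main(void)
    {
            char linear[8] = "abcdefgh", frag[8] = "ijklmnop", bounce[16];
            size_t push_len = 12, head_len = sizeof(linear);
            const void *hdr = header_pointer(linear, head_len, frag,
                                             push_len, bounce);
            /* bytes taken from the frags, which the mapping loop skips */
            size_t delta = hdr != (void *)linear ? push_len - head_len : 0;
            printf("copied=%d delta=%zu\n", hdr != (void *)linear, delta);
            return 0;   /* prints copied=1 delta=4 */
    }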
@@ -2060,18 +2068,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < last_frag; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

- len = skb_frag_size(frag);
- dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ frag_len = skb_frag_size(frag);
+
+ if (unlikely(delta >= frag_len)) {
+ delta -= frag_len;
+ continue;
+ }
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
+ frag_len - delta, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
goto error_report_dma_error;

ena_buf->paddr = dma;
- ena_buf->len = len;
+ ena_buf->len = frag_len - delta;
ena_buf++;
+ tx_info->num_of_bufs++;
+ delta = 0;
}

- tx_info->num_of_bufs += last_frag;
+ return 0;
+
+error_report_dma_error:
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.dma_mapping_err++;
+ u64_stats_update_end(&tx_ring->syncp);
+ netdev_warn(adapter->netdev, "failed to map skb\n");
+
+ tx_info->skb = NULL;
+
+ tx_info->num_of_bufs += i;
+ ena_unmap_tx_skb(tx_ring, tx_info);
+
+ return -EINVAL;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ struct ena_tx_buffer *tx_info;
+ struct ena_com_tx_ctx ena_tx_ctx;
+ struct ena_ring *tx_ring;
+ struct netdev_queue *txq;
+ void *push_hdr;
+ u16 next_to_use, req_id, header_len;
+ int qid, rc, nb_hw_desc;
+
+ netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+ /* Determine which tx ring we will be placed on */
+ qid = skb_get_queue_mapping(skb);
+ tx_ring = &adapter->tx_ring[qid];
+ txq = netdev_get_tx_queue(dev, qid);
+
+ rc = ena_check_and_linearize_skb(tx_ring, skb);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+ skb_tx_timestamp(skb);
+
+ next_to_use = tx_ring->next_to_use;
+ req_id = tx_ring->free_tx_ids[next_to_use];
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+ WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+
+ rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
+ if (unlikely(rc))
+ goto error_drop_packet;

memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
ena_tx_ctx.ena_bufs = tx_info->bufs;
@@ -2087,14 +2152,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
&nb_hw_desc);

+ /* ena_com_prepare_tx() can't fail due to overflow of the tx queue,
+ * since the number of free descriptors in the queue is checked
+ * after sending the previous packet. In case there isn't enough
+ * space in the queue for the next packet, the queue is stopped
+ * until there is again enough available space in the queue.
+ * All other failure reasons of ena_com_prepare_tx() are fatal
+ * and therefore require a device reset.
+ */
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
"failed to prepare tx bufs\n");
u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_stop++;
tx_ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&tx_ring->syncp);
- netif_tx_stop_queue(txq);
+ adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
goto error_unmap_dma;
}
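Since queue overflow can no longer reach this point, a failure here means corrupted driver state, so the error path now flags an asynchronous reset instead of stopping the queue: the hot path only sets ENA_FLAG_TRIGGER_RESET and the periodic service task performs the actual reset. The deferral pattern, reduced to a runnable sketch (the flag name mirrors the driver's; everything else is invented):

    #include <stdatomic.h>
    #include <stdio.h>

    #define FLAG_TRIGGER_RESET 0
    static atomic_ulong flags;

    static void mark_for_reset(void)   /* called from the xmit error path */
    {
            atomic_fetch_or(&flags, 1UL << FLAG_TRIGGER_RESET);
    }

    static void service_task(void)     /* periodic watchdog context */
    {
            unsigned long old =
                    atomic_fetch_and(&flags, ~(1UL << FLAG_TRIGGER_RESET));
            if (old & (1UL << FLAG_TRIGGER_RESET))
                    puts("resetting device (once)");
    }

    int main(void)
    {
            mark_for_reset();
            mark_for_reset();  /* repeated errors coalesce into one reset */
            service_task();    /* performs the reset */
            service_task();    /* no-op: flag already cleared */
            return 0;
    }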

@@ -2157,35 +2230,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)

return NETDEV_TX_OK;

-error_report_dma_error:
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
-
- tx_info->skb = NULL;
-
error_unmap_dma:
- if (i >= 0) {
- /* save value of frag that failed */
- last_frag = i;
-
- /* start back at beginning and unmap skb */
- tx_info->skb = NULL;
- ena_buf = tx_info->bufs;
- dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-
- /* unmap remaining mapped pages */
- for (i = 0; i < last_frag; i++) {
- ena_buf++;
- dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
- dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
- }
- }
+ ena_unmap_tx_skb(tx_ring, tx_info);
+ tx_info->skb = NULL;

error_drop_packet:
-
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2621,7 +2670,9 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);

mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
+ dev_err(&pdev->dev,
+ "Device reset completed successfully, Driver info: %s\n",
+ version);

return rc;
err_disable_msix:
@@ -2988,18 +3039,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
return io_queue_num;
}

-static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+ struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_llq_desc *llq,
+ struct ena_llq_configurations *llq_default_configurations)
{
bool has_mem_bar;
+ int rc;
+ u32 llq_feature_mask;
+
+ llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+ if (!(ena_dev->supported_features & llq_feature_mask)) {
+ dev_err(&pdev->dev,
+ "LLQ is not supported. Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }

has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

- /* Enable push mode if device supports LLQ */
- if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else
+ rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+ if (unlikely(rc)) {
+ dev_err(&pdev->dev,
+ "Failed to configure the device mode. Fallback to host mode policy.\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ /* Nothing to config, exit */
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return 0;
+
+ if (!has_mem_bar) {
+ dev_err(&pdev->dev,
+ "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
+ ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, ENA_MEM_BAR),
+ pci_resource_len(pdev, ENA_MEM_BAR));
+
+ if (!ena_dev->mem_bar)
+ return -EFAULT;
+
+ return 0;
}

static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
@@ -3117,6 +3202,15 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
pci_release_selected_regions(pdev, release_bars);
}

+static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+ llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+ llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+ llq_config->llq_ring_entry_size_value = 128;
+}
+
static int ena_calc_queue_size(struct pci_dev *pdev,
struct ena_com_dev *ena_dev,
u16 *max_tx_sgl_size,
@@ -3165,7 +3259,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int version_printed;
struct net_device *netdev;
struct ena_adapter *adapter;
+ struct ena_llq_configurations llq_config;
struct ena_com_dev *ena_dev = NULL;
+ char *queue_type_str;
static int adapters_found;