| From 371f6ca7f5cbad70f6e5fafc12d5448d7b6f0750 Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:16 +0300 |
| Subject: [PATCH 02/16] net: ena: minor performance improvement |
| |
| Reduce fastpath overhead by making ena_com_tx_comp_req_id_get() inline. |
| Also move it to the ena_eth_com.h file along with the function it |
| depends on, ena_com_cq_inc_head(). |
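| |
| For context, this is a condensed sketch of the fastpath loop in |
| ena_clean_tx_irq() (ena_netdev.c) that calls this function once per |
| completed packet; the helper name and the trimmed error handling are |
| illustrative only: |
| |
| 	static int ena_tx_comp_sketch(struct ena_ring *tx_ring, u32 budget) |
| 	{ |
| 		u32 tx_pkts = 0; |
| 		u16 req_id; |
| 		int rc; |
| |
| 		while (tx_pkts < budget) { |
| 			/* now an inline call - no function-call overhead */ |
| 			rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, |
| 							&req_id); |
| 			if (rc) |
| 				break; |
| |
| 			/* req_id indexes tx_ring->tx_buffer_info */ |
| 			tx_pkts++; |
| 		} |
| |
| 		return tx_pkts; |
| 	} |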
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| --- |
| drivers/net/ethernet/amazon/ena/ena_eth_com.c | 43 ----------------- |
| drivers/net/ethernet/amazon/ena/ena_eth_com.h | 46 ++++++++++++++++++- |
| 2 files changed, 44 insertions(+), 45 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| index 2b3ff0c20155..9c0511e9f9a2 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| @@ -59,15 +59,6 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( |
| return cdesc; |
| } |
| |
| -static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) |
| -{ |
| - io_cq->head++; |
| - |
| - /* Switch phase bit in case of wrap around */ |
| - if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) |
| - io_cq->phase ^= 1; |
| -} |
| - |
| static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) |
| { |
| u16 tail_masked; |
| @@ -477,40 +468,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, |
| return 0; |
| } |
| |
| -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) |
| -{ |
| - u8 expected_phase, cdesc_phase; |
| - struct ena_eth_io_tx_cdesc *cdesc; |
| - u16 masked_head; |
| - |
| - masked_head = io_cq->head & (io_cq->q_depth - 1); |
| - expected_phase = io_cq->phase; |
| - |
| - cdesc = (struct ena_eth_io_tx_cdesc *) |
| - ((uintptr_t)io_cq->cdesc_addr.virt_addr + |
| - (masked_head * io_cq->cdesc_entry_size_in_bytes)); |
| - |
| - /* When the current completion descriptor phase isn't the same as the |
| - * expected, it mean that the device still didn't update |
| - * this completion. |
| - */ |
| - cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; |
| - if (cdesc_phase != expected_phase) |
| - return -EAGAIN; |
| - |
| - dma_rmb(); |
| - if (unlikely(cdesc->req_id >= io_cq->q_depth)) { |
| - pr_err("Invalid req id %d\n", cdesc->req_id); |
| - return -EINVAL; |
| - } |
| - |
| - ena_com_cq_inc_head(io_cq); |
| - |
| - *req_id = READ_ONCE(cdesc->req_id); |
| - |
| - return 0; |
| -} |
| - |
| bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) |
| { |
| struct ena_eth_io_rx_cdesc_base *cdesc; |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| index 2f7657227cfe..4930324e9d8d 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| @@ -86,8 +86,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, |
| struct ena_com_buf *ena_buf, |
| u16 req_id); |
| |
| -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); |
| - |
| bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); |
| |
| static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, |
| @@ -159,4 +157,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) |
| io_sq->next_to_comp += elem; |
| } |
| |
| +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) |
| +{ |
| + io_cq->head++; |
| + |
| + /* Switch phase bit in case of wrap around */ |
| + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) |
| + io_cq->phase ^= 1; |
| +} |
| + |
| +static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, |
| + u16 *req_id) |
| +{ |
| + u8 expected_phase, cdesc_phase; |
| + struct ena_eth_io_tx_cdesc *cdesc; |
| + u16 masked_head; |
| + |
| + masked_head = io_cq->head & (io_cq->q_depth - 1); |
| + expected_phase = io_cq->phase; |
| + |
| + cdesc = (struct ena_eth_io_tx_cdesc *) |
| + ((uintptr_t)io_cq->cdesc_addr.virt_addr + |
| + (masked_head * io_cq->cdesc_entry_size_in_bytes)); |
| + |
| + /* When the current completion descriptor phase isn't the same as the |
| + * expected phase, it means that the device hasn't updated |
| + * this completion yet. |
| + */ |
| + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; |
| + if (cdesc_phase != expected_phase) |
| + return -EAGAIN; |
| + |
| + dma_rmb(); |
| + |
| + *req_id = READ_ONCE(cdesc->req_id); |
| + if (unlikely(*req_id >= io_cq->q_depth)) { |
| + pr_err("Invalid req id %d\n", cdesc->req_id); |
| + return -EINVAL; |
| + } |
| + |
| + ena_com_cq_inc_head(io_cq); |
| + |
| + return 0; |
| +} |
| + |
| #endif /* ENA_ETH_COM_H_ */ |
| -- |
| 2.19.1 |
| |
| From df44a6755f48dfc5c94d878e80807931460c3846 Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:17 +0300 |
| Subject: [PATCH 03/16] net: ena: complete host info to match latest ENA spec |
| |
| Add new fields and definitions to host info and fill them |
| according to the latest ENA spec version. |
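| |
| For illustration, the new bdf field follows the standard PCI BDF |
| encoding, so the value the driver writes in the ena_netdev.c hunk below |
| could equivalently be built from the new shift/mask definitions (a |
| sketch; PCI_SLOT()/PCI_FUNC() are the generic kernel helpers): |
| |
| 	/* equivalent to (pdev->bus->number << 8) | pdev->devfn */ |
| 	host_info->bdf = |
| 		(pdev->bus->number << ENA_ADMIN_HOST_INFO_BUS_SHIFT) | |
| 		(PCI_SLOT(pdev->devfn) << ENA_ADMIN_HOST_INFO_DEVICE_SHIFT) | |
| 		PCI_FUNC(pdev->devfn); |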
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| --- |
| .../net/ethernet/amazon/ena/ena_admin_defs.h | 31 ++++++++++++++++++- |
| drivers/net/ethernet/amazon/ena/ena_com.c | 12 +++---- |
| .../net/ethernet/amazon/ena/ena_common_defs.h | 4 +-- |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 ++++-- |
| 4 files changed, 43 insertions(+), 14 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| index 4532e574ebcd..d735164efea3 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| @@ -63,6 +63,8 @@ enum ena_admin_aq_completion_status { |
| ENA_ADMIN_ILLEGAL_PARAMETER = 5, |
| |
| ENA_ADMIN_UNKNOWN_ERROR = 6, |
| + |
| + ENA_ADMIN_RESOURCE_BUSY = 7, |
| }; |
| |
| enum ena_admin_aq_feature_id { |
| @@ -702,6 +704,10 @@ enum ena_admin_os_type { |
| ENA_ADMIN_OS_FREEBSD = 4, |
| |
| ENA_ADMIN_OS_IPXE = 5, |
| + |
| + ENA_ADMIN_OS_ESXI = 6, |
| + |
| + ENA_ADMIN_OS_GROUPS_NUM = 6, |
| }; |
| |
| struct ena_admin_host_info { |
| @@ -723,11 +729,27 @@ struct ena_admin_host_info { |
| /* 7:0 : major |
| * 15:8 : minor |
| * 23:16 : sub_minor |
| + * 31:24 : module_type |
| */ |
| u32 driver_version; |
| |
| /* features bitmap */ |
| - u32 supported_network_features[4]; |
| + u32 supported_network_features[2]; |
| + |
| + /* ENA spec version of driver */ |
| + u16 ena_spec_version; |
| + |
| + /* ENA device's Bus, Device and Function |
| + * 2:0 : function |
| + * 7:3 : device |
| + * 15:8 : bus |
| + */ |
| + u16 bdf; |
| + |
| + /* Number of CPUs */ |
| + u16 num_cpus; |
| + |
| + u16 reserved; |
| }; |
| |
| struct ena_admin_rss_ind_table_entry { |
| @@ -1008,6 +1030,13 @@ struct ena_admin_ena_mmio_req_read_less_resp { |
| #define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) |
| #define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 |
| #define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) |
| +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 |
| +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) |
| +#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) |
| +#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 |
| +#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) |
| +#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 |
| +#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) |
| |
| /* aenq_common_desc */ |
| #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c |
| index 7635c38e77dd..b6e6a4721931 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.c |
| @@ -41,9 +41,6 @@ |
| #define ENA_ASYNC_QUEUE_DEPTH 16 |
| #define ENA_ADMIN_QUEUE_DEPTH 32 |
| |
| -#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ |
| - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ |
| - | (ENA_COMMON_SPEC_VERSION_MINOR)) |
| |
| #define ENA_CTRL_MAJOR 0 |
| #define ENA_CTRL_MINOR 0 |
| @@ -1400,11 +1397,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) |
| ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, |
| ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); |
| |
| - if (ver < MIN_ENA_VER) { |
| - pr_err("ENA version is lower than the minimal version the driver supports\n"); |
| - return -1; |
| - } |
| - |
| pr_info("ena controller version: %d.%d.%d implementation version %d\n", |
| (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> |
| ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, |
| @@ -2441,6 +2433,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) |
| if (unlikely(!host_attr->host_info)) |
| return -ENOMEM; |
| |
| + host_attr->host_info->ena_spec_version = |
| + ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | |
| + (ENA_COMMON_SPEC_VERSION_MINOR)); |
| + |
| return 0; |
| } |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h |
| index bb8d73676eab..23beb7e7ed7b 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h |
| @@ -32,8 +32,8 @@ |
| #ifndef _ENA_COMMON_H_ |
| #define _ENA_COMMON_H_ |
| |
| -#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ |
| -#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ |
| +#define ENA_COMMON_SPEC_VERSION_MAJOR 2 |
| +#define ENA_COMMON_SPEC_VERSION_MINOR 0 |
| |
| /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ |
| struct ena_common_mem_addr { |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index 69a49784b204..0c9c0d3ce856 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -2206,7 +2206,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, |
| return qid; |
| } |
| |
| -static void ena_config_host_info(struct ena_com_dev *ena_dev) |
| +static void ena_config_host_info(struct ena_com_dev *ena_dev, |
| + struct pci_dev *pdev) |
| { |
| struct ena_admin_host_info *host_info; |
| int rc; |
| @@ -2220,6 +2221,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) |
| |
| host_info = ena_dev->host_attr.host_info; |
| |
| + host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; |
| host_info->os_type = ENA_ADMIN_OS_LINUX; |
| host_info->kernel_ver = LINUX_VERSION_CODE; |
| strlcpy(host_info->kernel_ver_str, utsname()->version, |
| @@ -2230,7 +2232,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) |
| host_info->driver_version = |
| (DRV_MODULE_VER_MAJOR) | |
| (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | |
| - (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); |
| + (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | |
| + ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); |
| + host_info->num_cpus = num_online_cpus(); |
| |
| rc = ena_com_set_host_attributes(ena_dev); |
| if (rc) { |
| @@ -2454,7 +2458,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, |
| */ |
| ena_com_set_admin_polling_mode(ena_dev, true); |
| |
| - ena_config_host_info(ena_dev); |
| + ena_config_host_info(ena_dev, pdev); |
| |
| /* Get Device Attributes*/ |
| rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); |
| -- |
| 2.19.1 |
| |
| From 0a66e6d1fe86cb3d49fcd76057b4f7a50e0fe49a Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:18 +0300 |
| Subject: [PATCH 04/16] net: ena: introduce Low Latency Queues data structures |
| according to ENA spec |
| |
| Low Latency Queues (LLQ) allow the use of the device's memory for |
| descriptors and headers. Such queues decrease processing time since the |
| data is already located on the device when the driver rings the doorbell. |
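| |
| To illustrate how the new structures are meant to be wired together, |
| here is a plausible set of driver defaults built from the enums and |
| struct ena_llq_configurations introduced below (a sketch; the defaults |
| the driver actually uses are chosen later in this series): |
| |
| 	static void |
| 	set_default_llq_configurations(struct ena_llq_configurations *cfg) |
| 	{ |
| 		cfg->llq_header_location = ENA_ADMIN_INLINE_HEADER; |
| 		cfg->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; |
| 		cfg->llq_ring_entry_size_value = 128; |
| 		cfg->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; |
| 		cfg->llq_num_decs_before_header = |
| 			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; |
| 	} |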
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| --- |
| .../net/ethernet/amazon/ena/ena_admin_defs.h | 90 ++++++++++++++++++- |
| drivers/net/ethernet/amazon/ena/ena_com.h | 38 ++++++++ |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 +- |
| 3 files changed, 128 insertions(+), 6 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| index d735164efea3..b439ec1b3edb 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| @@ -74,6 +74,8 @@ enum ena_admin_aq_feature_id { |
| |
| ENA_ADMIN_HW_HINTS = 3, |
| |
| + ENA_ADMIN_LLQ = 4, |
| + |
| ENA_ADMIN_RSS_HASH_FUNCTION = 10, |
| |
| ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, |
| @@ -485,8 +487,85 @@ struct ena_admin_device_attr_feature_desc { |
| u32 max_mtu; |
| }; |
| |
| +enum ena_admin_llq_header_location { |
| + /* header is in descriptor list */ |
| + ENA_ADMIN_INLINE_HEADER = 1, |
| + /* header in a separate ring, implies 16B descriptor list entry */ |
| + ENA_ADMIN_HEADER_RING = 2, |
| +}; |
| + |
| +enum ena_admin_llq_ring_entry_size { |
| + ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1, |
| + ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2, |
| + ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4, |
| +}; |
| + |
| +enum ena_admin_llq_num_descs_before_header { |
| + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0, |
| + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1, |
| + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2, |
| + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4, |
| + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8, |
| +}; |
| + |
| +/* A packet descriptor list entry always starts with one or more descriptors, |
| + * followed by a header. The rest of the descriptors are located at the |
| + * beginning of the subsequent entry. Stride refers to how the rest of the |
| + * descriptors are placed. This field is relevant only for inline header |
| + * mode. |
| + */ |
| +enum ena_admin_llq_stride_ctrl { |
| + ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1, |
| + ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2, |
| +}; |
| + |
| +struct ena_admin_feature_llq_desc { |
| + u32 max_llq_num; |
| + |
| + u32 max_llq_depth; |
| + |
| + /* specify the header locations the device supports. bitfield of |
| + * enum ena_admin_llq_header_location. |
| + */ |
| + u16 header_location_ctrl_supported; |
| + |
| + /* the header location the driver selected to use. */ |
| + u16 header_location_ctrl_enabled; |
| + |
| + /* if inline header is specified - this is the size of the descriptor |
| + * list entry. If a header in a separate ring is specified - this is |
| + * the size of the header ring entry. bitfield of enum |
| + * ena_admin_llq_ring_entry_size. specifies the entry sizes the device |
| + * supports |
| + */ |
| + u16 entry_size_ctrl_supported; |
| + |
| + /* the entry size the driver selected to use. */ |
| + u16 entry_size_ctrl_enabled; |
| + |
| + /* valid only if inline header is specified. The first entry associated |
| + * with the packet includes descriptors and header. The rest of the |
| + * entries are occupied by descriptors. This parameter defines the max |
| + * number of descriptors preceding the header in the first entry. |
| + * The field is a bitfield of enum |
| + * ena_admin_llq_num_descs_before_header and specifies the values the |
| + * device supports |
| + */ |
| + u16 desc_num_before_header_supported; |
| + |
| + /* the value the driver selected to use */ |
| + u16 desc_num_before_header_enabled; |
| + |
| + /* valid only if inline was chosen. bitfield of enum |
| + * ena_admin_llq_stride_ctrl |
| + */ |
| + u16 descriptors_stride_ctrl_supported; |
| + |
| + /* the stride control the driver selected to use */ |
| + u16 descriptors_stride_ctrl_enabled; |
| +}; |
| + |
| struct ena_admin_queue_feature_desc { |
| - /* including LLQs */ |
| u32 max_sq_num; |
| |
| u32 max_sq_depth; |
| @@ -495,9 +574,9 @@ struct ena_admin_queue_feature_desc { |
| |
| u32 max_cq_depth; |
| |
| - u32 max_llq_num; |
| + u32 max_legacy_llq_num; |
| |
| - u32 max_llq_depth; |
| + u32 max_legacy_llq_depth; |
| |
| u32 max_header_size; |
| |
| @@ -822,6 +901,8 @@ struct ena_admin_get_feat_resp { |
| |
| struct ena_admin_device_attr_feature_desc dev_attr; |
| |
| + struct ena_admin_feature_llq_desc llq; |
| + |
| struct ena_admin_queue_feature_desc max_queue; |
| |
| struct ena_admin_feature_aenq_desc aenq; |
| @@ -869,6 +950,9 @@ struct ena_admin_set_feat_cmd { |
| |
| /* rss indirection table */ |
| struct ena_admin_feature_rss_ind_table ind_table; |
| + |
| + /* LLQ configuration */ |
| + struct ena_admin_feature_llq_desc llq; |
| } u; |
| }; |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h |
| index 7b784f8a06a6..50e6c8f6f138 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.h |
| @@ -108,6 +108,14 @@ enum ena_intr_moder_level { |
| ENA_INTR_MAX_NUM_OF_LEVELS, |
| }; |
| |
| +struct ena_llq_configurations { |
| + enum ena_admin_llq_header_location llq_header_location; |
| + enum ena_admin_llq_ring_entry_size llq_ring_entry_size; |
| + enum ena_admin_llq_stride_ctrl llq_stride_ctrl; |
| + enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header; |
| + u16 llq_ring_entry_size_value; |
| +}; |
| + |
| struct ena_intr_moder_entry { |
| unsigned int intr_moder_interval; |
| unsigned int pkts_per_interval; |
| @@ -142,6 +150,15 @@ struct ena_com_tx_meta { |
| u16 l4_hdr_len; /* In words */ |
| }; |
| |
| +struct ena_com_llq_info { |
| + u16 header_location_ctrl; |
| + u16 desc_stride_ctrl; |
| + u16 desc_list_entry_size_ctrl; |
| + u16 desc_list_entry_size; |
| + u16 descs_num_before_header; |
| + u16 descs_per_entry; |
| +}; |
| + |
| struct ena_com_io_cq { |
| struct ena_com_io_desc_addr cdesc_addr; |
| |
| @@ -179,6 +196,20 @@ struct ena_com_io_cq { |
| |
| } ____cacheline_aligned; |
| |
| +struct ena_com_io_bounce_buffer_control { |
| + u8 *base_buffer; |
| + u16 next_to_use; |
| + u16 buffer_size; |
| + u16 buffers_num; /* Must be a power of 2 */ |
| +}; |
| + |
| +/* This struct keeps track of the current location of the next llq entry */ |
| +struct ena_com_llq_pkt_ctrl { |
| + u8 *curr_bounce_buf; |
| + u16 idx; |
| + u16 descs_left_in_line; |
| +}; |
| + |
| struct ena_com_io_sq { |
| struct ena_com_io_desc_addr desc_addr; |
| |
| @@ -190,6 +221,9 @@ struct ena_com_io_sq { |
| |
| u32 msix_vector; |
| struct ena_com_tx_meta cached_tx_meta; |
| + struct ena_com_llq_info llq_info; |
| + struct ena_com_llq_pkt_ctrl llq_buf_ctrl; |
| + struct ena_com_io_bounce_buffer_control bounce_buf_ctrl; |
| |
| u16 q_depth; |
| u16 qid; |
| @@ -197,6 +231,7 @@ struct ena_com_io_sq { |
| u16 idx; |
| u16 tail; |
| u16 next_to_comp; |
| + u16 llq_last_copy_tail; |
| u32 tx_max_header_size; |
| u8 phase; |
| u8 desc_entry_size; |
| @@ -334,6 +369,8 @@ struct ena_com_dev { |
| u16 intr_delay_resolution; |
| u32 intr_moder_tx_interval; |
| struct ena_intr_moder_entry *intr_moder_tbl; |
| + |
| + struct ena_com_llq_info llq_info; |
| }; |
| |
| struct ena_com_dev_get_features_ctx { |
| @@ -342,6 +379,7 @@ struct ena_com_dev_get_features_ctx { |
| struct ena_admin_feature_aenq_desc aenq; |
| struct ena_admin_feature_offload_desc offload; |
| struct ena_admin_ena_hw_hints hw_hints; |
| + struct ena_admin_feature_llq_desc llq; |
| }; |
| |
| struct ena_com_create_io_ctx { |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index 0c9c0d3ce856..789556960b8e 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -2959,7 +2959,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, |
| |
| /* In case of LLQ use the llq number in the get feature cmd */ |
| if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
| - io_sq_num = get_feat_ctx->max_queues.max_llq_num; |
| + io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num; |
| |
| if (io_sq_num == 0) { |
| dev_err(&pdev->dev, |
| @@ -2995,7 +2995,7 @@ static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, |
| has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); |
| |
| /* Enable push mode if device supports LLQ */ |
| - if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) |
| + if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0) |
| ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; |
| else |
| ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| @@ -3131,7 +3131,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev, |
| |
| if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
| queue_size = min_t(u32, queue_size, |
| - get_feat_ctx->max_queues.max_llq_depth); |
| + get_feat_ctx->max_queues.max_legacy_llq_depth); |
| |
| queue_size = rounddown_pow_of_two(queue_size); |
| |
| -- |
| 2.19.1 |
| |
| From 8e9ebea20ab8db4f3a993e815e0b6b84ce98bbfb Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:19 +0300 |
| Subject: [PATCH 05/16] net: ena: add functions for handling Low Latency Queues |
| in ena_com |
| |
| This patch introduces APIs for detection, initialization, configuration |
| and actual usage of low latency queues (LLQ). It extends the transmit |
| API with the creation of LLQ descriptors in device memory (which |
| includes the host buffer descriptors as well as the packet header). |
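| |
| A sketch of the intended probe-time flow (assumed usage; error handling |
| trimmed, set_default_llq_configurations() as sketched in the previous |
| patch): |
| |
| 	struct ena_com_dev_get_features_ctx get_feat_ctx; |
| 	struct ena_llq_configurations llq_config; |
| 	int rc; |
| |
| 	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx); |
| 	if (rc) |
| 		return rc; |
| |
| 	set_default_llq_configurations(&llq_config); |
| |
| 	/* picks LLQ vs. host placement based on device support */ |
| 	rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_config); |
| 	if (rc) |
| 		return rc; |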
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| --- |
| drivers/net/ethernet/amazon/ena/ena_com.c | 249 +++++++++++++++++- |
| drivers/net/ethernet/amazon/ena/ena_com.h | 28 ++ |
| drivers/net/ethernet/amazon/ena/ena_eth_com.c | 231 ++++++++++++---- |
| drivers/net/ethernet/amazon/ena/ena_eth_com.h | 25 +- |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 21 +- |
| 5 files changed, 474 insertions(+), 80 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c |
| index b6e6a4721931..5220c7578d6b 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.c |
| @@ -58,6 +58,8 @@ |
| |
| #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF |
| |
| +#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 |
| + |
| #define ENA_REGS_ADMIN_INTR_MASK 1 |
| |
| #define ENA_POLL_MS 5 |
| @@ -352,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, |
| &io_sq->desc_addr.phys_addr, |
| GFP_KERNEL); |
| } |
| - } else { |
| + |
| + if (!io_sq->desc_addr.virt_addr) { |
| + pr_err("memory allocation failed"); |
| + return -ENOMEM; |
| + } |
| + } |
| + |
| + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
| + /* Allocate bounce buffers */ |
| + io_sq->bounce_buf_ctrl.buffer_size = |
| + ena_dev->llq_info.desc_list_entry_size; |
| + io_sq->bounce_buf_ctrl.buffers_num = |
| + ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; |
| + io_sq->bounce_buf_ctrl.next_to_use = 0; |
| + |
| + size = io_sq->bounce_buf_ctrl.buffer_size * |
| + io_sq->bounce_buf_ctrl.buffers_num; |
| + |
| dev_node = dev_to_node(ena_dev->dmadev); |
| set_dev_node(ena_dev->dmadev, ctx->numa_node); |
| - io_sq->desc_addr.virt_addr = |
| + io_sq->bounce_buf_ctrl.base_buffer = |
| devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); |
| set_dev_node(ena_dev->dmadev, dev_node); |
| - if (!io_sq->desc_addr.virt_addr) { |
| - io_sq->desc_addr.virt_addr = |
| + if (!io_sq->bounce_buf_ctrl.base_buffer) |
| + io_sq->bounce_buf_ctrl.base_buffer = |
| devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); |
| + |
| + if (!io_sq->bounce_buf_ctrl.base_buffer) { |
| + pr_err("bounce buffer memory allocation failed"); |
| + return -ENOMEM; |
| } |
| - } |
| |
| - if (!io_sq->desc_addr.virt_addr) { |
| - pr_err("memory allocation failed"); |
| - return -ENOMEM; |
| + memcpy(&io_sq->llq_info, &ena_dev->llq_info, |
| + sizeof(io_sq->llq_info)); |
| + |
| + /* Initiate the first bounce buffer */ |
| + io_sq->llq_buf_ctrl.curr_bounce_buf = |
| + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); |
| + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, |
| + 0x0, io_sq->llq_info.desc_list_entry_size); |
| + io_sq->llq_buf_ctrl.descs_left_in_line = |
| + io_sq->llq_info.descs_num_before_header; |
| } |
| |
| io_sq->tail = 0; |
| @@ -554,6 +583,156 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c |
| return ret; |
| } |
| |
| +/** |
| + * Set the LLQ configurations of the firmware |
| + * |
| + * The driver provides only the enabled feature values to the device, |
| + * which, in turn, checks if they are supported. |
| + */ |
| +static int ena_com_set_llq(struct ena_com_dev *ena_dev) |
| +{ |
| + struct ena_com_admin_queue *admin_queue; |
| + struct ena_admin_set_feat_cmd cmd; |
| + struct ena_admin_set_feat_resp resp; |
| + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; |
| + int ret; |
| + |
| + memset(&cmd, 0x0, sizeof(cmd)); |
| + admin_queue = &ena_dev->admin_queue; |
| + |
| + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
| + cmd.feat_common.feature_id = ENA_ADMIN_LLQ; |
| + |
| + cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; |
| + cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; |
| + cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; |
| + cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; |
| + |
| + ret = ena_com_execute_admin_command(admin_queue, |
| + (struct ena_admin_aq_entry *)&cmd, |
| + sizeof(cmd), |
| + (struct ena_admin_acq_entry *)&resp, |
| + sizeof(resp)); |
| + |
| + if (unlikely(ret)) |
| + pr_err("Failed to set LLQ configurations: %d\n", ret); |
| + |
| + return ret; |
| +} |
| + |
| +static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, |
| + struct ena_admin_feature_llq_desc *llq_features, |
| + struct ena_llq_configurations *llq_default_cfg) |
| +{ |
| + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; |
| + u16 supported_feat; |
| + int rc; |
| + |
| + memset(llq_info, 0, sizeof(*llq_info)); |
| + |
| + supported_feat = llq_features->header_location_ctrl_supported; |
| + |
| + if (likely(supported_feat & llq_default_cfg->llq_header_location)) { |
| + llq_info->header_location_ctrl = |
| + llq_default_cfg->llq_header_location; |
| + } else { |
| + pr_err("Invalid header location control, supported: 0x%x\n", |
| + supported_feat); |
| + return -EINVAL; |
| + } |
| + |
| + if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { |
| + supported_feat = llq_features->descriptors_stride_ctrl_supported; |
| + if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { |
| + llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; |
| + } else { |
| + if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { |
| + llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; |
| + } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { |
| + llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; |
| + } else { |
| + pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", |
| + supported_feat); |
| + return -EINVAL; |
| + } |
| + |
| + pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", |
| + llq_default_cfg->llq_stride_ctrl, supported_feat, |
| + llq_info->desc_stride_ctrl); |
| + } |
| + } else { |
| + llq_info->desc_stride_ctrl = 0; |
| + } |
| + |
| + supported_feat = llq_features->entry_size_ctrl_supported; |
| + if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { |
| + llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; |
| + llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; |
| + } else { |
| + if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { |
| + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; |
| + llq_info->desc_list_entry_size = 128; |
| + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { |
| + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; |
| + llq_info->desc_list_entry_size = 192; |
| + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { |
| + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; |
| + llq_info->desc_list_entry_size = 256; |
| + } else { |
| + pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", |
| + supported_feat); |
| + return -EINVAL; |
| + } |
| + |
| + pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", |
| + llq_default_cfg->llq_ring_entry_size, supported_feat, |
| + llq_info->desc_list_entry_size); |
| + } |
| + if (unlikely(llq_info->desc_list_entry_size & 0x7)) { |
| + /* The desc list entry size should be a whole multiple of 8. |
| + * This requirement comes from __iowrite64_copy() |
| + */ |
| + pr_err("illegal entry size %d\n", |
| + llq_info->desc_list_entry_size); |
| + return -EINVAL; |
| + } |
| + |
| + if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) |
| + llq_info->descs_per_entry = llq_info->desc_list_entry_size / |
| + sizeof(struct ena_eth_io_tx_desc); |
| + else |
| + llq_info->descs_per_entry = 1; |
| + |
| + supported_feat = llq_features->desc_num_before_header_supported; |
| + if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { |
| + llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; |
| + } else { |
| + if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { |
| + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; |
| + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { |
| + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; |
| + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { |
| + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; |
| + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { |
| + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; |
| + } else { |
| + pr_err("Invalid descs_num_before_header, supported: 0x%x\n", |
| + supported_feat); |
| + return -EINVAL; |
| + } |
| + |
| + pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", |
| + llq_default_cfg->llq_num_decs_before_header, |
| + supported_feat, llq_info->descs_num_before_header); |
| + } |
| + |
| + rc = ena_com_set_llq(ena_dev); |
| + if (rc) |
| + pr_err("Cannot set LLQ configuration: %d\n", rc); |
| + |
| + return 0; |
| +} |
| + |
| static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, |
| struct ena_com_admin_queue *admin_queue) |
| { |
| @@ -725,15 +904,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, |
| if (io_sq->desc_addr.virt_addr) { |
| size = io_sq->desc_entry_size * io_sq->q_depth; |
| |
| - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
| - dma_free_coherent(ena_dev->dmadev, size, |
| - io_sq->desc_addr.virt_addr, |
| - io_sq->desc_addr.phys_addr); |
| - else |
| - devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr); |
| + dma_free_coherent(ena_dev->dmadev, size, |
| + io_sq->desc_addr.virt_addr, |
| + io_sq->desc_addr.phys_addr); |
| |
| io_sq->desc_addr.virt_addr = NULL; |
| } |
| + |
| + if (io_sq->bounce_buf_ctrl.base_buffer) { |
| + devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); |
| + io_sq->bounce_buf_ctrl.base_buffer = NULL; |
| + } |
| } |
| |
| static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, |
| @@ -1740,6 +1921,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, |
| else |
| return rc; |
| |
| + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ); |
| + if (!rc) |
| + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, |
| + sizeof(get_resp.u.llq)); |
| + else if (rc == -EOPNOTSUPP) |
| + memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); |
| + else |
| + return rc; |
| + |
| return 0; |
| } |
| |
| @@ -2708,3 +2898,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, |
| intr_moder_tbl[level].pkts_per_interval; |
| entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; |
| } |
| + |
| +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, |
| + struct ena_admin_feature_llq_desc *llq_features, |
| + struct ena_llq_configurations *llq_default_cfg) |
| +{ |
| + int rc; |
| + int size; |
| + |
| + if (!llq_features->max_llq_num) { |
| + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| + return 0; |
| + } |
| + |
| + rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); |
| + if (rc) |
| + return rc; |
| + |
| + /* Validate the header and descriptors fit in a single LLQ entry */ |
| + size = ena_dev->tx_max_header_size; |
| + size += ena_dev->llq_info.descs_num_before_header * |
| + sizeof(struct ena_eth_io_tx_desc); |
| + |
| + if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) { |
| + pr_err("the size of the LLQ entry is smaller than needed\n"); |
| + return -EINVAL; |
| + } |
| + |
| + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; |
| + |
| + return 0; |
| +} |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h |
| index 50e6c8f6f138..25af8d025919 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.h |
| @@ -37,6 +37,7 @@ |
| #include <linux/delay.h> |
| #include <linux/dma-mapping.h> |
| #include <linux/gfp.h> |
| +#include <linux/io.h> |
| #include <linux/sched.h> |
| #include <linux/sizes.h> |
| #include <linux/spinlock.h> |
| @@ -973,6 +974,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, |
| enum ena_intr_moder_level level, |
| struct ena_intr_moder_entry *entry); |
| |
| +/* ena_com_config_dev_mode - Configure the placement policy of the device. |
| + * @ena_dev: ENA communication layer struct |
| + * @llq_features: LLQ feature descriptor, retrieved via |
| + * ena_com_get_dev_attr_feat. |
| + * @llq_default_config: the default driver LLQ configuration parameters |
| + */ |
| +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, |
| + struct ena_admin_feature_llq_desc *llq_features, |
| + struct ena_llq_configurations *llq_default_config); |
| + |
| static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) |
| { |
| return ena_dev->adaptive_coalescing; |
| @@ -1082,4 +1093,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, |
| intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; |
| } |
| |
| +static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl) |
| +{ |
| + u16 size, buffers_num; |
| + u8 *buf; |
| + |
| + size = bounce_buf_ctrl->buffer_size; |
| + buffers_num = bounce_buf_ctrl->buffers_num; |
| + |
| + buf = bounce_buf_ctrl->base_buffer + |
| + (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size; |
| + |
| + prefetchw(bounce_buf_ctrl->base_buffer + |
| + (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size); |
| + |
| + return buf; |
| +} |
| + |
| #endif /* !(ENA_COM) */ |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| index 9c0511e9f9a2..17107ca107e3 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| @@ -59,7 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( |
| return cdesc; |
| } |
| |
| -static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) |
| +static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) |
| { |
| u16 tail_masked; |
| u32 offset; |
| @@ -71,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) |
| return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); |
| } |
| |
| -static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) |
| +static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, |
| + u8 *bounce_buffer) |
| { |
| - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); |
| - u32 offset = tail_masked * io_sq->desc_entry_size; |
| + struct ena_com_llq_info *llq_info = &io_sq->llq_info; |
| |
| - /* In case this queue isn't a LLQ */ |
| - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
| - return; |
| + u16 dst_tail_mask; |
| + u32 dst_offset; |
| |
| - memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, |
| - io_sq->desc_addr.virt_addr + offset, |
| - io_sq->desc_entry_size); |
| -} |
| + dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); |
| + dst_offset = dst_tail_mask * llq_info->desc_list_entry_size; |
| + |
| + /* Make sure everything was written into the bounce buffer before |
| + * writing the bounce buffer to the device |
| + */ |
| + wmb(); |
| + |
| + /* The line is completed. Copy it to dev */ |
| + __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, |
| + bounce_buffer, (llq_info->desc_list_entry_size) / 8); |
| |
| -static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) |
| -{ |
| io_sq->tail++; |
| |
| /* Switch phase bit in case of wrap around */ |
| if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) |
| io_sq->phase ^= 1; |
| + |
| + return 0; |
| } |
| |
| -static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, |
| - u8 *head_src, u16 header_len) |
| +static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, |
| + u8 *header_src, |
| + u16 header_len) |
| { |
| - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); |
| - u8 __iomem *dev_head_addr = |
| - io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); |
| + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; |
| + struct ena_com_llq_info *llq_info = &io_sq->llq_info; |
| + u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf; |
| + u16 header_offset; |
| |
| - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
| + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) |
| return 0; |
| |
| - if (unlikely(!io_sq->header_addr)) { |
| - pr_err("Push buffer header ptr is NULL\n"); |
| - return -EINVAL; |
| + header_offset = |
| + llq_info->descs_num_before_header * io_sq->desc_entry_size; |
| + |
| + if (unlikely((header_offset + header_len) > |
| + llq_info->desc_list_entry_size)) { |
| + pr_err("trying to write header larger than llq entry can accommodate\n"); |
| + return -EFAULT; |
| + } |
| + |
| + if (unlikely(!bounce_buffer)) { |
| + pr_err("bounce buffer is NULL\n"); |
| + return -EFAULT; |
| + } |
| + |
| + memcpy(bounce_buffer + header_offset, header_src, header_len); |
| + |
| + return 0; |
| +} |
| + |
| +static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) |
| +{ |
| + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; |
| + u8 *bounce_buffer; |
| + void *sq_desc; |
| + |
| + bounce_buffer = pkt_ctrl->curr_bounce_buf; |
| + |
| + if (unlikely(!bounce_buffer)) { |
| + pr_err("bounce buffer is NULL\n"); |
| + return NULL; |
| + } |
| + |
| + sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; |
| + pkt_ctrl->idx++; |
| + pkt_ctrl->descs_left_in_line--; |
| + |
| + return sq_desc; |
| +} |
| + |
| +static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) |
| +{ |
| + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; |
| + struct ena_com_llq_info *llq_info = &io_sq->llq_info; |
| + int rc; |
| + |
| + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) |
| + return 0; |
| + |
| + /* bounce buffer was used, so write it and get a new one */ |
| + if (pkt_ctrl->idx) { |
| + rc = ena_com_write_bounce_buffer_to_dev(io_sq, |
| + pkt_ctrl->curr_bounce_buf); |
| + if (unlikely(rc)) |
| + return rc; |
| + |
| + pkt_ctrl->curr_bounce_buf = |
| + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); |
| + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, |
| + 0x0, llq_info->desc_list_entry_size); |
| + } |
| + |
| + pkt_ctrl->idx = 0; |
| + pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header; |
| + return 0; |
| +} |
| + |
| +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) |
| +{ |
| + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
| + return get_sq_desc_llq(io_sq); |
| + |
| + return get_sq_desc_regular_queue(io_sq); |
| +} |
| + |
| +static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) |
| +{ |
| + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; |
| + struct ena_com_llq_info *llq_info = &io_sq->llq_info; |
| + int rc; |
| + |
| + if (!pkt_ctrl->descs_left_in_line) { |
| + rc = ena_com_write_bounce_buffer_to_dev(io_sq, |
| + pkt_ctrl->curr_bounce_buf); |
| + if (unlikely(rc)) |
| + return rc; |
| + |
| + pkt_ctrl->curr_bounce_buf = |
| + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); |
| + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, |
| + 0x0, llq_info->desc_list_entry_size); |
| + |
| + pkt_ctrl->idx = 0; |
| + if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)) |
| + pkt_ctrl->descs_left_in_line = 1; |
| + else |
| + pkt_ctrl->descs_left_in_line = |
| + llq_info->desc_list_entry_size / io_sq->desc_entry_size; |
| } |
| |
| - memcpy_toio(dev_head_addr, head_src, header_len); |
| + return 0; |
| +} |
| + |
| +static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) |
| +{ |
| + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
| + return ena_com_sq_update_llq_tail(io_sq); |
| + |
| + io_sq->tail++; |
| + |
| + /* Switch phase bit in case of wrap around */ |
| + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) |
| + io_sq->phase ^= 1; |
| |
| return 0; |
| } |
| @@ -177,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, |
| return false; |
| } |
| |
| -static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, |
| - struct ena_com_tx_ctx *ena_tx_ctx) |
| +static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, |
| + struct ena_com_tx_ctx *ena_tx_ctx) |
| { |
| struct ena_eth_io_tx_meta_desc *meta_desc = NULL; |
| struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; |
| @@ -223,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i |
| memcpy(&io_sq->cached_tx_meta, ena_meta, |
| sizeof(struct ena_com_tx_meta)); |
| |
| - ena_com_copy_curr_sq_desc_to_dev(io_sq); |
| - ena_com_sq_update_tail(io_sq); |
| + return ena_com_sq_update_tail(io_sq); |
| } |
| |
| static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, |
| @@ -262,18 +375,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, |
| { |
| struct ena_eth_io_tx_desc *desc = NULL; |
| struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; |
| - void *push_header = ena_tx_ctx->push_header; |
| + void *buffer_to_push = ena_tx_ctx->push_header; |
| u16 header_len = ena_tx_ctx->header_len; |
| u16 num_bufs = ena_tx_ctx->num_bufs; |
| - int total_desc, i, rc; |
| + u16 start_tail = io_sq->tail; |
| + int i, rc; |
| bool have_meta; |
| u64 addr_hi; |
| |
| WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); |
| |
| /* num_bufs +1 for potential meta desc */ |
| - if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { |
| - pr_err("Not enough space in the tx queue\n"); |
| + if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { |
| + pr_debug("Not enough space in the tx queue\n"); |
| return -ENOMEM; |
| } |
| |
| @@ -283,23 +397,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, |
| return -EINVAL; |
| } |
| |
| - /* start with pushing the header (if needed) */ |
| - rc = ena_com_write_header(io_sq, push_header, header_len); |
| + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && |
| + !buffer_to_push)) |
| + return -EINVAL; |
| + |
| + rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); |
| if (unlikely(rc)) |
| return rc; |
| |
| have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, |
| ena_tx_ctx); |
| - if (have_meta) |
| - ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); |
| + if (have_meta) { |
| + rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); |
| + if (unlikely(rc)) |
| + return rc; |
| + } |
| |
| - /* If the caller doesn't want send packets */ |
| + /* If the caller doesn't want to send packets */ |
| if (unlikely(!num_bufs && !header_len)) { |
| - *nb_hw_desc = have_meta ? 0 : 1; |
| - return 0; |
| + rc = ena_com_close_bounce_buffer(io_sq); |
| + *nb_hw_desc = io_sq->tail - start_tail; |
| + return rc; |
| } |
| |
| desc = get_sq_desc(io_sq); |
| + if (unlikely(!desc)) |
| + return -EFAULT; |
| memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); |
| |
| /* Set first desc when we don't have meta descriptor */ |
| @@ -351,10 +474,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, |
| for (i = 0; i < num_bufs; i++) { |
| /* The first desc share the same desc as the header */ |
| if (likely(i != 0)) { |
| - ena_com_copy_curr_sq_desc_to_dev(io_sq); |
| - ena_com_sq_update_tail(io_sq); |
| + rc = ena_com_sq_update_tail(io_sq); |
| + if (unlikely(rc)) |
| + return rc; |
| |
| desc = get_sq_desc(io_sq); |
| + if (unlikely(!desc)) |
| + return -EFAULT; |
| + |
| memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); |
| |
| desc->len_ctrl |= (io_sq->phase << |
| @@ -377,15 +504,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, |
| /* set the last desc indicator */ |
| desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; |
| |
| - ena_com_copy_curr_sq_desc_to_dev(io_sq); |
| - |
| - ena_com_sq_update_tail(io_sq); |
| + rc = ena_com_sq_update_tail(io_sq); |
| + if (unlikely(rc)) |
| + return rc; |
| |
| - total_desc = max_t(u16, num_bufs, 1); |
| - total_desc += have_meta ? 1 : 0; |
| + rc = ena_com_close_bounce_buffer(io_sq); |
| |
| - *nb_hw_desc = total_desc; |
| - return 0; |
| + *nb_hw_desc = io_sq->tail - start_tail; |
| + return rc; |
| } |
| |
| int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, |
| @@ -444,15 +570,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, |
| |
| WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); |
| |
| - if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) |
| + if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) |
| return -ENOSPC; |
| |
| desc = get_sq_desc(io_sq); |
| + if (unlikely(!desc)) |
| + return -EFAULT; |
| + |
| memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); |
| |
| desc->length = ena_buf->len; |
| |
| - desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; |
| + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK; |
| desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; |
| desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; |
| desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; |
| @@ -463,9 +592,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, |
| desc->buff_addr_hi = |
| ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); |
| |
| - ena_com_sq_update_tail(io_sq); |
| - |
| - return 0; |
| + return ena_com_sq_update_tail(io_sq); |
| } |
| |
| bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| index 4930324e9d8d..bcc84072367d 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| @@ -94,7 +94,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, |
| writel(intr_reg->intr_control, io_cq->unmask_reg); |
| } |
| |
| -static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) |
| +static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq) |
| { |
| u16 tail, next_to_comp, cnt; |
| |
| @@ -105,11 +105,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) |
| return io_sq->q_depth - 1 - cnt; |
| } |
| |
| -static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) |
| +/* Check if the submission queue has enough space to hold required_buffers */ |
| +static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, |
| + u16 required_buffers) |
| { |
| - u16 tail; |
| + int temp; |
| |
| - tail = io_sq->tail; |
| + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
| + return ena_com_free_desc(io_sq) >= required_buffers; |
| + |
| + /* This calculation doesn't need to be 100% accurate. So to reduce |
| + * the calculation overhead just subtract 2 lines from the free descs |
| + * (one for the header line and one to compensate for the |
| + * rounding-down of the division). |
| + */ |
| + temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; |
| + |
| + return ena_com_free_desc(io_sq) > temp; |
| +} |
| + |
| +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) |
| +{ |
| + u16 tail = io_sq->tail; |
| |
| pr_debug("write submission queue doorbell for queue: %d tail: %d\n", |
| io_sq->qid, tail); |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index 789556960b8e..e732bd2ddd32 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -804,12 +804,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) |
| */ |
| smp_mb(); |
| |
| - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > |
| - ENA_TX_WAKEUP_THRESH; |
| + above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
| + ENA_TX_WAKEUP_THRESH); |
| if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { |
| __netif_tx_lock(txq, smp_processor_id()); |
| - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > |
| - ENA_TX_WAKEUP_THRESH; |
| + above_thresh = |
| + ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
| + ENA_TX_WAKEUP_THRESH); |
| if (netif_tx_queue_stopped(txq) && above_thresh) { |
| netif_tx_wake_queue(txq); |
| u64_stats_update_begin(&tx_ring->syncp); |
| @@ -1101,7 +1102,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, |
| |
| rx_ring->next_to_clean = next_to_clean; |
| |
| - refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); |
| + refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); |
| refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; |
| |
| /* Optimization, try to batch new rx buffers */ |
| @@ -2115,8 +2116,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| * to sgl_size + 2. one for the meta descriptor and one for header |
| * (if the header is larger than tx_max_header_size). |
| */ |
| - if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < |
| - (tx_ring->sgl_size + 2))) { |
| + if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
| + tx_ring->sgl_size + 2))) { |
| netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", |
| __func__, qid); |
| |
| @@ -2135,8 +2136,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| */ |
| smp_mb(); |
| |
| - if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) |
| - > ENA_TX_WAKEUP_THRESH) { |
| + if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
| + ENA_TX_WAKEUP_THRESH)) { |
| netif_tx_wake_queue(txq); |
| u64_stats_update_begin(&tx_ring->syncp); |
| tx_ring->tx_stats.queue_wakeup++; |
| @@ -2813,7 +2814,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) |
| rx_ring = &adapter->rx_ring[i]; |
| |
| refill_required = |
| - ena_com_sq_empty_space(rx_ring->ena_com_io_sq); |
| + ena_com_free_desc(rx_ring->ena_com_io_sq); |
| if (unlikely(refill_required == (rx_ring->ring_size - 1))) { |
| rx_ring->empty_rx_queue++; |
| |
| -- |
| 2.19.1 |
| |
| From e4729991ed2e7e26e4b061369d7dee054ca4710f Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:20 +0300 |
| Subject: [PATCH 06/16] net: ena: add functions for handling Low Latency Queues |
| in ena_netdev |
| |
| This patch includes all code changes necessary in ena_netdev to enable |
| packet sending via the LLQ placement mode. |
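| |
| The central idea in the TX path: in LLQ placement mode the pushed header |
| must be in linear memory, so the driver pulls it through the new |
| intermediate buffer whenever it spans skb fragments. A condensed sketch |
| (assumed shape; the complete logic is in the ena_tx_map_skb() hunk |
| below): |
| |
| 	push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); |
| 	*push_hdr = skb_header_pointer(skb, 0, push_len, |
| 				       tx_ring->push_buf_intermediate_buf); |
| 	*header_len = push_len; |
| |
| 	/* skb_header_pointer() fell back to copying into the buffer */ |
| 	if (unlikely(skb->data != *push_hdr)) |
| 		tx_ring->tx_stats.llq_buffer_copy++; |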
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| --- |
| drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 + |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 387 +++++++++++------- |
| drivers/net/ethernet/amazon/ena/ena_netdev.h | 6 + |
| 3 files changed, 251 insertions(+), 143 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c |
| index 521607bc4393..fd28bd0d1c1e 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c |
| @@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = { |
| ENA_STAT_TX_ENTRY(doorbells), |
| ENA_STAT_TX_ENTRY(prepare_ctx_err), |
| ENA_STAT_TX_ENTRY(bad_req_id), |
| + ENA_STAT_TX_ENTRY(llq_buffer_copy), |
| ENA_STAT_TX_ENTRY(missed_tx), |
| }; |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index e732bd2ddd32..fcdfaf0ab8a7 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -237,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) |
| } |
| } |
| |
| + size = tx_ring->tx_max_header_size; |
| + tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); |
| + if (!tx_ring->push_buf_intermediate_buf) { |
| + tx_ring->push_buf_intermediate_buf = vzalloc(size); |
| + if (!tx_ring->push_buf_intermediate_buf) { |
| + vfree(tx_ring->tx_buffer_info); |
| + vfree(tx_ring->free_tx_ids); |
| + return -ENOMEM; |
| + } |
| + } |
| + |
| /* Req id ring for TX out of order completions */ |
| for (i = 0; i < tx_ring->ring_size; i++) |
| tx_ring->free_tx_ids[i] = i; |
| @@ -265,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) |
| |
| vfree(tx_ring->free_tx_ids); |
| tx_ring->free_tx_ids = NULL; |
| + |
| + vfree(tx_ring->push_buf_intermediate_buf); |
| + tx_ring->push_buf_intermediate_buf = NULL; |
| } |
| |
| /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues |
| @@ -602,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) |
| ena_free_rx_bufs(adapter, i); |
| } |
| |
| +static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring, |
| + struct ena_tx_buffer *tx_info) |
| +{ |
| + struct ena_com_buf *ena_buf; |
| + u32 cnt; |
| + int i; |
| + |
| + ena_buf = tx_info->bufs; |
| + cnt = tx_info->num_of_bufs; |
| + |
| + if (unlikely(!cnt)) |
| + return; |
| + |
| + if (tx_info->map_linear_data) { |
| + dma_unmap_single(tx_ring->dev, |
| + dma_unmap_addr(ena_buf, paddr), |
| + dma_unmap_len(ena_buf, len), |
| + DMA_TO_DEVICE); |
| + ena_buf++; |
| + cnt--; |
| + } |
| + |
| + /* unmap remaining mapped pages */ |
| + for (i = 0; i < cnt; i++) { |
| + dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), |
| + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); |
| + ena_buf++; |
| + } |
| +} |
| + |
| /* ena_free_tx_bufs - Free Tx Buffers per Queue |
| * @tx_ring: TX ring for which buffers be freed |
| */ |
| @@ -612,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) |
| |
| for (i = 0; i < tx_ring->ring_size; i++) { |
| struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; |
| - struct ena_com_buf *ena_buf; |
| - int nr_frags; |
| - int j; |
| |
| if (!tx_info->skb) |
| continue; |
| @@ -630,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) |
| tx_ring->qid, i); |
| } |
| |
| - ena_buf = tx_info->bufs; |
| - dma_unmap_single(tx_ring->dev, |
| - ena_buf->paddr, |
| - ena_buf->len, |
| - DMA_TO_DEVICE); |
| - |
| - /* unmap remaining mapped pages */ |
| - nr_frags = tx_info->num_of_bufs - 1; |
| - for (j = 0; j < nr_frags; j++) { |
| - ena_buf++; |
| - dma_unmap_page(tx_ring->dev, |
| - ena_buf->paddr, |
| - ena_buf->len, |
| - DMA_TO_DEVICE); |
| - } |
| + ena_unmap_tx_skb(tx_ring, tx_info); |
| |
| dev_kfree_skb_any(tx_info->skb); |
| } |
| @@ -735,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) |
| while (tx_pkts < budget) { |
| struct ena_tx_buffer *tx_info; |
| struct sk_buff *skb; |
| - struct ena_com_buf *ena_buf; |
| - int i, nr_frags; |
| |
| rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, |
| &req_id); |
| @@ -756,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) |
| tx_info->skb = NULL; |
| tx_info->last_jiffies = 0; |
| |
| - if (likely(tx_info->num_of_bufs != 0)) { |
| - ena_buf = tx_info->bufs; |
| - |
| - dma_unmap_single(tx_ring->dev, |
| - dma_unmap_addr(ena_buf, paddr), |
| - dma_unmap_len(ena_buf, len), |
| - DMA_TO_DEVICE); |
| - |
| - /* unmap remaining mapped pages */ |
| - nr_frags = tx_info->num_of_bufs - 1; |
| - for (i = 0; i < nr_frags; i++) { |
| - ena_buf++; |
| - dma_unmap_page(tx_ring->dev, |
| - dma_unmap_addr(ena_buf, paddr), |
| - dma_unmap_len(ena_buf, len), |
| - DMA_TO_DEVICE); |
| - } |
| - } |
| + ena_unmap_tx_skb(tx_ring, tx_info); |
| |
| netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, |
| "tx_poll: q %d skb %p completed\n", tx_ring->qid, |
| @@ -1300,7 +1308,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) |
| |
| /* Reserved the max msix vectors we might need */ |
| msix_vecs = ENA_MAX_MSIX_VEC(num_queues); |
| - |
| netif_dbg(adapter, probe, adapter->netdev, |
| "trying to enable MSI-X, vectors %d\n", msix_vecs); |
| |
| @@ -1591,7 +1598,7 @@ static int ena_up_complete(struct ena_adapter *adapter) |
| |
| static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) |
| { |
| - struct ena_com_create_io_ctx ctx = { 0 }; |
| + struct ena_com_create_io_ctx ctx; |
| struct ena_com_dev *ena_dev; |
| struct ena_ring *tx_ring; |
| u32 msix_vector; |
| @@ -1604,6 +1611,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) |
| msix_vector = ENA_IO_IRQ_IDX(qid); |
| ena_qid = ENA_IO_TXQ_IDX(qid); |
| |
| + memset(&ctx, 0x0, sizeof(ctx)); |
| + |
| ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; |
| ctx.qid = ena_qid; |
| ctx.mem_queue_type = ena_dev->tx_mem_queue_type; |
| @@ -1657,7 +1666,7 @@ static int ena_create_all_io_tx_queues(struct ena_adapter *adapter) |
| static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) |
| { |
| struct ena_com_dev *ena_dev; |
| - struct ena_com_create_io_ctx ctx = { 0 }; |
| + struct ena_com_create_io_ctx ctx; |
| struct ena_ring *rx_ring; |
| u32 msix_vector; |
| u16 ena_qid; |
| @@ -1669,6 +1678,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) |
| msix_vector = ENA_IO_IRQ_IDX(qid); |
| ena_qid = ENA_IO_RXQ_IDX(qid); |
| |
| + memset(&ctx, 0x0, sizeof(ctx)); |
| + |
| ctx.qid = ena_qid; |
| ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; |
| ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| @@ -1986,73 +1997,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, |
| return rc; |
| } |
| |
| -/* Called with netif_tx_lock. */ |
| -static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| +static int ena_tx_map_skb(struct ena_ring *tx_ring, |
| + struct ena_tx_buffer *tx_info, |
| + struct sk_buff *skb, |
| + void **push_hdr, |
| + u16 *header_len) |
| { |
| - struct ena_adapter *adapter = netdev_priv(dev); |
| - struct ena_tx_buffer *tx_info; |
| - struct ena_com_tx_ctx ena_tx_ctx; |
| - struct ena_ring *tx_ring; |
| - struct netdev_queue *txq; |
| + struct ena_adapter *adapter = tx_ring->adapter; |
| struct ena_com_buf *ena_buf; |
| - void *push_hdr; |
| - u32 len, last_frag; |
| - u16 next_to_use; |
| - u16 req_id; |
| - u16 push_len; |
| - u16 header_len; |
| dma_addr_t dma; |
| - int qid, rc, nb_hw_desc; |
| - int i = -1; |
| - |
| - netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); |
| - /* Determine which tx ring we will be placed on */ |
| - qid = skb_get_queue_mapping(skb); |
| - tx_ring = &adapter->tx_ring[qid]; |
| - txq = netdev_get_tx_queue(dev, qid); |
| - |
| - rc = ena_check_and_linearize_skb(tx_ring, skb); |
| - if (unlikely(rc)) |
| - goto error_drop_packet; |
| - |
| - skb_tx_timestamp(skb); |
| - len = skb_headlen(skb); |
| + u32 skb_head_len, frag_len, last_frag; |
| + u16 push_len = 0; |
| + u16 delta = 0; |
| + int i = 0; |
| |
| - next_to_use = tx_ring->next_to_use; |
| - req_id = tx_ring->free_tx_ids[next_to_use]; |
| - tx_info = &tx_ring->tx_buffer_info[req_id]; |
| - tx_info->num_of_bufs = 0; |
| - |
| - WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); |
| - ena_buf = tx_info->bufs; |
| + skb_head_len = skb_headlen(skb); |
| tx_info->skb = skb; |
| + ena_buf = tx_info->bufs; |
| |
| if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
| - /* prepared the push buffer */ |
| - push_len = min_t(u32, len, tx_ring->tx_max_header_size); |
| - header_len = push_len; |
| - push_hdr = skb->data; |
| + /* When the device is in LLQ mode, the driver will copy |
| + * the header into the device memory space. |
| + * The ena_com layer assumes the header is in a linear |
| + * memory space. |
| + * This assumption might be wrong since part of the header |
| + * can reside in the fragmented buffers. |
| + * Use skb_header_pointer() to make sure the header is in a |
| + * linear memory space. |
| + */ |
| + |
| + push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); |
| + *push_hdr = skb_header_pointer(skb, 0, push_len, |
| + tx_ring->push_buf_intermediate_buf); |
| + *header_len = push_len; |
| + if (unlikely(skb->data != *push_hdr)) { |
| + u64_stats_update_begin(&tx_ring->syncp); |
| + tx_ring->tx_stats.llq_buffer_copy++; |
| + u64_stats_update_end(&tx_ring->syncp); |
| + |
| + delta = push_len - skb_head_len; |
| + } |
| } else { |
| - push_len = 0; |
| - header_len = min_t(u32, len, tx_ring->tx_max_header_size); |
| - push_hdr = NULL; |
| + *push_hdr = NULL; |
| + *header_len = min_t(u32, skb_head_len, |
| + tx_ring->tx_max_header_size); |
| } |
| |
| - netif_dbg(adapter, tx_queued, dev, |
| + netif_dbg(adapter, tx_queued, adapter->netdev, |
| "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, |
| - push_hdr, push_len); |
| + *push_hdr, push_len); |
| |
| - if (len > push_len) { |
| + if (skb_head_len > push_len) { |
| dma = dma_map_single(tx_ring->dev, skb->data + push_len, |
| - len - push_len, DMA_TO_DEVICE); |
| - if (dma_mapping_error(tx_ring->dev, dma)) |
| + skb_head_len - push_len, DMA_TO_DEVICE); |
| + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) |
| goto error_report_dma_error; |
| |
| ena_buf->paddr = dma; |
| - ena_buf->len = len - push_len; |
| + ena_buf->len = skb_head_len - push_len; |
| |
| ena_buf++; |
| tx_info->num_of_bufs++; |
| + tx_info->map_linear_data = 1; |
| + } else { |
| + tx_info->map_linear_data = 0; |
| } |
| |
| last_frag = skb_shinfo(skb)->nr_frags; |
| @@ -2060,18 +2068,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| for (i = 0; i < last_frag; i++) { |
| const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| |
| - len = skb_frag_size(frag); |
| - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, |
| - DMA_TO_DEVICE); |
| - if (dma_mapping_error(tx_ring->dev, dma)) |
| + frag_len = skb_frag_size(frag); |
| + |
| + if (unlikely(delta >= frag_len)) { |
| + delta -= frag_len; |
| + continue; |
| + } |
| + |
| + dma = skb_frag_dma_map(tx_ring->dev, frag, delta, |
| + frag_len - delta, DMA_TO_DEVICE); |
| + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) |
| goto error_report_dma_error; |
| |
| ena_buf->paddr = dma; |
| - ena_buf->len = len; |
| + ena_buf->len = frag_len - delta; |
| ena_buf++; |
| + tx_info->num_of_bufs++; |
| + delta = 0; |
| } |
| |
| - tx_info->num_of_bufs += last_frag; |
| + return 0; |
| + |
| +error_report_dma_error: |
| + u64_stats_update_begin(&tx_ring->syncp); |
| + tx_ring->tx_stats.dma_mapping_err++; |
| + u64_stats_update_end(&tx_ring->syncp); |
| + netdev_warn(adapter->netdev, "failed to map skb\n"); |
| + |
| + tx_info->skb = NULL; |
| + |
| + tx_info->num_of_bufs += i; |
| + ena_unmap_tx_skb(tx_ring, tx_info); |
| + |
| + return -EINVAL; |
| +} |
| + |
| +/* Called with netif_tx_lock. */ |
| +static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| +{ |
| + struct ena_adapter *adapter = netdev_priv(dev); |
| + struct ena_tx_buffer *tx_info; |
| + struct ena_com_tx_ctx ena_tx_ctx; |
| + struct ena_ring *tx_ring; |
| + struct netdev_queue *txq; |
| + void *push_hdr; |
| + u16 next_to_use, req_id, header_len; |
| + int qid, rc, nb_hw_desc; |
| + |
| + netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); |
| + /* Determine which tx ring we will be placed on */ |
| + qid = skb_get_queue_mapping(skb); |
| + tx_ring = &adapter->tx_ring[qid]; |
| + txq = netdev_get_tx_queue(dev, qid); |
| + |
| + rc = ena_check_and_linearize_skb(tx_ring, skb); |
| + if (unlikely(rc)) |
| + goto error_drop_packet; |
| + |
| + skb_tx_timestamp(skb); |
| + |
| + next_to_use = tx_ring->next_to_use; |
| + req_id = tx_ring->free_tx_ids[next_to_use]; |
| + tx_info = &tx_ring->tx_buffer_info[req_id]; |
| + tx_info->num_of_bufs = 0; |
| + |
| + WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); |
| + |
| + rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); |
| + if (unlikely(rc)) |
| + goto error_drop_packet; |
| |
| memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); |
| ena_tx_ctx.ena_bufs = tx_info->bufs; |
| @@ -2087,14 +2152,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, |
| &nb_hw_desc); |
| |
| + /* ena_com_prepare_tx() can't fail due to overflow of the tx queue, |
| + * since the number of free descriptors in the queue is checked |
| + * after sending the previous packet. In case there isn't enough |
| + * space in the queue for the next packet, the queue is stopped |
| + * until there is again enough available space in the queue. |
| + * All other failure reasons of ena_com_prepare_tx() are fatal |
| + * and therefore require a device reset. |
| + */ |
| if (unlikely(rc)) { |
| netif_err(adapter, tx_queued, dev, |
| "failed to prepare tx bufs\n"); |
| u64_stats_update_begin(&tx_ring->syncp); |
| - tx_ring->tx_stats.queue_stop++; |
| tx_ring->tx_stats.prepare_ctx_err++; |
| u64_stats_update_end(&tx_ring->syncp); |
| - netif_tx_stop_queue(txq); |
| + adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; |
| + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
| goto error_unmap_dma; |
| } |
| |
| @@ -2157,35 +2230,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| |
| return NETDEV_TX_OK; |
| |
| -error_report_dma_error: |
| - u64_stats_update_begin(&tx_ring->syncp); |
| - tx_ring->tx_stats.dma_mapping_err++; |
| - u64_stats_update_end(&tx_ring->syncp); |
| - netdev_warn(adapter->netdev, "failed to map skb\n"); |
| - |
| - tx_info->skb = NULL; |
| - |
| error_unmap_dma: |
| - if (i >= 0) { |
| - /* save value of frag that failed */ |
| - last_frag = i; |
| - |
| - /* start back at beginning and unmap skb */ |
| - tx_info->skb = NULL; |
| - ena_buf = tx_info->bufs; |
| - dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), |
| - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); |
| - |
| - /* unmap remaining mapped pages */ |
| - for (i = 0; i < last_frag; i++) { |
| - ena_buf++; |
| - dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), |
| - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); |
| - } |
| - } |
| + ena_unmap_tx_skb(tx_ring, tx_info); |
| + tx_info->skb = NULL; |
| |
| error_drop_packet: |
| - |
| dev_kfree_skb(skb); |
| return NETDEV_TX_OK; |
| } |
| @@ -2621,7 +2670,9 @@ static int ena_restore_device(struct ena_adapter *adapter) |
| netif_carrier_on(adapter->netdev); |
| |
| mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
| - dev_err(&pdev->dev, "Device reset completed successfully\n"); |
| + dev_err(&pdev->dev, |
| + "Device reset completed successfully, Driver info: %s\n", |
| + version); |
| |
| return rc; |
| err_disable_msix: |
| @@ -2988,18 +3039,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, |
| return io_queue_num; |
| } |
| |
| -static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, |
| - struct ena_com_dev_get_features_ctx *get_feat_ctx) |
| +static int ena_set_queues_placement_policy(struct pci_dev *pdev, |
| + struct ena_com_dev *ena_dev, |
| + struct ena_admin_feature_llq_desc *llq, |
| + struct ena_llq_configurations *llq_default_configurations) |
| { |
| bool has_mem_bar; |
| + int rc; |
| + u32 llq_feature_mask; |
| + |
| + llq_feature_mask = 1 << ENA_ADMIN_LLQ; |
| + if (!(ena_dev->supported_features & llq_feature_mask)) { |
| + dev_err(&pdev->dev, |
| + "LLQ is not supported Fallback to host mode policy.\n"); |
| + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| + return 0; |
| + } |
| |
| has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); |
| |
| - /* Enable push mode if device supports LLQ */ |
| - if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0) |
| - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; |
| - else |
| + rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); |
| + if (unlikely(rc)) { |
| + dev_err(&pdev->dev, |
| + "Failed to configure the device mode. Fallback to host mode policy.\n"); |
| + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| + return 0; |
| + } |
| + |
| + /* Nothing to config, exit */ |
| + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
| + return 0; |
| + |
| + if (!has_mem_bar) { |
| + dev_err(&pdev->dev, |
| + "ENA device does not expose LLQ bar. Fallback to host mode policy.\n"); |
| ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| + return 0; |
| + } |
| + |
| + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, |
| + pci_resource_start(pdev, ENA_MEM_BAR), |
| + pci_resource_len(pdev, ENA_MEM_BAR)); |
| + |
| + if (!ena_dev->mem_bar) |
| + return -EFAULT; |
| + |
| + return 0; |
| } |
| |
| static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, |
| @@ -3117,6 +3202,15 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
| pci_release_selected_regions(pdev, release_bars); |
| } |
| |
| +static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) |
| +{ |
| + llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; |
| + llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; |
| + llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; |
| + llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; |
| + llq_config->llq_ring_entry_size_value = 128; |
| +} |
| + |
| static int ena_calc_queue_size(struct pci_dev *pdev, |
| struct ena_com_dev *ena_dev, |
| u16 *max_tx_sgl_size, |
| @@ -3165,7 +3259,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| static int version_printed; |
| struct net_device *netdev; |
| struct ena_adapter *adapter; |
| + struct ena_llq_configurations llq_config; |
| struct ena_com_dev *ena_dev = NULL; |
| + char *queue_type_str; |
| static int adapters_found; |
| int io_queue_num, bars, rc; |
| int queue_size; |
| @@ -3219,16 +3315,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| goto err_free_region; |
| } |
| |
| - ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); |
| + set_default_llq_configurations(&llq_config); |
| |
| - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
| - ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, |
| - pci_resource_start(pdev, ENA_MEM_BAR), |
| - pci_resource_len(pdev, ENA_MEM_BAR)); |
| - if (!ena_dev->mem_bar) { |
| - rc = -EFAULT; |
| - goto err_device_destroy; |
| - } |
| + rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, |
| + &llq_config); |
| + if (rc) { |
| + dev_err(&pdev->dev, "ena device init failed\n"); |
| + goto err_device_destroy; |
| } |
| |
| /* initial Tx interrupt delay, Assumes 1 usec granularity. |
| @@ -3243,8 +3336,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| goto err_device_destroy; |
| } |
| |
| - dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", |
| - io_queue_num, queue_size); |
| + dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n", |
| + io_queue_num, queue_size, |
| + (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ? |
| + "ENABLED" : "DISABLED"); |
| |
| /* dev zeroed in init_etherdev */ |
| netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); |
| @@ -3334,9 +3429,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| timer_setup(&adapter->timer_service, ena_timer_service, 0); |
| mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
| |
| - dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", |
| + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) |
| + queue_type_str = "Regular"; |
| + else |
| + queue_type_str = "Low Latency"; |
| + |
| + dev_info(&pdev->dev, |
| + "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n", |
| DEVICE_NAME, (long)pci_resource_start(pdev, 0), |
| - netdev->dev_addr, io_queue_num); |
| + netdev->dev_addr, io_queue_num, queue_type_str); |
| |
| set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| index 7c7ae56c52cf..4fa7d2fda475 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| @@ -151,6 +151,9 @@ struct ena_tx_buffer { |
| /* num of buffers used by this skb */ |
| u32 num_of_bufs; |
| |
| + /* Indicates if bufs[0] maps the linear data of the skb. */ |
| + u8 map_linear_data; |
| + |
| /* Used for detect missing tx packets to limit the number of prints */ |
| u32 print_once; |
| /* Save the last jiffies to detect missing tx packets |
| @@ -186,6 +189,7 @@ struct ena_stats_tx { |
| u64 tx_poll; |
| u64 doorbells; |
| u64 bad_req_id; |
| + u64 llq_buffer_copy; |
| u64 missed_tx; |
| }; |
| |
| @@ -257,6 +261,8 @@ struct ena_ring { |
| struct ena_stats_tx tx_stats; |
| struct ena_stats_rx rx_stats; |
| }; |
| + |
| + u8 *push_buf_intermediate_buf; |
| int empty_rx_queue; |
| } ____cacheline_aligned; |
| |
| -- |
| 2.19.1 |
| |
| From 910193b2d74a25d62354ab63115cd9a28ece1add Mon Sep 17 00:00:00 2001 |
| From: Peter Robinson <pbrobinson@gmail.com> |
| Date: Fri, 23 Nov 2018 12:33:42 +0000 |
| Subject: [PATCH 07/16] net: ena: use CSUM_CHECKED device indication to report |
| skb's checksum status |
| |
| Set skb->ip_summed to the correct value as reported by the device. |
| Add a counter for the case where Rx checksum offload is enabled but |
| the device didn't check it. |
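| |
| The receive-path decision this patch introduces amounts to the |
| following (illustrative sketch; names as used in this series): |
| |
|     if (likely(ena_rx_ctx->l4_csum_checked)) { |
|         /* device verified the L4 checksum and found it valid */ |
|         skb->ip_summed = CHECKSUM_UNNECESSARY; |
|     } else { |
|         /* offload is enabled but the device skipped the check; |
|          * count it and let the stack verify in software |
|          */ |
|         u64_stats_update_begin(&rx_ring->syncp); |
|         rx_ring->rx_stats.csum_unchecked++; |
|         u64_stats_update_end(&rx_ring->syncp); |
|         skb->ip_summed = CHECKSUM_NONE; |
|     } |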
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| Signed-off-by: Peter Robinson <pbrobinson@gmail.com> |
| |
| drivers/net/ethernet/amazon/ena/ena_eth_com.c | 3 +++ |
| drivers/net/ethernet/amazon/ena/ena_eth_com.h | 1 + |
| drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h | 10 ++++++++-- |
| drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 + |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 13 ++++++++++++- |
| drivers/net/ethernet/amazon/ena/ena_netdev.h | 1 + |
| 6 files changed, 26 insertions(+), 3 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| index 17107ca107e3..f6c2d3855be8 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c |
| @@ -354,6 +354,9 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, |
| ena_rx_ctx->l4_csum_err = |
| !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> |
| ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); |
| + ena_rx_ctx->l4_csum_checked = |
| + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> |
| + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); |
| ena_rx_ctx->hash = cdesc->hash; |
| ena_rx_ctx->frag = |
| (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| index bcc84072367d..340d02b64ca6 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h |
| @@ -67,6 +67,7 @@ struct ena_com_rx_ctx { |
| enum ena_eth_io_l4_proto_index l4_proto; |
| bool l3_csum_err; |
| bool l4_csum_err; |
| + u8 l4_csum_checked; |
| /* fragmented packet */ |
| bool frag; |
| u32 hash; |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |
| index f320c58793a5..4c5ccaa13c42 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |
| @@ -242,9 +242,13 @@ struct ena_eth_io_rx_cdesc_base { |
| * checksum error detected, or, the controller didn't |
| * validate the checksum. This bit is valid only when |
| * l4_proto_idx indicates TCP/UDP packet, and, |
| - * ipv4_frag is not set |
| + * ipv4_frag is not set. This bit is valid only when |
| + * l4_csum_checked below is set. |
| * 15 : ipv4_frag - Indicates IPv4 fragmented packet |
| - * 23:16 : reserved16 |
| + * 16 : l4_csum_checked - L4 checksum was verified |
| + * (could be OK or error); when cleared, the checksum |
| + * status is unknown |
| + * 23:17 : reserved17 - MBZ |
| * 24 : phase |
| * 25 : l3_csum2 - second checksum engine result |
| * 26 : first - Indicates first descriptor in |
| @@ -390,6 +394,8 @@ struct ena_eth_io_numa_node_cfg_reg { |
| #define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) |
| #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 |
| #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) |
| #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 |
| #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) |
| #define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c |
| index fd28bd0d1c1e..f3a5a384e6e8 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c |
| @@ -97,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { |
| ENA_STAT_RX_ENTRY(rx_copybreak_pkt), |
| ENA_STAT_RX_ENTRY(bad_req_id), |
| ENA_STAT_RX_ENTRY(empty_rx_ring), |
| + ENA_STAT_RX_ENTRY(csum_unchecked), |
| }; |
| |
| static const struct ena_stats ena_stats_ena_com_strings[] = { |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index fcdfaf0ab8a7..35b0ce5db24b 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -994,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, |
| return; |
| } |
| |
| - skb->ip_summed = CHECKSUM_UNNECESSARY; |
| + if (likely(ena_rx_ctx->l4_csum_checked)) { |
| + skb->ip_summed = CHECKSUM_UNNECESSARY; |
| + } else { |
| + u64_stats_update_begin(&rx_ring->syncp); |
| + rx_ring->rx_stats.csum_unchecked++; |
| + u64_stats_update_end(&rx_ring->syncp); |
| + skb->ip_summed = CHECKSUM_NONE; |
| + } |
| + } else { |
| + skb->ip_summed = CHECKSUM_NONE; |
| + return; |
| } |
| + |
| } |
| |
| static void ena_set_rx_hash(struct ena_ring *rx_ring, |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| index 4fa7d2fda475..2d62e2c7fed7 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| @@ -205,6 +205,7 @@ struct ena_stats_rx { |
| u64 rx_copybreak_pkt; |
| u64 bad_req_id; |
| u64 empty_rx_ring; |
| + u64 csum_unchecked; |
| }; |
| |
| struct ena_ring { |
| -- |
| 2.19.1 |
| |
| From 20ba28b24e1d861ee3d8757fcadee46f742f29c5 Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:22 +0300 |
| Subject: [PATCH 08/16] net: ena: explicit casting and initialization, and |
| clearer error handling |
| |
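| Initializing "flags" before spin_lock_irqsave() is functionally a no-op |
| (the macro always assigns it), presumably done here to silence |
| maybe-uninitialized warnings from some compilers and static checkers. |
| The resulting pattern, repeated throughout ena_com.c: |
| |
|     unsigned long flags = 0; |
| |
|     spin_lock_irqsave(&admin_queue->q_lock, flags); |
|     /* ... critical section ... */ |
|     spin_unlock_irqrestore(&admin_queue->q_lock, flags); |
| |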
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_com.c | 39 ++++++++++++-------- |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 +-- |
| drivers/net/ethernet/amazon/ena/ena_netdev.h | 22 +++++------ |
| 3 files changed, 36 insertions(+), 30 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c |
| index 5220c7578d6b..5c468b28723b 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.c |
| @@ -235,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu |
| tail_masked = admin_queue->sq.tail & queue_size_mask; |
| |
| /* In case of queue FULL */ |
| - cnt = atomic_read(&admin_queue->outstanding_cmds); |
| + cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); |
| if (cnt >= admin_queue->q_depth) { |
| pr_debug("admin queue is full.\n"); |
| admin_queue->stats.out_of_space++; |
| @@ -304,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue |
| struct ena_admin_acq_entry *comp, |
| size_t comp_size_in_bytes) |
| { |
| - unsigned long flags; |
| + unsigned long flags = 0; |
| struct ena_comp_ctx *comp_ctx; |
| |
| spin_lock_irqsave(&admin_queue->q_lock, flags); |
| @@ -332,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, |
| |
| memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); |
| |
| - io_sq->dma_addr_bits = ena_dev->dma_addr_bits; |
| + io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; |
| io_sq->desc_entry_size = |
| (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? |
| sizeof(struct ena_eth_io_tx_desc) : |
| @@ -486,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu |
| |
| /* Go over all the completions */ |
| while ((READ_ONCE(cqe->acq_common_descriptor.flags) & |
| - ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { |
| + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { |
| /* Do not read the rest of the completion entry before the |
| * phase bit was validated |
| */ |
| @@ -537,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status) |
| static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, |
| struct ena_com_admin_queue *admin_queue) |
| { |
| - unsigned long flags, timeout; |
| + unsigned long flags = 0; |
| + unsigned long timeout; |
| int ret; |
| |
| timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); |
| @@ -736,7 +737,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, |
| static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, |
| struct ena_com_admin_queue *admin_queue) |
| { |
| - unsigned long flags; |
| + unsigned long flags = 0; |
| int ret; |
| |
| wait_for_completion_timeout(&comp_ctx->wait_event, |
| @@ -782,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) |
| volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = |
| mmio_read->read_resp; |
| u32 mmio_read_reg, ret, i; |
| - unsigned long flags; |
| + unsigned long flags = 0; |
| u32 timeout = mmio_read->reg_read_to; |
| |
| might_sleep(); |
| @@ -1426,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) |
| void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) |
| { |
| struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
| - unsigned long flags; |
| + unsigned long flags = 0; |
| |
| spin_lock_irqsave(&admin_queue->q_lock, flags); |
| while (atomic_read(&admin_queue->outstanding_cmds) != 0) { |
| @@ -1470,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) |
| void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) |
| { |
| struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
| - unsigned long flags; |
| + unsigned long flags = 0; |
| |
| spin_lock_irqsave(&admin_queue->q_lock, flags); |
| ena_dev->admin_queue.running_state = state; |
| @@ -1504,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) |
| } |
| |
| if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { |
| - pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", |
| + pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", |
| get_resp.u.aenq.supported_groups, groups_flag); |
| return -EOPNOTSUPP; |
| } |
| @@ -1652,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) |
| sizeof(*mmio_read->read_resp), |
| &mmio_read->read_resp_dma_addr, GFP_KERNEL); |
| if (unlikely(!mmio_read->read_resp)) |
| - return -ENOMEM; |
| + goto err; |
| |
| ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); |
| |
| @@ -1661,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) |
| mmio_read->readless_supported = true; |
| |
| return 0; |
| + |
| +err: |
| + |
| + return -ENOMEM; |
| } |
| |
| void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) |
| @@ -1961,6 +1966,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) |
| struct ena_admin_aenq_entry *aenq_e; |
| struct ena_admin_aenq_common_desc *aenq_common; |
| struct ena_com_aenq *aenq = &dev->aenq; |
| + unsigned long long timestamp; |
| ena_aenq_handler handler_cb; |
| u16 masked_head, processed = 0; |
| u8 phase; |
| @@ -1978,10 +1984,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) |
| */ |
| dma_rmb(); |
| |
| + timestamp = |
| + (unsigned long long)aenq_common->timestamp_low | |
| + ((unsigned long long)aenq_common->timestamp_high << 32); |
| pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", |
| - aenq_common->group, aenq_common->syndrom, |
| - (u64)aenq_common->timestamp_low + |
| - ((u64)aenq_common->timestamp_high << 32)); |
| + aenq_common->group, aenq_common->syndrom, timestamp); |
| |
| /* Handle specific event*/ |
| handler_cb = ena_com_get_specific_aenq_cb(dev, |
| @@ -2623,8 +2630,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) |
| if (unlikely(!host_attr->host_info)) |
| return -ENOMEM; |
| |
| - host_attr->host_info->ena_spec_version = |
| - ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | |
| + host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << |
| + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | |
| (ENA_COMMON_SPEC_VERSION_MINOR)); |
| |
| return 0; |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index 35b0ce5db24b..e345220b4d9a 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -2604,15 +2604,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) |
| |
| dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
| adapter->dev_up_before_reset = dev_up; |
| - |
| if (!graceful) |
| ena_com_set_admin_running_state(ena_dev, false); |
| |
| if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
| ena_down(adapter); |
| |
| - /* Before releasing the ENA resources, a device reset is required. |
| - * (to prevent the device from accessing them). |
| + /* Stop the device from sending AENQ events (in case the reset flag is |
| + * set and the device is up, ena_close has already reset the device). |
| * In case the reset flag is set and the device is up, ena_down() |
| * already perform the reset, so it can be skipped. |
| */ |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| index 2d62e2c7fed7..a16baf0124d5 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| @@ -61,6 +61,17 @@ |
| #define ENA_ADMIN_MSIX_VEC 1 |
| #define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues)) |
| |
| +/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the |
| + * driver passes 0. |
| + * Since the max packet size the ENA handles is ~9kB, limit the buffer length |
| + * to 16kB. |
| + */ |
| +#if PAGE_SIZE > SZ_16K |
| +#define ENA_PAGE_SIZE SZ_16K |
| +#else |
| +#define ENA_PAGE_SIZE PAGE_SIZE |
| +#endif |
| + |
| #define ENA_MIN_MSIX_VEC 2 |
| |
| #define ENA_REG_BAR 0 |
| @@ -362,15 +373,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); |
| |
| int ena_get_sset_count(struct net_device *netdev, int sset); |
| |
| -/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the |
| - * driver passas 0. |
| - * Since the max packet size the ENA handles is ~9kB limit the buffer length to |
| - * 16kB. |
| - */ |
| -#if PAGE_SIZE > SZ_16K |
| -#define ENA_PAGE_SIZE SZ_16K |
| -#else |
| -#define ENA_PAGE_SIZE PAGE_SIZE |
| -#endif |
| - |
| #endif /* !(ENA_H) */ |
| -- |
| 2.19.1 |
| |
| From 7198e3afd93cabf55a3700cb015f59be831ecdcc Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:23 +0300 |
| Subject: [PATCH 09/16] net: ena: limit refill Rx threshold to 256 to avoid |
| latency issues |
| |
| Currently, Rx refill is done when the number of required descriptors is |
| above 1/8 of the queue size. With a default of 1024 entries per queue, |
| the threshold is 128 descriptors. |
| There is an intention to increase the queue size to 8192 entries. |
| In that case, a threshold of 1024 descriptors is too large and can hurt |
| latency. |
| Add another limitation to the Rx threshold: cap it at 256 descriptors. |
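| |
| With the cap, the threshold computation effectively becomes: |
| |
|     refill_threshold = min(ring_size / ENA_RX_REFILL_THRESH_DIVIDER, |
|                            ENA_RX_REFILL_THRESH_PACKET); |
| |
| so a 1024-entry ring keeps its 128-descriptor threshold, while an |
| 8192-entry ring is capped at 256 instead of growing to 1024. |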
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 4 +++- |
| drivers/net/ethernet/amazon/ena/ena_netdev.h | 5 +++-- |
| 2 files changed, 6 insertions(+), 3 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index e345220b4d9a..c4c33b174e17 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -1122,7 +1122,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, |
| rx_ring->next_to_clean = next_to_clean; |
| |
| refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); |
| - refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; |
| + refill_threshold = |
| + min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, |
| + ENA_RX_REFILL_THRESH_PACKET); |
| |
| /* Optimization, try to batch new rx buffers */ |
| if (refill_required > refill_threshold) { |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| index a16baf0124d5..0cf35ae77884 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| @@ -106,10 +106,11 @@ |
| */ |
| #define ENA_TX_POLL_BUDGET_DIVIDER 4 |
| |
| -/* Refill Rx queue when number of available descriptors is below |
| - * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER |
| +/* Refill Rx queue when number of required descriptors is above |
| + * MIN(QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET) |
| */ |
| #define ENA_RX_REFILL_THRESH_DIVIDER 8 |
| +#define ENA_RX_REFILL_THRESH_PACKET 256 |
| |
| /* Number of queues to check for missing queues per timer service */ |
| #define ENA_MONITORED_TX_QUEUES 4 |
| -- |
| 2.19.1 |
| |
| From 9fa751367f9fec718f4bb014e136fa5aecfb836c Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:24 +0300 |
| Subject: [PATCH 10/16] net: ena: change rx copybreak default to reduce kernel |
| memory pressure |
| |
| This improves socket memory utilization when receiving packets larger |
| than 128 bytes (the previous Rx copybreak) and smaller than 256 bytes. |
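| |
| For context: packets no larger than the copybreak are copied into a |
| small freshly allocated skb so the (page-sized) Rx buffer can be |
| reused. A rough sketch of that path, simplified from the driver's Rx |
| skb handling ("buf_addr" stands in for the synced buffer address): |
| |
|     if (len <= rx_ring->rx_copybreak) { |
|         skb = netdev_alloc_skb_ip_align(rx_ring->netdev, |
|                                         rx_ring->rx_copybreak); |
|         if (unlikely(!skb)) |
|             return NULL; |
|         skb_copy_to_linear_data(skb, buf_addr, len); |
|         skb_put(skb, len); |
|     } |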
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 +- |
| 1 file changed, 1 insertion(+), 1 deletion(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| index 0cf35ae77884..d241dfc542ca 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| @@ -81,7 +81,7 @@ |
| #define ENA_DEFAULT_RING_SIZE (1024) |
| |
| #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) |
| -#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) |
| +#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN) |
| |
| /* limit the buffer size to 600 bytes to handle MTU changes from very |
| * small to very large, in which case the number of buffers per packet |
| -- |
| 2.19.1 |
| |
| From 3bd1427b1dc5fff10b94528a7b0d7898b67073ce Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:25 +0300 |
| Subject: [PATCH 11/16] net: ena: remove redundant parameter in |
| ena_com_admin_init() |
| |
| Remove redundant spinlock acquire parameter from ena_com_admin_init() |
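| |
| The spinlock is now initialized unconditionally inside |
| ena_com_admin_init(), presumably because the lock cannot be held at |
| that point, so re-initializing it (e.g. after FLR) is harmless. Call |
| sites reduce to: |
| |
|     rc = ena_com_admin_init(ena_dev, &aenq_handlers); |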
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_com.c | 6 ++---- |
| drivers/net/ethernet/amazon/ena/ena_com.h | 5 +---- |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- |
| 3 files changed, 4 insertions(+), 9 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c |
| index 5c468b28723b..420cede41ca4 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.c |
| @@ -1701,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) |
| } |
| |
| int ena_com_admin_init(struct ena_com_dev *ena_dev, |
| - struct ena_aenq_handlers *aenq_handlers, |
| - bool init_spinlock) |
| + struct ena_aenq_handlers *aenq_handlers) |
| { |
| struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
| u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; |
| @@ -1728,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, |
| |
| atomic_set(&admin_queue->outstanding_cmds, 0); |
| |
| - if (init_spinlock) |
| - spin_lock_init(&admin_queue->q_lock); |
| + spin_lock_init(&admin_queue->q_lock); |
| |
| ret = ena_com_init_comp_ctxt(admin_queue); |
| if (ret) |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h |
| index 25af8d025919..ae8b4857fce3 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.h |
| @@ -436,8 +436,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); |
| /* ena_com_admin_init - Init the admin and the async queues |
| * @ena_dev: ENA communication layer struct |
| * @aenq_handlers: Those handlers to be called upon event. |
| - * @init_spinlock: Indicate if this method should init the admin spinlock or |
| - * the spinlock was init before (for example, in a case of FLR). |
| * |
| * Initialize the admin submission and completion queues. |
| * Initialize the asynchronous events notification queues. |
| @@ -445,8 +443,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); |
| * @return - 0 on success, negative value on failure. |
| */ |
| int ena_com_admin_init(struct ena_com_dev *ena_dev, |
| - struct ena_aenq_handlers *aenq_handlers, |
| - bool init_spinlock); |
| + struct ena_aenq_handlers *aenq_handlers); |
| |
| /* ena_com_admin_destroy - Destroy the admin and the async events queues. |
| * @ena_dev: ENA communication layer struct |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index c4c33b174e17..284a0a612131 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -2508,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, |
| } |
| |
| /* ENA admin level init */ |
| - rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); |
| + rc = ena_com_admin_init(ena_dev, &aenq_handlers); |
| if (rc) { |
| dev_err(dev, |
| "Can not initialize ena admin queue with device\n"); |
| -- |
| 2.19.1 |
| |
| From f9f5dc0c8ab71ed9b98761eea2995b46983131bf Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:26 +0300 |
| Subject: [PATCH 12/16] net: ena: update driver version to 2.0.1 |
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_netdev.h | 6 +++--- |
| 1 file changed, 3 insertions(+), 3 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| index d241dfc542ca..521873642339 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h |
| @@ -43,9 +43,9 @@ |
| #include "ena_com.h" |
| #include "ena_eth_com.h" |
| |
| -#define DRV_MODULE_VER_MAJOR 1 |
| -#define DRV_MODULE_VER_MINOR 5 |
| -#define DRV_MODULE_VER_SUBMINOR 0 |
| +#define DRV_MODULE_VER_MAJOR 2 |
| +#define DRV_MODULE_VER_MINOR 0 |
| +#define DRV_MODULE_VER_SUBMINOR 1 |
| |
| #define DRV_MODULE_NAME "ena" |
| #ifndef DRV_MODULE_VERSION |
| -- |
| 2.19.1 |
| |
| From 24f2b7764070e2e4cd8a2b056854f1928918887e Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Thu, 11 Oct 2018 11:26:27 +0300 |
| Subject: [PATCH 13/16] net: ena: fix indentations in ena_defs for better |
| readability |
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| .../net/ethernet/amazon/ena/ena_admin_defs.h | 334 +++++++----------- |
| .../net/ethernet/amazon/ena/ena_eth_io_defs.h | 223 ++++++------ |
| .../net/ethernet/amazon/ena/ena_regs_defs.h | 206 +++++------ |
| 3 files changed, 338 insertions(+), 425 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| index b439ec1b3edb..9f80b73f90b1 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h |
| @@ -32,119 +32,81 @@ |
| #ifndef _ENA_ADMIN_H_ |
| #define _ENA_ADMIN_H_ |
| |
| -enum ena_admin_aq_opcode { |
| - ENA_ADMIN_CREATE_SQ = 1, |
| - |
| - ENA_ADMIN_DESTROY_SQ = 2, |
| - |
| - ENA_ADMIN_CREATE_CQ = 3, |
| - |
| - ENA_ADMIN_DESTROY_CQ = 4, |
| - |
| - ENA_ADMIN_GET_FEATURE = 8, |
| |
| - ENA_ADMIN_SET_FEATURE = 9, |
| - |
| - ENA_ADMIN_GET_STATS = 11, |
| +enum ena_admin_aq_opcode { |
| + ENA_ADMIN_CREATE_SQ = 1, |
| + ENA_ADMIN_DESTROY_SQ = 2, |
| + ENA_ADMIN_CREATE_CQ = 3, |
| + ENA_ADMIN_DESTROY_CQ = 4, |
| + ENA_ADMIN_GET_FEATURE = 8, |
| + ENA_ADMIN_SET_FEATURE = 9, |
| + ENA_ADMIN_GET_STATS = 11, |
| }; |
| |
| enum ena_admin_aq_completion_status { |
| - ENA_ADMIN_SUCCESS = 0, |
| - |
| - ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, |
| - |
| - ENA_ADMIN_BAD_OPCODE = 2, |
| - |
| - ENA_ADMIN_UNSUPPORTED_OPCODE = 3, |
| - |
| - ENA_ADMIN_MALFORMED_REQUEST = 4, |
| - |
| + ENA_ADMIN_SUCCESS = 0, |
| + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, |
| + ENA_ADMIN_BAD_OPCODE = 2, |
| + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, |
| + ENA_ADMIN_MALFORMED_REQUEST = 4, |
| /* Additional status is provided in ACQ entry extended_status */ |
| - ENA_ADMIN_ILLEGAL_PARAMETER = 5, |
| - |
| - ENA_ADMIN_UNKNOWN_ERROR = 6, |
| - |
| - ENA_ADMIN_RESOURCE_BUSY = 7, |
| + ENA_ADMIN_ILLEGAL_PARAMETER = 5, |
| + ENA_ADMIN_UNKNOWN_ERROR = 6, |
| + ENA_ADMIN_RESOURCE_BUSY = 7, |
| }; |
| |
| enum ena_admin_aq_feature_id { |
| - ENA_ADMIN_DEVICE_ATTRIBUTES = 1, |
| - |
| - ENA_ADMIN_MAX_QUEUES_NUM = 2, |
| - |
| - ENA_ADMIN_HW_HINTS = 3, |
| - |
| - ENA_ADMIN_LLQ = 4, |
| - |
| - ENA_ADMIN_RSS_HASH_FUNCTION = 10, |
| - |
| - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, |
| - |
| - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, |
| - |
| - ENA_ADMIN_MTU = 14, |
| - |
| - ENA_ADMIN_RSS_HASH_INPUT = 18, |
| - |
| - ENA_ADMIN_INTERRUPT_MODERATION = 20, |
| - |
| - ENA_ADMIN_AENQ_CONFIG = 26, |
| - |
| - ENA_ADMIN_LINK_CONFIG = 27, |
| - |
| - ENA_ADMIN_HOST_ATTR_CONFIG = 28, |
| - |
| - ENA_ADMIN_FEATURES_OPCODE_NUM = 32, |
| + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, |
| + ENA_ADMIN_MAX_QUEUES_NUM = 2, |
| + ENA_ADMIN_HW_HINTS = 3, |
| + ENA_ADMIN_LLQ = 4, |
| + ENA_ADMIN_RSS_HASH_FUNCTION = 10, |
| + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, |
| + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, |
| + ENA_ADMIN_MTU = 14, |
| + ENA_ADMIN_RSS_HASH_INPUT = 18, |
| + ENA_ADMIN_INTERRUPT_MODERATION = 20, |
| + ENA_ADMIN_AENQ_CONFIG = 26, |
| + ENA_ADMIN_LINK_CONFIG = 27, |
| + ENA_ADMIN_HOST_ATTR_CONFIG = 28, |
| + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, |
| }; |
| |
| enum ena_admin_placement_policy_type { |
| /* descriptors and headers are in host memory */ |
| - ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, |
| - |
| + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, |
| /* descriptors and headers are in device memory (a.k.a Low Latency |
| * Queue) |
| */ |
| - ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, |
| + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, |
| }; |
| |
| enum ena_admin_link_types { |
| - ENA_ADMIN_LINK_SPEED_1G = 0x1, |
| - |
| - ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, |
| - |
| - ENA_ADMIN_LINK_SPEED_5G = 0x4, |
| - |
| - ENA_ADMIN_LINK_SPEED_10G = 0x8, |
| - |
| - ENA_ADMIN_LINK_SPEED_25G = 0x10, |
| - |
| - ENA_ADMIN_LINK_SPEED_40G = 0x20, |
| - |
| - ENA_ADMIN_LINK_SPEED_50G = 0x40, |
| - |
| - ENA_ADMIN_LINK_SPEED_100G = 0x80, |
| - |
| - ENA_ADMIN_LINK_SPEED_200G = 0x100, |
| - |
| - ENA_ADMIN_LINK_SPEED_400G = 0x200, |
| + ENA_ADMIN_LINK_SPEED_1G = 0x1, |
| + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, |
| + ENA_ADMIN_LINK_SPEED_5G = 0x4, |
| + ENA_ADMIN_LINK_SPEED_10G = 0x8, |
| + ENA_ADMIN_LINK_SPEED_25G = 0x10, |
| + ENA_ADMIN_LINK_SPEED_40G = 0x20, |
| + ENA_ADMIN_LINK_SPEED_50G = 0x40, |
| + ENA_ADMIN_LINK_SPEED_100G = 0x80, |
| + ENA_ADMIN_LINK_SPEED_200G = 0x100, |
| + ENA_ADMIN_LINK_SPEED_400G = 0x200, |
| }; |
| |
| enum ena_admin_completion_policy_type { |
| /* completion queue entry for each sq descriptor */ |
| - ENA_ADMIN_COMPLETION_POLICY_DESC = 0, |
| - |
| + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, |
| /* completion queue entry upon request in sq descriptor */ |
| - ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, |
| - |
| + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, |
| /* current queue head pointer is updated in OS memory upon sq |
| * descriptor request |
| */ |
| - ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, |
| - |
| + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, |
| /* current queue head pointer is updated in OS memory for each sq |
| * descriptor |
| */ |
| - ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, |
| + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, |
| }; |
| |
| /* basic stats return ena_admin_basic_stats while extanded stats return a |
| @@ -152,15 +114,13 @@ enum ena_admin_completion_policy_type { |
| * device id |
| */ |
| enum ena_admin_get_stats_type { |
| - ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, |
| - |
| - ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, |
| + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, |
| + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, |
| }; |
| |
| enum ena_admin_get_stats_scope { |
| - ENA_ADMIN_SPECIFIC_QUEUE = 0, |
| - |
| - ENA_ADMIN_ETH_TRAFFIC = 1, |
| + ENA_ADMIN_SPECIFIC_QUEUE = 0, |
| + ENA_ADMIN_ETH_TRAFFIC = 1, |
| }; |
| |
| struct ena_admin_aq_common_desc { |
| @@ -231,7 +191,9 @@ struct ena_admin_acq_common_desc { |
| |
| u16 extended_status; |
| |
| - /* serves as a hint what AQ entries can be revoked */ |
| + /* indicates to the driver which AQ entry has been consumed by the |
| + * device and could be reused |
| + */ |
| u16 sq_head_indx; |
| }; |
| |
| @@ -300,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd { |
| }; |
| |
| enum ena_admin_sq_direction { |
| - ENA_ADMIN_SQ_DIRECTION_TX = 1, |
| - |
| - ENA_ADMIN_SQ_DIRECTION_RX = 2, |
| + ENA_ADMIN_SQ_DIRECTION_TX = 1, |
| + ENA_ADMIN_SQ_DIRECTION_RX = 2, |
| }; |
| |
| struct ena_admin_acq_create_sq_resp_desc { |
| @@ -664,9 +625,8 @@ struct ena_admin_feature_offload_desc { |
| }; |
| |
| enum ena_admin_hash_functions { |
| - ENA_ADMIN_TOEPLITZ = 1, |
| - |
| - ENA_ADMIN_CRC32 = 2, |
| + ENA_ADMIN_TOEPLITZ = 1, |
| + ENA_ADMIN_CRC32 = 2, |
| }; |
| |
| struct ena_admin_feature_rss_flow_hash_control { |
| @@ -692,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function { |
| |
| /* RSS flow hash protocols */ |
| enum ena_admin_flow_hash_proto { |
| - ENA_ADMIN_RSS_TCP4 = 0, |
| - |
| - ENA_ADMIN_RSS_UDP4 = 1, |
| - |
| - ENA_ADMIN_RSS_TCP6 = 2, |
| - |
| - ENA_ADMIN_RSS_UDP6 = 3, |
| - |
| - ENA_ADMIN_RSS_IP4 = 4, |
| - |
| - ENA_ADMIN_RSS_IP6 = 5, |
| - |
| - ENA_ADMIN_RSS_IP4_FRAG = 6, |
| - |
| - ENA_ADMIN_RSS_NOT_IP = 7, |
| - |
| + ENA_ADMIN_RSS_TCP4 = 0, |
| + ENA_ADMIN_RSS_UDP4 = 1, |
| + ENA_ADMIN_RSS_TCP6 = 2, |
| + ENA_ADMIN_RSS_UDP6 = 3, |
| + ENA_ADMIN_RSS_IP4 = 4, |
| + ENA_ADMIN_RSS_IP6 = 5, |
| + ENA_ADMIN_RSS_IP4_FRAG = 6, |
| + ENA_ADMIN_RSS_NOT_IP = 7, |
| /* TCPv6 with extension header */ |
| - ENA_ADMIN_RSS_TCP6_EX = 8, |
| - |
| + ENA_ADMIN_RSS_TCP6_EX = 8, |
| /* IPv6 with extension header */ |
| - ENA_ADMIN_RSS_IP6_EX = 9, |
| - |
| - ENA_ADMIN_RSS_PROTO_NUM = 16, |
| + ENA_ADMIN_RSS_IP6_EX = 9, |
| + ENA_ADMIN_RSS_PROTO_NUM = 16, |
| }; |
| |
| /* RSS flow hash fields */ |
| enum ena_admin_flow_hash_fields { |
| /* Ethernet Dest Addr */ |
| - ENA_ADMIN_RSS_L2_DA = BIT(0), |
| - |
| + ENA_ADMIN_RSS_L2_DA = BIT(0), |
| /* Ethernet Src Addr */ |
| - ENA_ADMIN_RSS_L2_SA = BIT(1), |
| - |
| + ENA_ADMIN_RSS_L2_SA = BIT(1), |
| /* ipv4/6 Dest Addr */ |
| - ENA_ADMIN_RSS_L3_DA = BIT(2), |
| - |
| + ENA_ADMIN_RSS_L3_DA = BIT(2), |
| /* ipv4/6 Src Addr */ |
| - ENA_ADMIN_RSS_L3_SA = BIT(3), |
| - |
| + ENA_ADMIN_RSS_L3_SA = BIT(3), |
| /* tcp/udp Dest Port */ |
| - ENA_ADMIN_RSS_L4_DP = BIT(4), |
| - |
| + ENA_ADMIN_RSS_L4_DP = BIT(4), |
| /* tcp/udp Src Port */ |
| - ENA_ADMIN_RSS_L4_SP = BIT(5), |
| + ENA_ADMIN_RSS_L4_SP = BIT(5), |
| }; |
| |
| struct ena_admin_proto_input { |
| @@ -774,19 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input { |
| }; |
| |
| enum ena_admin_os_type { |
| - ENA_ADMIN_OS_LINUX = 1, |
| - |
| - ENA_ADMIN_OS_WIN = 2, |
| - |
| - ENA_ADMIN_OS_DPDK = 3, |
| - |
| - ENA_ADMIN_OS_FREEBSD = 4, |
| - |
| - ENA_ADMIN_OS_IPXE = 5, |
| - |
| - ENA_ADMIN_OS_ESXI = 6, |
| - |
| - ENA_ADMIN_OS_GROUPS_NUM = 6, |
| + ENA_ADMIN_OS_LINUX = 1, |
| + ENA_ADMIN_OS_WIN = 2, |
| + ENA_ADMIN_OS_DPDK = 3, |
| + ENA_ADMIN_OS_FREEBSD = 4, |
| + ENA_ADMIN_OS_IPXE = 5, |
| + ENA_ADMIN_OS_ESXI = 6, |
| + ENA_ADMIN_OS_GROUPS_NUM = 6, |
| }; |
| |
| struct ena_admin_host_info { |
| @@ -981,25 +920,18 @@ struct ena_admin_aenq_common_desc { |
| |
| /* asynchronous event notification groups */ |
| enum ena_admin_aenq_group { |
| - ENA_ADMIN_LINK_CHANGE = 0, |
| - |
| - ENA_ADMIN_FATAL_ERROR = 1, |
| - |
| - ENA_ADMIN_WARNING = 2, |
| - |
| - ENA_ADMIN_NOTIFICATION = 3, |
| - |
| - ENA_ADMIN_KEEP_ALIVE = 4, |
| - |
| - ENA_ADMIN_AENQ_GROUPS_NUM = 5, |
| + ENA_ADMIN_LINK_CHANGE = 0, |
| + ENA_ADMIN_FATAL_ERROR = 1, |
| + ENA_ADMIN_WARNING = 2, |
| + ENA_ADMIN_NOTIFICATION = 3, |
| + ENA_ADMIN_KEEP_ALIVE = 4, |
| + ENA_ADMIN_AENQ_GROUPS_NUM = 5, |
| }; |
| |
| enum ena_admin_aenq_notification_syndrom { |
| - ENA_ADMIN_SUSPEND = 0, |
| - |
| - ENA_ADMIN_RESUME = 1, |
| - |
| - ENA_ADMIN_UPDATE_HINTS = 2, |
| + ENA_ADMIN_SUSPEND = 0, |
| + ENA_ADMIN_RESUME = 1, |
| + ENA_ADMIN_UPDATE_HINTS = 2, |
| }; |
| |
| struct ena_admin_aenq_entry { |
| @@ -1034,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp { |
| }; |
| |
| /* aq_common_desc */ |
| -#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) |
| -#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) |
| -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 |
| -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) |
| -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 |
| -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) |
| +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) |
| +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) |
| +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 |
| +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) |
| +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 |
| +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) |
| |
| /* sq */ |
| -#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 |
| -#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) |
| +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 |
| +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) |
| |
| /* acq_common_desc */ |
| -#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) |
| -#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) |
| +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) |
| +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) |
| |
| /* aq_create_sq_cmd */ |
| -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 |
| -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) |
| -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) |
| -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 |
| -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) |
| +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 |
| +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) |
| +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) |
| +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 |
| +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) |
| #define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) |
| |
| /* aq_create_cq_cmd */ |
| @@ -1063,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp { |
| #define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) |
| |
| /* get_set_feature_common_desc */ |
| -#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) |
| +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) |
| |
| /* get_feature_link_desc */ |
| -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) |
| -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 |
| -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) |
| +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) |
| +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 |
| +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) |
| |
| /* feature_offload_desc */ |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) |
| @@ -1080,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp { |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 |
| #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 |
| -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 |
| +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) |
| |
| /* feature_rss_flow_hash_function */ |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) |
| @@ -1100,32 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp { |
| |
| /* feature_rss_flow_hash_input */ |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 |
| -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) |
| +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 |
| -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) |
| +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 |
| #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) |
| |
| /* host_info */ |
| -#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) |
| -#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 |
| -#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) |
| -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 |
| -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) |
| -#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 |
| -#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) |
| -#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) |
| -#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 |
| -#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) |
| -#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 |
| -#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) |
| +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) |
| +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 |
| +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) |
| +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 |
| +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) |
| +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 |
| +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) |
| +#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) |
| +#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 |
| +#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) |
| +#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 |
| +#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) |
| |
| /* aenq_common_desc */ |
| -#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) |
| +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) |
| |
| /* aenq_link_change_desc */ |
| -#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) |
| +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) |
| |
| #endif /*_ENA_ADMIN_H_ */ |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |
| index 4c5ccaa13c42..00e0f056a741 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |
| @@ -33,25 +33,18 @@ |
| #define _ENA_ETH_IO_H_ |
| |
| enum ena_eth_io_l3_proto_index { |
| - ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, |
| - |
| - ENA_ETH_IO_L3_PROTO_IPV4 = 8, |
| - |
| - ENA_ETH_IO_L3_PROTO_IPV6 = 11, |
| - |
| - ENA_ETH_IO_L3_PROTO_FCOE = 21, |
| - |
| - ENA_ETH_IO_L3_PROTO_ROCE = 22, |
| + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, |
| + ENA_ETH_IO_L3_PROTO_IPV4 = 8, |
| + ENA_ETH_IO_L3_PROTO_IPV6 = 11, |
| + ENA_ETH_IO_L3_PROTO_FCOE = 21, |
| + ENA_ETH_IO_L3_PROTO_ROCE = 22, |
| }; |
| |
| enum ena_eth_io_l4_proto_index { |
| - ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, |
| - |
| - ENA_ETH_IO_L4_PROTO_TCP = 12, |
| - |
| - ENA_ETH_IO_L4_PROTO_UDP = 13, |
| - |
| - ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, |
| + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, |
| + ENA_ETH_IO_L4_PROTO_TCP = 12, |
| + ENA_ETH_IO_L4_PROTO_UDP = 13, |
| + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, |
| }; |
| |
| struct ena_eth_io_tx_desc { |
| @@ -307,116 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg { |
| }; |
| |
| /* tx_desc */ |
| -#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) |
| -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 |
| -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) |
| -#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 |
| -#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) |
| -#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 |
| -#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) |
| -#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 |
| -#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) |
| -#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 |
| -#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) |
| -#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 |
| -#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) |
| -#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) |
| -#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 |
| -#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) |
| -#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 |
| -#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) |
| -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 |
| -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) |
| -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 |
| -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) |
| -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 |
| -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) |
| -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 |
| -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) |
| -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 |
| -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) |
| -#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 |
| -#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) |
| -#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) |
| -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 |
| -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) |
| +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) |
| +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 |
| +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) |
| +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 |
| +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) |
| +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 |
| +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) |
| +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 |
| +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) |
| +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 |
| +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) |
| +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 |
| +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) |
| +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) |
| +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 |
| +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) |
| +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 |
| +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) |
| +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 |
| +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) |
| +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 |
| +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) |
| +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 |
| +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) |
| +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 |
| +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) |
| +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 |
| +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) |
| +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 |
| +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) |
| +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) |
| +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 |
| +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) |
| |
| /* tx_meta_desc */ |
| -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) |
| -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 |
| -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) |
| -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 |
| -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) |
| -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 |
| -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) |
| -#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 |
| -#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) |
| -#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 |
| -#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) |
| -#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 |
| -#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) |
| -#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 |
| -#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) |
| -#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 |
| -#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) |
| -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 |
| -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) |
| -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) |
| -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) |
| -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 |
| -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) |
| -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 |
| -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) |
| -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 |
| -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) |
| +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) |
| +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 |
| +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) |
| +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 |
| +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) |
| +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 |
| +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) |
| +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 |
| +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) |
| +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 |
| +#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) |
| +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 |
| +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) |
| +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 |
| +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) |
| +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 |
| +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) |
| +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 |
| +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) |
| +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) |
| +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) |
| +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 |
| +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) |
| +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 |
| +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) |
| +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 |
| +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) |
| |
| /* tx_cdesc */ |
| -#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) |
| +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) |
| |
| /* rx_desc */ |
| -#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) |
| -#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 |
| -#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) |
| -#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 |
| -#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) |
| -#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 |
| -#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) |
| +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) |
| +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 |
| +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) |
| +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 |
| +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) |
| +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 |
| +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) |
| |
| /* rx_cdesc_base */ |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) |
| -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 |
| -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) |
| +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 |
| +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) |
| |
| /* intr_reg */ |
| -#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) |
| -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 |
| -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) |
| -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 |
| -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) |
| +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) |
| +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 |
| +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) |
| +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 |
| +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) |
| |
| /* numa_node_cfg_reg */ |
| -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) |
| -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 |
| -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) |
| +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) |
| +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 |
| +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) |
| |
| #endif /*_ENA_ETH_IO_H_ */ |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h |
| index 48ca97fbe7bc..04fcafcc059c 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h |
| @@ -33,137 +33,125 @@ |
| #define _ENA_REGS_H_ |
| |
| enum ena_regs_reset_reason_types { |
| - ENA_REGS_RESET_NORMAL = 0, |
| - |
| - ENA_REGS_RESET_KEEP_ALIVE_TO = 1, |
| - |
| - ENA_REGS_RESET_ADMIN_TO = 2, |
| - |
| - ENA_REGS_RESET_MISS_TX_CMPL = 3, |
| - |
| - ENA_REGS_RESET_INV_RX_REQ_ID = 4, |
| - |
| - ENA_REGS_RESET_INV_TX_REQ_ID = 5, |
| - |
| - ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, |
| - |
| - ENA_REGS_RESET_INIT_ERR = 7, |
| - |
| - ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, |
| - |
| - ENA_REGS_RESET_OS_TRIGGER = 9, |
| - |
| - ENA_REGS_RESET_OS_NETDEV_WD = 10, |
| - |
| - ENA_REGS_RESET_SHUTDOWN = 11, |
| - |
| - ENA_REGS_RESET_USER_TRIGGER = 12, |
| - |
| - ENA_REGS_RESET_GENERIC = 13, |
| - |
| - ENA_REGS_RESET_MISS_INTERRUPT = 14, |
| + ENA_REGS_RESET_NORMAL = 0, |
| + ENA_REGS_RESET_KEEP_ALIVE_TO = 1, |
| + ENA_REGS_RESET_ADMIN_TO = 2, |
| + ENA_REGS_RESET_MISS_TX_CMPL = 3, |
| + ENA_REGS_RESET_INV_RX_REQ_ID = 4, |
| + ENA_REGS_RESET_INV_TX_REQ_ID = 5, |
| + ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, |
| + ENA_REGS_RESET_INIT_ERR = 7, |
| + ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, |
| + ENA_REGS_RESET_OS_TRIGGER = 9, |
| + ENA_REGS_RESET_OS_NETDEV_WD = 10, |
| + ENA_REGS_RESET_SHUTDOWN = 11, |
| + ENA_REGS_RESET_USER_TRIGGER = 12, |
| + ENA_REGS_RESET_GENERIC = 13, |
| + ENA_REGS_RESET_MISS_INTERRUPT = 14, |
| }; |
| |
| /* ena_registers offsets */ |
| -#define ENA_REGS_VERSION_OFF 0x0 |
| -#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 |
| -#define ENA_REGS_CAPS_OFF 0x8 |
| -#define ENA_REGS_CAPS_EXT_OFF 0xc |
| -#define ENA_REGS_AQ_BASE_LO_OFF 0x10 |
| -#define ENA_REGS_AQ_BASE_HI_OFF 0x14 |
| -#define ENA_REGS_AQ_CAPS_OFF 0x18 |
| -#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 |
| -#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 |
| -#define ENA_REGS_ACQ_CAPS_OFF 0x28 |
| -#define ENA_REGS_AQ_DB_OFF 0x2c |
| -#define ENA_REGS_ACQ_TAIL_OFF 0x30 |
| -#define ENA_REGS_AENQ_CAPS_OFF 0x34 |
| -#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 |
| -#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c |
| -#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 |
| -#define ENA_REGS_AENQ_TAIL_OFF 0x44 |
| -#define ENA_REGS_INTR_MASK_OFF 0x4c |
| -#define ENA_REGS_DEV_CTL_OFF 0x54 |
| -#define ENA_REGS_DEV_STS_OFF 0x58 |
| -#define ENA_REGS_MMIO_REG_READ_OFF 0x5c |
| -#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 |
| -#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 |
| -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 |
| + |
| +/* 0 base */ |
| +#define ENA_REGS_VERSION_OFF 0x0 |
| +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 |
| +#define ENA_REGS_CAPS_OFF 0x8 |
| +#define ENA_REGS_CAPS_EXT_OFF 0xc |
| +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 |
| +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 |
| +#define ENA_REGS_AQ_CAPS_OFF 0x18 |
| +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 |
| +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 |
| +#define ENA_REGS_ACQ_CAPS_OFF 0x28 |
| +#define ENA_REGS_AQ_DB_OFF 0x2c |
| +#define ENA_REGS_ACQ_TAIL_OFF 0x30 |
| +#define ENA_REGS_AENQ_CAPS_OFF 0x34 |
| +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 |
| +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c |
| +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 |
| +#define ENA_REGS_AENQ_TAIL_OFF 0x44 |
| +#define ENA_REGS_INTR_MASK_OFF 0x4c |
| +#define ENA_REGS_DEV_CTL_OFF 0x54 |
| +#define ENA_REGS_DEV_STS_OFF 0x58 |
| +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c |
| +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 |
| +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 |
| +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 |
| |
| /* version register */ |
| -#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff |
| -#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 |
| -#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 |
| +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff |
| +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 |
| +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 |
| |
| /* controller_version register */ |
| -#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff |
| -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 |
| -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 |
| -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 |
| -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 |
| -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 |
| -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 |
| +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff |
| +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 |
| +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 |
| +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 |
| +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 |
| +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 |
| +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 |
| |
| /* caps register */ |
| -#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 |
| -#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 |
| -#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e |
| -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 |
| -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 |
| -#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 |
| -#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 |
| +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 |
| +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 |
| +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e |
| +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 |
| +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 |
| +#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 |
| +#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 |
| |
| /* aq_caps register */ |
| -#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff |
| -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 |
| -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 |
| +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff |
| +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 |
| +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 |
| |
| /* acq_caps register */ |
| -#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff |
| -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 |
| -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 |
| +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff |
| +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 |
| +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 |
| |
| /* aenq_caps register */ |
| -#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff |
| -#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 |
| -#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 |
| +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff |
| +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 |
| +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 |
| |
| /* dev_ctl register */ |
| -#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 |
| -#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 |
| -#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 |
| -#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 |
| -#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 |
| -#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 |
| -#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 |
| -#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 |
| -#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 |
| +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 |
| +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 |
| +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 |
| +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 |
| +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 |
| +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 |
| +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 |
| +#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 |
| +#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 |
| |
| /* dev_sts register */ |
| -#define ENA_REGS_DEV_STS_READY_MASK 0x1 |
| -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 |
| -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 |
| -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 |
| -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 |
| -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 |
| -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 |
| -#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 |
| -#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 |
| -#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 |
| -#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 |
| -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 |
| -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 |
| -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 |
| -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 |
| +#define ENA_REGS_DEV_STS_READY_MASK 0x1 |
| +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 |
| +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 |
| +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 |
| +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 |
| +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 |
| +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 |
| +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 |
| +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 |
| +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 |
| +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 |
| +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 |
| +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 |
| +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 |
| +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 |
| |
| /* mmio_reg_read register */ |
| -#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff |
| -#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 |
| -#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 |
| +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff |
| +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 |
| +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 |
| |
| /* rss_ind_entry_update register */ |
| -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff |
| -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 |
| -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 |
| +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff |
| +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 |
| +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 |
| |
| #endif /*_ENA_REGS_H_ */ |
| -- |
| 2.19.1 |
| |
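| The renumbered defines in the patch above all follow the kernel's |
| usual SHIFT/MASK idiom: a register field is read by masking the |
| 32-bit word and shifting the result down. A minimal sketch of that |
| idiom, using the caps-register fields from this patch; the |
| ena_field_get() helper and the caps variable are illustrative, not |
| part of the driver: |
| |
| #include <linux/types.h> |
| |
| static inline u32 ena_field_get(u32 reg, u32 mask, u32 shift) |
| { |
| 	return (reg & mask) >> shift; |
| } |
| |
| /* caps holds the value read from ENA_REGS_CAPS_OFF; the DMA |
|  * address width occupies bits 15:8 of that register. |
|  */ |
| u32 dma_width = ena_field_get(caps, |
| 			      ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK, |
| 			      ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT); |
| |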
| From 6c2e2e6731d872a227226140780c8ecd9712bf4e Mon Sep 17 00:00:00 2001 |
| From: Netanel Belgazal <netanel@amazon.com> |
| Date: Wed, 17 Oct 2018 10:04:21 +0000 |
| Subject: [PATCH 14/16] net: ena: Fix Kconfig dependency on X86 |
| |
| The Kconfig dependency on X86 is too restrictive: the ENA driver |
| only requires a little-endian CPU. |
| |
| Change the dependency to little-endian CPUs (a short sketch of the |
| endianness assumption follows this patch). |
| |
| Signed-off-by: Netanel Belgazal <netanel@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/Kconfig | 2 +- |
| 1 file changed, 1 insertion(+), 1 deletion(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig |
| index 99b30353541a..9e87d7b8360f 100644 |
| --- a/drivers/net/ethernet/amazon/Kconfig |
| +++ b/drivers/net/ethernet/amazon/Kconfig |
| @@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON |
| |
| config ENA_ETHERNET |
| tristate "Elastic Network Adapter (ENA) support" |
| - depends on (PCI_MSI && X86) |
| + depends on PCI_MSI && !CPU_BIG_ENDIAN |
| ---help--- |
| This driver supports Elastic Network Adapter (ENA)" |
| |
| -- |
| 2.19.1 |
| |
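| The endianness requirement presumably stems from the ENA device |
| interface: descriptors are laid out in little-endian byte order and |
| read through plain integer fields. A hedged sketch of that |
| assumption -- struct dev_cdesc and read_flags() are hypothetical |
| names, not driver code: |
| |
| #include <linux/types.h> |
| |
| struct dev_cdesc { |
| 	u32 flags;	/* written little-endian by the device */ |
| }; |
| |
| static u32 read_flags(const struct dev_cdesc *cdesc) |
| { |
| 	/* Correct only on a little-endian CPU, hence the Kconfig |
| 	 * change above; a fully portable layout would declare the |
| 	 * field as __le32 and convert it with le32_to_cpu(). |
| 	 */ |
| 	return cdesc->flags; |
| } |
| |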
| From 628f8c52965bb8734f76bbeb2b24b20bc48d2180 Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Wed, 17 Oct 2018 15:33:23 +0300 |
| Subject: [PATCH 15/16] net: ena: enable Low Latency Queues |
| |
| Use the new LLQ feature descriptor: take the supported LLQ count |
| and depth from get_feat_ctx->llq instead of the legacy max_queues |
| fields (a condensed sketch follows this patch). |
| |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_netdev.c | 18 ++++-------------- |
| 1 file changed, 4 insertions(+), 14 deletions(-) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| index 284a0a612131..18956e7604a3 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
| @@ -3022,20 +3022,10 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, |
| int io_sq_num, io_queue_num; |
| |
| /* In case of LLQ use the llq number in the get feature cmd */ |
| - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
| - io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num; |
| - |
| - if (io_sq_num == 0) { |
| - dev_err(&pdev->dev, |
| - "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n"); |
| - |
| - ena_dev->tx_mem_queue_type = |
| - ENA_ADMIN_PLACEMENT_POLICY_HOST; |
| - io_sq_num = get_feat_ctx->max_queues.max_sq_num; |
| - } |
| - } else { |
| + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
| + io_sq_num = get_feat_ctx->llq.max_llq_num; |
| + else |
| io_sq_num = get_feat_ctx->max_queues.max_sq_num; |
| - } |
| |
| io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); |
| io_queue_num = min_t(int, io_queue_num, io_sq_num); |
| @@ -3238,7 +3228,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev, |
| |
| if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
| queue_size = min_t(u32, queue_size, |
| - get_feat_ctx->max_queues.max_legacy_llq_depth); |
| + get_feat_ctx->llq.max_llq_depth); |
| |
| queue_size = rounddown_pow_of_two(queue_size); |
| |
| -- |
| 2.19.1 |
| |
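| Condensing the first hunk above, the queue-count selection after |
| this patch reduces to the following sketch (names are taken from |
| the diff; the zero-LLQ fallback is assumed to be handled earlier, |
| during LLQ negotiation, which is why it could be dropped here): |
| |
| if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
| 	io_sq_num = get_feat_ctx->llq.max_llq_num; |
| else |
| 	io_sq_num = get_feat_ctx->max_queues.max_sq_num; |
| |
| /* never exceed the online CPUs, the driver cap, or the device SQs */ |
| io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); |
| io_queue_num = min_t(int, io_queue_num, io_sq_num); |
| |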
| From 3d31772392bcd5c9fc8a493df6c9f58ca6930e50 Mon Sep 17 00:00:00 2001 |
| From: Arthur Kiyanovski <akiyano@amazon.com> |
| Date: Sun, 21 Oct 2018 18:07:14 +0300 |
| Subject: [PATCH 16/16] net: ena: fix compilation error in xtensa architecture |
| |
| linux/prefetch.h is never explicitly included in ena_com, although |
| functions from it, such as prefetchw(), are used throughout ena_com. |
| This is an inclusion bug, and we fix it here by explicitly including |
| linux/prefetch.h. The bug was exposed when the driver was compiled |
| for the xtensa architecture. |
| |
| Fixes: 689b2bdaaa14 ("net: ena: add functions for handling Low Latency Queues in ena_com") |
| Fixes: 8c590f977638 ("ena: Fix Kconfig dependency on X86") |
| Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| |
| drivers/net/ethernet/amazon/ena/ena_com.h | 1 + |
| 1 file changed, 1 insertion(+) |
| |
| diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h |
| index ae8b4857fce3..078d6f2b4f39 100644 |
| --- a/drivers/net/ethernet/amazon/ena/ena_com.h |
| +++ b/drivers/net/ethernet/amazon/ena/ena_com.h |
| @@ -38,6 +38,7 @@ |
| #include <linux/dma-mapping.h> |
| #include <linux/gfp.h> |
| #include <linux/io.h> |
| +#include <linux/prefetch.h> |
| #include <linux/sched.h> |
| #include <linux/sizes.h> |
| #include <linux/spinlock.h> |
| -- |
| 2.19.1 |
| |
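| For reference, the usage pattern the missing include broke -- |
| warm_desc() is an illustrative helper, not a function from the |
| driver: |
| |
| #include <linux/prefetch.h> |
| |
| /* prefetchw() hints that the pointed-to cache line is about to be |
|  * written; it is declared in linux/prefetch.h, so relying on an |
|  * indirect include of that header is what failed on xtensa. |
|  */ |
| static void warm_desc(void *desc) |
| { |
| 	prefetchw(desc); |
| } |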