ena module for Driver Update Program
27e81a0450e2dccb09459122752d8d1fe610cf60..fc384122a7a805dde4dbbf3c4c4ef94a5ca0f2fe
2019-01-09 CentOS Sources
import kmod-redhat-ena-2.0.2K_dup7.6-2.el7_6
fc3841 diff | tree
35 files added
1 file deleted
5414 ■■■■■ files changed
.gitignore 1 ●●●● patch | view | raw | blame | history
.kmod-redhat-ena.metadata 1 ●●●● patch | view | raw | blame | history
README.md 4 ●●●● patch | view | raw | blame | history
SOURCES/0029-force-enable-ENA_ETHERNET.patch 13 ●●●●● patch | view | raw | blame | history
SOURCES/0030-version-bump.patch 13 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_01-28_net_ena_Eliminate_duplicate_barriers_on_weakly-ordered_ar.patch 114 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_02-28_net_ena_Fix_use_of_uninitialized_DMA_address_bits_field.patch 54 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_03-28_net_ena_fix_surprise_unplug_NULL_dereference_kernel_crash.patch 48 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_04-28_net_ena_fix_driver_when_PAGE_SIZE_==_64kB.patch 96 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_05-28_net_ena_fix_device_destruction_to_gracefully_free_resourc.patch 95 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_06-28_net_ena_fix_potential_double_ena_destroy_device_.patch 57 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_07-28_net_ena_fix_missing_lock_during_device_destruction.patch 65 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_08-28_net_ena_fix_missing_calls_to_READ_ONCE.patch 59 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_09-28_net_ena_fix_incorrect_usage_of_memory_barriers.patch 189 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_10-28_net_ena_remove_ndo_poll_controller.patch 76 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_11-28_net_ena_fix_warning_in_rmmod_caused_by_double_iounmap.patch 50 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_12-28_net_ena_fix_rare_bug_when_failed_restart-resume_is_follow.patch 46 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_13-28_net_ena_fix_NULL_dereference_due_to_untimely_napi_initial.patch 57 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_14-28_net_ena_fix_auto_casting_to_boolean.patch 46 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_15-28_net_ena_minor_performance_improvement.patch 151 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_16-28_net_ena_complete_host_info_to_match_latest_ENA_spec.patch 190 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_17-28_net_ena_introduce_Low_Latency_Queues_data_structures_acco.patch 283 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_18-28_net_ena_add_functions_for_handling_Low_Latency_Queues_in_.patch 844 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_19-28_net_ena_add_functions_for_handling_Low_Latency_Queues_in_.patch 667 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_20-28_net_ena_use_CSUM_CHECKED_device_indication_to_report_skb..patch 137 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_21-28_net_ena_explicit_casting_and_initialization,_and_clearer_.patch 240 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_22-28_net_ena_limit_refill_Rx_threshold_to_256_to_avoid_latency.patch 66 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_23-28_net_ena_change_rx_copybreak_default_to_reduce_kernel_memo.patch 40 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_24-28_net_ena_remove_redundant_parameter_in_ena_com_admin_init_.patch 88 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_25-28_net_ena_update_driver_version_to_2.0.1.patch 40 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_26-28_net_ena_fix_indentations_in_ena_defs_for_better_readabili.patch 1012 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_28-28_net_ena_enable_Low_Latency_Queues.patch 62 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_29-31_net_ena_fix_crash_during_failed_resume_from_hibernation.patch 47 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_30-31_net_ena_fix_crash_during_ena_remove_.patch 110 ●●●●● patch | view | raw | blame | history
SOURCES/_RHEL7_31-31_net_ena_update_driver_version_from_2.0.1_to_2.0.2.patch 37 ●●●●● patch | view | raw | blame | history
SPECS/ena.spec 316 ●●●●● patch | view | raw | blame | history
.gitignore
New file
@@ -0,0 +1 @@
SOURCES/ena-redhat-2.0.2K_dup7.6.tar.bz2
.kmod-redhat-ena.metadata
New file
@@ -0,0 +1 @@
3dd04b6061a127b73514d89f8daa08aa39804d5a SOURCES/ena-redhat-2.0.2K_dup7.6.tar.bz2
README.md
File was deleted
SOURCES/0029-force-enable-ENA_ETHERNET.patch
New file
@@ -0,0 +1,13 @@
Index: src/drivers/net/ethernet/amazon/ena/Makefile
===================================================================
--- src.orig/drivers/net/ethernet/amazon/ena/Makefile    2018-11-01 09:40:31.083390484 +0100
+++ src/drivers/net/ethernet/amazon/ena/Makefile    2018-11-01 11:42:55.781568815 +0100
@@ -2,6 +2,8 @@
 # Makefile for the Elastic Network Adapter (ENA) device drivers.
 #
+CONFIG_ENA_ETHERNET := m
+
 obj-$(CONFIG_ENA_ETHERNET) += ena.o
 ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
SOURCES/0030-version-bump.patch
New file
@@ -0,0 +1,13 @@
Index: src/drivers/net/ethernet/amazon/ena/ena_netdev.h
===================================================================
--- src.orig/drivers/net/ethernet/amazon/ena/ena_netdev.h    2018-11-01 09:00:56.299692336 +0100
+++ src/drivers/net/ethernet/amazon/ena/ena_netdev.h    2018-11-01 09:07:38.561437090 +0100
@@ -52,7 +52,7 @@
 #define DRV_MODULE_VERSION \
     __stringify(DRV_MODULE_VER_MAJOR) "."    \
     __stringify(DRV_MODULE_VER_MINOR) "."    \
-    __stringify(DRV_MODULE_VER_SUBMINOR) "K"
+    __stringify(DRV_MODULE_VER_SUBMINOR) "K_dup7.6"
 #endif
 #define DEVICE_NAME    "Elastic Network Adapter (ENA)"
SOURCES/_RHEL7_01-28_net_ena_Eliminate_duplicate_barriers_on_weakly-ordered_ar.patch
New file
@@ -0,0 +1,114 @@
Date: Tue, 30 Oct 2018 13:25:16 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 01/28] net: ena: Eliminate duplicate barriers on
 weakly-ordered archs
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Code includes barrier() followed by writel(). writel() already has a
barrier on some architectures like arm64.
This ends up CPU observing two barriers back to back before executing the
register write.
Create a new wrapper function with relaxed write operator. Use the new
wrapper when a write is following a barrier().
Since code already has an explicit barrier call, changing writel() to
writel_relaxed() and adding mmiowb() for ordering protection.
Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 6d2e1a8d5e25e5f4563f5ea24bcb5da1ae261b26)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c     | 8 ++++++--
 drivers/net/ethernet/amazon/ena/ena_eth_com.h | 8 ++++++--
 drivers/net/ethernet/amazon/ena/ena_netdev.c  | 5 +++--
 3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index bf2de5298005..1b9d3130af4d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -631,8 +631,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
      */
     wmb();
-    writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+    writel_relaxed(mmio_read_reg,
+               ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+    mmiowb();
     for (i = 0; i < timeout; i++) {
         if (read_resp->req_id == mmio_read->seq_num)
             break;
@@ -1826,7 +1828,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
     /* write the aenq doorbell after all AENQ descriptors were read */
     mb();
-    writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+    writel_relaxed((u32)aenq->head,
+               dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+    mmiowb();
 }
 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..6fdc753d9483 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,7 +107,8 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
     return io_sq->q_depth - 1 - cnt;
 }
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
+                        bool relaxed)
 {
     u16 tail;
@@ -116,7 +117,10 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
     pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
          io_sq->qid, tail);
-    writel(tail, io_sq->db_addr);
+    if (relaxed)
+        writel_relaxed(tail, io_sq->db_addr);
+    else
+        writel(tail, io_sq->db_addr);
     return 0;
 }
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c8da977ca435..4ae8749699bb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -556,7 +556,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
          * issue a doorbell
          */
         wmb();
-        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
+        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
+        mmiowb();
     }
     rx_ring->next_to_use = next_to_use;
@@ -2144,7 +2145,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     if (netif_xmit_stopped(txq) || !skb->xmit_more) {
         /* trigger the dma engine */
-        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
         u64_stats_update_begin(&tx_ring->syncp);
         tx_ring->tx_stats.doorbells++;
         u64_stats_update_end(&tx_ring->syncp);
--
2.17.1
SOURCES/_RHEL7_02-28_net_ena_Fix_use_of_uninitialized_DMA_address_bits_field.patch
New file
@@ -0,0 +1,54 @@
Date: Tue, 30 Oct 2018 13:25:17 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 02/28] net: ena: Fix use of uninitialized DMA address bits
 field
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
UBSAN triggers the following undefined behaviour warnings:
[...]
[   13.236124] UBSAN: Undefined behaviour in drivers/net/ethernet/amazon/ena/ena_eth_com.c:468:22
[   13.240043] shift exponent 64 is too large for 64-bit type 'long long unsigned int'
[...]
[   13.744769] UBSAN: Undefined behaviour in drivers/net/ethernet/amazon/ena/ena_eth_com.c:373:4
[   13.748694] shift exponent 64 is too large for 64-bit type 'long long unsigned int'
[...]
When splitting the address to high and low, GENMASK_ULL is used to generate
a bitmask with dma_addr_bits field from io_sq (in ena_com_prepare_tx and
ena_com_add_single_rx_desc).
The problem is that dma_addr_bits is not initialized with a proper value
(besides being cleared in ena_com_create_io_queue).
Assign dma_addr_bits the correct value that is stored in ena_dev when
initializing the SQ.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
Signed-off-by: Gal Pressman <pressmangal@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 101f0cd4f2216d32f1b8a75a2154cf3997484ee2)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1b9d3130af4d..17f12c18d225 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
     memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+    io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
     io_sq->desc_entry_size =
         (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
         sizeof(struct ena_eth_io_tx_desc) :
--
2.17.1
SOURCES/_RHEL7_03-28_net_ena_fix_surprise_unplug_NULL_dereference_kernel_crash.patch
New file
@@ -0,0 +1,48 @@
Date: Tue, 30 Oct 2018 13:25:18 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 03/28] net: ena: fix surprise unplug NULL dereference kernel
 crash
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Starting with driver version 1.5.0, in case of a surprise device
unplug, there is a race caused by invoking ena_destroy_device()
from two different places. As a result, the readless register might
be accessed after it was destroyed.
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 772ed869f535b4ec2b134645c951ff22de4d3f79)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4ae8749699bb..9eb1e2da8a2d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3429,12 +3429,12 @@ static void ena_remove(struct pci_dev *pdev)
         netdev->rx_cpu_rmap = NULL;
     }
 #endif /* CONFIG_RFS_ACCEL */
-
-    unregister_netdev(netdev);
     del_timer_sync(&adapter->timer_service);
     cancel_work_sync(&adapter->reset_task);
+    unregister_netdev(netdev);
+
     /* Reset the device only if the device is running. */
     if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
         ena_com_dev_reset(ena_dev, adapter->reset_reason);
--
2.17.1
SOURCES/_RHEL7_04-28_net_ena_fix_driver_when_PAGE_SIZE_==_64kB.patch
New file
@@ -0,0 +1,96 @@
Date: Tue, 30 Oct 2018 13:25:19 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 04/28] net: ena: fix driver when PAGE_SIZE == 64kB
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
The buffer length field in the ena rx descriptor is 16 bit, and the
current driver passes a full page in each ena rx descriptor.
When PAGE_SIZE equals 64kB or more, the buffer length field becomes
zero.
To solve this issue, limit the ena Rx descriptor to use 16kB even
when allocating 64kB kernel pages. This change would not impact ena
device functionality, as 16kB is still larger than maximum MTU.
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit ef5b0771d247379c90c8bf1332ff32f7f74bff7f)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 10 +++++-----
 drivers/net/ethernet/amazon/ena/ena_netdev.h | 11 +++++++++++
 2 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 9eb1e2da8a2d..773834b87a8c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
         return -ENOMEM;
     }
-    dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+    dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                DMA_FROM_DEVICE);
     if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
         u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
     rx_info->page_offset = 0;
     ena_buf = &rx_info->ena_buf;
     ena_buf->paddr = dma;
-    ena_buf->len = PAGE_SIZE;
+    ena_buf->len = ENA_PAGE_SIZE;
     return 0;
 }
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
         return;
     }
-    dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+    dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
                DMA_FROM_DEVICE);
     __free_page(page);
@@ -917,10 +917,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
     do {
         dma_unmap_page(rx_ring->dev,
                    dma_unmap_addr(&rx_info->ena_buf, paddr),
-                   PAGE_SIZE, DMA_FROM_DEVICE);
+                   ENA_PAGE_SIZE, DMA_FROM_DEVICE);
         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-                rx_info->page_offset, len, PAGE_SIZE);
+                rx_info->page_offset, len, ENA_PAGE_SIZE);
         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
               "rx skb updated. len %d. data_len %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 3b739f9d299b..c9c85332d134 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -353,4 +353,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 int ena_get_sset_count(struct net_device *netdev, int sset);
+/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
+ * driver passas 0.
+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
--
2.17.1
SOURCES/_RHEL7_05-28_net_ena_fix_device_destruction_to_gracefully_free_resourc.patch
New file
@@ -0,0 +1,95 @@
Date: Tue, 30 Oct 2018 13:25:20 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 05/28] net: ena: fix device destruction to gracefully free
 resources
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
When ena_destroy_device() is called from ena_suspend(), the device is
still reachable from the driver. Therefore, the driver can send a command
to the device to free all resources.
However, in all other cases of calling ena_destroy_device(), the device is
potentially in an error state and unreachable from the driver. In these
cases the driver must not send commands to the device.
The current implementation does not request resource freeing from the
device even when possible. We add the graceful parameter to
ena_destroy_device() to enable resource freeing when possible, and
use it in ena_suspend().
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit cfa324a514233b28a6934de619183eee941f02d7)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 773834b87a8c..5b32fbc6ed5d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 static int ena_rss_init_default(struct ena_adapter *adapter);
 static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
 static int ena_restore_device(struct ena_adapter *adapter);
 static void ena_tx_timeout(struct net_device *dev)
@@ -1893,7 +1893,7 @@ static int ena_close(struct net_device *netdev)
               "Destroy failure, restarting device\n");
         ena_dump_stats_to_dmesg(adapter);
         /* rtnl lock already obtained in dev_ioctl() layer */
-        ena_destroy_device(adapter);
+        ena_destroy_device(adapter, false);
         ena_restore_device(adapter);
     }
@@ -2543,7 +2543,7 @@ err_disable_msix:
     return rc;
 }
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 {
     struct net_device *netdev = adapter->netdev;
     struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -2556,7 +2556,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
     dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
     adapter->dev_up_before_reset = dev_up;
-    ena_com_set_admin_running_state(ena_dev, false);
+    if (!graceful)
+        ena_com_set_admin_running_state(ena_dev, false);
     if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
         ena_down(adapter);
@@ -2658,7 +2659,7 @@ static void ena_fw_reset_device(struct work_struct *work)
         return;
     }
     rtnl_lock();
-    ena_destroy_device(adapter);
+    ena_destroy_device(adapter, false);
     ena_restore_device(adapter);
     rtnl_unlock();
 }
@@ -3487,7 +3488,7 @@ static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
             "ignoring device reset request as the device is being suspended\n");
         clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
     }
-    ena_destroy_device(adapter);
+    ena_destroy_device(adapter, true);
     rtnl_unlock();
     return 0;
 }
--
2.17.1
SOURCES/_RHEL7_06-28_net_ena_fix_potential_double_ena_destroy_device_.patch
New file
@@ -0,0 +1,57 @@
Date: Tue, 30 Oct 2018 13:25:21 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 06/28] net: ena: fix potential double ena_destroy_device()
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
ena_destroy_device() can potentially be called twice.
To avoid this, check that the device is running and
only then proceed destroying it.
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit fe870c77efdf8682252545cbd3d29800d8379efc)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 5b32fbc6ed5d..41e59e5db064 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2549,6 +2549,9 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
     struct ena_com_dev *ena_dev = adapter->ena_dev;
     bool dev_up;
+    if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+        return;
+
     netif_carrier_off(netdev);
     del_timer_sync(&adapter->timer_service);
@@ -2585,6 +2588,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
     adapter->reset_reason = ENA_REGS_RESET_NORMAL;
     clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+    clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 }
 static int ena_restore_device(struct ena_adapter *adapter)
@@ -2629,6 +2633,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
         }
     }
+    set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
     mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
     dev_err(&pdev->dev, "Device reset completed successfully\n");
--
2.17.1
SOURCES/_RHEL7_07-28_net_ena_fix_missing_lock_during_device_destruction.patch
New file
@@ -0,0 +1,65 @@
Date: Tue, 30 Oct 2018 13:25:22 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 07/28] net: ena: fix missing lock during device destruction
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
acquire the rtnl_lock during device destruction to avoid
using partially destroyed device.
ena_remove() shares almost the same logic as ena_destroy_device(),
so use ena_destroy_device() and avoid duplications.
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 944b28aa2982b4590d4d4dfc777cf85135dca2c0)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 41e59e5db064..9497db03bec8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3441,24 +3441,18 @@ static void ena_remove(struct pci_dev *pdev)
     unregister_netdev(netdev);
-    /* Reset the device only if the device is running. */
+    /* If the device is running then we want to make sure the device will be
+     * reset to make sure no more events will be issued by the device.
+     */
     if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-        ena_com_dev_reset(ena_dev, adapter->reset_reason);
-
-    ena_free_mgmnt_irq(adapter);
+        set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-    ena_disable_msix(adapter);
+    rtnl_lock();
+    ena_destroy_device(adapter, true);
+    rtnl_unlock();
     free_netdev(netdev);
-    ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-    ena_com_abort_admin_commands(ena_dev);
-
-    ena_com_wait_for_abort_completion(ena_dev);
-
-    ena_com_admin_destroy(ena_dev);
-
     ena_com_rss_destroy(ena_dev);
     ena_com_delete_debug_area(ena_dev);
--
2.17.1
SOURCES/_RHEL7_08-28_net_ena_fix_missing_calls_to_READ_ONCE.patch
New file
@@ -0,0 +1,59 @@
Date: Tue, 30 Oct 2018 13:25:23 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 08/28] net: ena: fix missing calls to READ_ONCE
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Add READ_ONCE calls where necessary (for example when iterating
over a memory field that gets updated by the hardware).
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 28abf4e9c9201eda5c4d29ea609d07e877b464b8)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 17f12c18d225..c37deef3bcf1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -459,7 +459,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
     cqe = &admin_queue->cq.entries[head_masked];
     /* Go over all the completions */
-    while ((cqe->acq_common_descriptor.flags &
+    while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
             ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
         /* Do not read the rest of the completion entry before the
          * phase bit was validated
@@ -637,7 +637,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
     mmiowb();
     for (i = 0; i < timeout; i++) {
-        if (read_resp->req_id == mmio_read->seq_num)
+        if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
             break;
         udelay(1);
@@ -1796,8 +1796,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
     aenq_common = &aenq_e->aenq_common_desc;
     /* Go over all the events */
-    while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-           phase) {
+    while ((READ_ONCE(aenq_common->flags) &
+        ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
         pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
              aenq_common->group, aenq_common->syndrom,
              (u64)aenq_common->timestamp_low +
--
2.17.1
SOURCES/_RHEL7_09-28_net_ena_fix_incorrect_usage_of_memory_barriers.patch
New file
@@ -0,0 +1,189 @@
Date: Tue, 30 Oct 2018 13:25:24 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 09/28] net: ena: fix incorrect usage of memory barriers
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Added memory barriers where they were missing to support multiple
architectures, and removed redundant ones.
As part of removing the redundant memory barriers and improving
performance, we moved to more relaxed versions of memory barriers,
as well as to the more relaxed version of writel - writel_relaxed,
while maintaining correctness.
Signed-off-by: Netanel Belgazal <netanel@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 37dff155dcf57f6c08bf1641c5ddf9abd45f2b1f)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c     | 16 +++++-----
 drivers/net/ethernet/amazon/ena/ena_eth_com.c |  6 ++++
 drivers/net/ethernet/amazon/ena/ena_eth_com.h |  8 ++---
 drivers/net/ethernet/amazon/ena/ena_netdev.c  | 30 +++++++------------
 4 files changed, 26 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index c37deef3bcf1..7635c38e77dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -464,7 +464,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
         /* Do not read the rest of the completion entry before the
          * phase bit was validated
          */
-        rmb();
+        dma_rmb();
         ena_com_handle_single_admin_completion(admin_queue, cqe);
         head_masked++;
@@ -627,15 +627,8 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
     mmio_read_reg |= mmio_read->seq_num &
             ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
-    /* make sure read_resp->req_id get updated before the hw can write
-     * there
-     */
-    wmb();
-
-    writel_relaxed(mmio_read_reg,
-               ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+    writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
-    mmiowb();
     for (i = 0; i < timeout; i++) {
         if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
             break;
@@ -1798,6 +1791,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
     /* Go over all the events */
     while ((READ_ONCE(aenq_common->flags) &
         ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+        /* Make sure the phase bit (ownership) is as expected before
+         * reading the rest of the descriptor.
+         */
+        dma_rmb();
+
         pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
              aenq_common->group, aenq_common->syndrom,
              (u64)aenq_common->timestamp_low +
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ea149c134e15..1c682b76190f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
     if (desc_phase != expected_phase)
         return NULL;
+    /* Make sure we read the rest of the descriptor after the phase bit
+     * has been read
+     */
+    dma_rmb();
+
     return cdesc;
 }
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
     if (cdesc_phase != expected_phase)
         return -EAGAIN;
+    dma_rmb();
     if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
         pr_err("Invalid req id %d\n", cdesc->req_id);
         return -EINVAL;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 6fdc753d9483..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
     return io_sq->q_depth - 1 - cnt;
 }
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-                        bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
     u16 tail;
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
     pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
          io_sq->qid, tail);
-    if (relaxed)
-        writel_relaxed(tail, io_sq->db_addr);
-    else
-        writel(tail, io_sq->db_addr);
+    writel(tail, io_sq->db_addr);
     return 0;
 }
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 9497db03bec8..4ca0af957654 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                 rx_ring->qid, i, num);
     }
-    if (likely(i)) {
-        /* Add memory barrier to make sure the desc were written before
-         * issue a doorbell
-         */
-        wmb();
-        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-        mmiowb();
-    }
+    /* ena_com_write_sq_doorbell issues a wmb() */
+    if (likely(i))
+        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
     rx_ring->next_to_use = next_to_use;
@@ -2105,12 +2100,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
         tx_ring->ring_size);
-    /* This WMB is aimed to:
-     * 1 - perform smp barrier before reading next_to_completion
-     * 2 - make sure the desc were written before trigger DB
-     */
-    wmb();
-
     /* stop the queue when no more space available, the packet can have up
      * to sgl_size + 2. one for the meta descriptor and one for header
      * (if the header is larger than tx_max_header_size).
@@ -2129,10 +2118,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
          * stop the queue but meanwhile clean_tx_irq updates
          * next_to_completion and terminates.
          * The queue will remain stopped forever.
-         * To solve this issue this function perform rmb, check
-         * the wakeup condition and wake up the queue if needed.
+         * To solve this issue add a mb() to make sure that
+         * netif_tx_stop_queue() write is vissible before checking if
+         * there is additional space in the queue.
          */
-        smp_rmb();
+        smp_mb();
         if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
                 > ENA_TX_WAKEUP_THRESH) {
@@ -2144,8 +2134,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     }
     if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-        /* trigger the dma engine */
-        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+        /* trigger the dma engine. ena_com_write_sq_doorbell()
+         * has a mb
+         */
+        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
         u64_stats_update_begin(&tx_ring->syncp);
         tx_ring->tx_stats.doorbells++;
         u64_stats_update_end(&tx_ring->syncp);
--
2.17.1
SOURCES/_RHEL7_10-28_net_ena_remove_ndo_poll_controller.patch
New file
@@ -0,0 +1,76 @@
Date: Tue, 30 Oct 2018 13:25:25 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 10/28] net: ena: remove ndo_poll_controller
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
As diagnosed by Song Liu, ndo_poll_controller() can
be very dangerous on loaded hosts, since the cpu
calling ndo_poll_controller() might steal all NAPI
contexts (for all RX/TX queues of the NIC). This capture
can last for unlimited amount of time, since one
cpu is generally not able to drain all the queues under load.
ena uses NAPI for TX completions, so we better let core
networking stack call the napi->poll() to avoid the capture.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Netanel Belgazal <netanel@amazon.com>
Cc: Saeed Bishara <saeedb@amazon.com>
Cc: Zorik Machulsky <zorik@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 21627982e4fff76a053f4d08d7fb56e532e08d52)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 22 --------------------
 1 file changed, 22 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4ca0af957654..6c25fd11f678 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2178,25 +2178,6 @@ error_drop_packet:
     return NETDEV_TX_OK;
 }
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-    struct ena_adapter *adapter = netdev_priv(netdev);
-    int i;
-
-    /* Dont schedule NAPI if the driver is in the middle of reset
-     * or netdev is down.
-     */
-
-    if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-        test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-        return;
-
-    for (i = 0; i < adapter->num_queues; i++)
-        napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
                 void *accel_priv, select_queue_fallback_t fallback)
 {
@@ -2362,9 +2343,6 @@ static const struct net_device_ops ena_netdev_ops = {
     .extended.ndo_change_mtu        = ena_change_mtu,
     .ndo_set_mac_address    = NULL,
     .ndo_validate_addr    = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-    .ndo_poll_controller    = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 static int ena_device_validate_params(struct ena_adapter *adapter,
--
2.17.1
SOURCES/_RHEL7_11-28_net_ena_fix_warning_in_rmmod_caused_by_double_iounmap.patch
New file
@@ -0,0 +1,50 @@
Date: Tue, 30 Oct 2018 13:25:26 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 11/28] net: ena: fix warning in rmmod caused by double iounmap
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Memory mapped with devm_ioremap is automatically freed when the driver
is disconnected from the device. Therefore there is no need to
explicitly call devm_iounmap.
Fixes: 0857d92f71b6 ("net: ena: add missing unmap bars on device removal")
Fixes: 411838e7b41c ("net: ena: fix rare kernel crash when bar memory remap fails")
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit d79c3888bde6581da7ff9f9d6f581900ecb5e632)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 6c25fd11f678..f34d2147a4e2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3092,15 +3092,8 @@ err_rss_init:
 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
-    int release_bars;
+    int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
-    if (ena_dev->mem_bar)
-        devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
-    if (ena_dev->reg_bar)
-        devm_iounmap(&pdev->dev, ena_dev->reg_bar);
-
-    release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
     pci_release_selected_regions(pdev, release_bars);
 }
--
2.17.1
SOURCES/_RHEL7_12-28_net_ena_fix_rare_bug_when_failed_restart-resume_is_follow.patch
New file
@@ -0,0 +1,46 @@
Date: Tue, 30 Oct 2018 13:25:27 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 12/28] net: ena: fix rare bug when failed restart/resume is
 followed by driver removal
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
In a rare scenario when ena_device_restore() fails, followed by device
remove, an FLR will not be issued. In this case, the device will keep
sending asynchronous AENQ keep-alive events, even after driver removal,
leading to memory corruption.
Fixes: 8c5c7abdeb2d ("net: ena: add power management ops to the ENA driver")
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit d7703ddbd7c9cb1ab7c08e1b85b314ff8cea38e9)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f34d2147a4e2..b75537df9277 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2612,7 +2612,11 @@ err_disable_msix:
     ena_free_mgmnt_irq(adapter);
     ena_disable_msix(adapter);
 err_device_destroy:
+    ena_com_abort_admin_commands(ena_dev);
+    ena_com_wait_for_abort_completion(ena_dev);
     ena_com_admin_destroy(ena_dev);
+    ena_com_mmio_reg_read_request_destroy(ena_dev);
+    ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
 err:
     clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
     clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
--
2.17.1
SOURCES/_RHEL7_13-28_net_ena_fix_NULL_dereference_due_to_untimely_napi_initial.patch
New file
@@ -0,0 +1,57 @@
Date: Tue, 30 Oct 2018 13:25:28 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 13/28] net: ena: fix NULL dereference due to untimely napi
 initialization
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
napi poll functions should be initialized before running request_irq(),
to handle a rare condition where there is a pending interrupt, causing
the ISR to fire immediately while the poll function wasn't set yet,
causing a NULL dereference.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 78a55d05def95144ca5fa9a64c49b2a0636a9866)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b75537df9277..d7f050123f93 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1568,8 +1568,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
     if (rc)
         return rc;
-    ena_init_napi(adapter);
-
     ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
     ena_refill_all_rx_bufs(adapter);
@@ -1723,6 +1721,13 @@ static int ena_up(struct ena_adapter *adapter)
     ena_setup_io_intr(adapter);
+    /* napi poll functions should be initialized before running
+     * request_irq(), to handle a rare condition where there is a pending
+     * interrupt, causing the ISR to fire immediately while the poll
+     * function wasn't set yet, causing a null dereference
+     */
+    ena_init_napi(adapter);
+
     rc = ena_request_io_irq(adapter);
     if (rc)
         goto err_req_irq;
--
2.17.1
SOURCES/_RHEL7_14-28_net_ena_fix_auto_casting_to_boolean.patch
New file
@@ -0,0 +1,46 @@
Date: Tue, 30 Oct 2018 13:25:29 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 14/28] net: ena: fix auto casting to boolean
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Eliminate potential auto casting compilation error.
Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 248ab77342d0453f067b666b36f0f517ea66c361)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_eth_com.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 1c682b76190f..2b3ff0c20155 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -245,11 +245,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
         (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
         ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
     ena_rx_ctx->l3_csum_err =
-        (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
-        ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+        !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+        ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
     ena_rx_ctx->l4_csum_err =
-        (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
-        ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+        !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+        ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
     ena_rx_ctx->hash = cdesc->hash;
     ena_rx_ctx->frag =
         (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
--
2.17.1
SOURCES/_RHEL7_15-28_net_ena_minor_performance_improvement.patch
New file
@@ -0,0 +1,151 @@
Date: Tue, 30 Oct 2018 13:25:30 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 15/28] net: ena: minor performance improvement
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Reduce fastpath overhead by making ena_com_tx_comp_req_id_get() inline.
Also move it to ena_eth_com.h file with its dependency function
ena_com_cq_inc_head().
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 0e575f8542d1f4d74df30b5a9ba419c5373d01a1)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_eth_com.c | 43 -----------------
 drivers/net/ethernet/amazon/ena/ena_eth_com.h | 46 ++++++++++++++++++-
 2 files changed, 44 insertions(+), 45 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 2b3ff0c20155..9c0511e9f9a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,15 +59,6 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
     return cdesc;
 }
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
-    io_cq->head++;
-
-    /* Switch phase bit in case of wrap around */
-    if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
-        io_cq->phase ^= 1;
-}
-
 static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
 {
     u16 tail_masked;
@@ -477,40 +468,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
     return 0;
 }
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
-    u8 expected_phase, cdesc_phase;
-    struct ena_eth_io_tx_cdesc *cdesc;
-    u16 masked_head;
-
-    masked_head = io_cq->head & (io_cq->q_depth - 1);
-    expected_phase = io_cq->phase;
-
-    cdesc = (struct ena_eth_io_tx_cdesc *)
-        ((uintptr_t)io_cq->cdesc_addr.virt_addr +
-        (masked_head * io_cq->cdesc_entry_size_in_bytes));
-
-    /* When the current completion descriptor phase isn't the same as the
-     * expected, it mean that the device still didn't update
-     * this completion.
-     */
-    cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-    if (cdesc_phase != expected_phase)
-        return -EAGAIN;
-
-    dma_rmb();
-    if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-        pr_err("Invalid req id %d\n", cdesc->req_id);
-        return -EINVAL;
-    }
-
-    ena_com_cq_inc_head(io_cq);
-
-    *req_id = READ_ONCE(cdesc->req_id);
-
-    return 0;
-}
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
 {
     struct ena_eth_io_rx_cdesc_base *cdesc;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..4930324e9d8d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -86,8 +86,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
                    struct ena_com_buf *ena_buf,
                    u16 req_id);
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -159,4 +157,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
     io_sq->next_to_comp += elem;
 }
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+    io_cq->head++;
+
+    /* Switch phase bit in case of wrap around */
+    if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+        io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+                         u16 *req_id)
+{
+    u8 expected_phase, cdesc_phase;
+    struct ena_eth_io_tx_cdesc *cdesc;
+    u16 masked_head;
+
+    masked_head = io_cq->head & (io_cq->q_depth - 1);
+    expected_phase = io_cq->phase;
+
+    cdesc = (struct ena_eth_io_tx_cdesc *)
+        ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+        (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+    /* When the current completion descriptor phase isn't the same as the
+     * expected, it mean that the device still didn't update
+     * this completion.
+     */
+    cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+    if (cdesc_phase != expected_phase)
+        return -EAGAIN;
+
+    dma_rmb();
+
+    *req_id = READ_ONCE(cdesc->req_id);
+    if (unlikely(*req_id >= io_cq->q_depth)) {
+        pr_err("Invalid req id %d\n", cdesc->req_id);
+        return -EINVAL;
+    }
+
+    ena_com_cq_inc_head(io_cq);
+
+    return 0;
+}
+
 #endif /* ENA_ETH_COM_H_ */
--
2.17.1
SOURCES/_RHEL7_16-28_net_ena_complete_host_info_to_match_latest_ENA_spec.patch
New file
@@ -0,0 +1,190 @@
Date: Tue, 30 Oct 2018 13:25:31 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 16/28] net: ena: complete host info to match latest ENA spec
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Add new fields and definitions to host info and fill them
according to the latest ENA spec version.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 095f2f1facba0c78f23750dba65c78cef722c1ea)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 .../net/ethernet/amazon/ena/ena_admin_defs.h  | 31 ++++++++++++++++++-
 drivers/net/ethernet/amazon/ena/ena_com.c     | 12 +++----
 .../net/ethernet/amazon/ena/ena_common_defs.h |  4 +--
 drivers/net/ethernet/amazon/ena/ena_netdev.c  | 10 ++++--
 4 files changed, 43 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4532e574ebcd..d735164efea3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -63,6 +63,8 @@ enum ena_admin_aq_completion_status {
     ENA_ADMIN_ILLEGAL_PARAMETER        = 5,
     ENA_ADMIN_UNKNOWN_ERROR            = 6,
+
+    ENA_ADMIN_RESOURCE_BUSY                 = 7,
 };
 enum ena_admin_aq_feature_id {
@@ -702,6 +704,10 @@ enum ena_admin_os_type {
     ENA_ADMIN_OS_FREEBSD    = 4,
     ENA_ADMIN_OS_IPXE    = 5,
+
+    ENA_ADMIN_OS_ESXI       = 6,
+
+    ENA_ADMIN_OS_GROUPS_NUM = 6,
 };
 struct ena_admin_host_info {
@@ -723,11 +729,27 @@ struct ena_admin_host_info {
     /* 7:0 : major
      * 15:8 : minor
      * 23:16 : sub_minor
+     * 31:24 : module_type
      */
     u32 driver_version;
     /* features bitmap */
-    u32 supported_network_features[4];
+    u32 supported_network_features[2];
+
+    /* ENA spec version of driver */
+    u16 ena_spec_version;
+
+    /* ENA device's Bus, Device and Function
+     * 2:0 : function
+     * 7:3 : device
+     * 15:8 : bus
+     */
+    u16 bdf;
+
+    /* Number of CPUs */
+    u16 num_cpus;
+
+    u16 reserved;
 };
 struct ena_admin_rss_ind_table_entry {
@@ -1008,6 +1030,13 @@ struct ena_admin_ena_mmio_req_read_less_resp {
 #define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8)
 #define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16
 #define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16)
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24
+#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24)
+#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
 /* aenq_common_desc */
 #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 7635c38e77dd..b6e6a4721931 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,9 +41,6 @@
 #define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
-        ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
-        | (ENA_COMMON_SPEC_VERSION_MINOR))
 #define ENA_CTRL_MAJOR        0
 #define ENA_CTRL_MINOR        0
@@ -1400,11 +1397,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
             ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
         ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
-    if (ver < MIN_ENA_VER) {
-        pr_err("ENA version is lower than the minimal version the driver supports\n");
-        return -1;
-    }
-
     pr_info("ena controller version: %d.%d.%d implementation version %d\n",
         (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
             ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
@@ -2441,6 +2433,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
     if (unlikely(!host_attr->host_info))
         return -ENOMEM;
+    host_attr->host_info->ena_spec_version =
+        ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+        (ENA_COMMON_SPEC_VERSION_MINOR));
+
     return 0;
 }
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index bb8d73676eab..23beb7e7ed7b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -32,8 +32,8 @@
 #ifndef _ENA_COMMON_H_
 #define _ENA_COMMON_H_
-#define ENA_COMMON_SPEC_VERSION_MAJOR    0 /*  */
-#define ENA_COMMON_SPEC_VERSION_MINOR    10 /*  */
+#define ENA_COMMON_SPEC_VERSION_MAJOR        2
+#define ENA_COMMON_SPEC_VERSION_MINOR        0
 /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
 struct ena_common_mem_addr {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index d7f050123f93..04a70b78091a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2199,7 +2199,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
     return qid;
 }
-static void ena_config_host_info(struct ena_com_dev *ena_dev)
+static void ena_config_host_info(struct ena_com_dev *ena_dev,
+                 struct pci_dev *pdev)
 {
     struct ena_admin_host_info *host_info;
     int rc;
@@ -2213,6 +2214,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
     host_info = ena_dev->host_attr.host_info;
+    host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
     host_info->os_type = ENA_ADMIN_OS_LINUX;
     host_info->kernel_ver = LINUX_VERSION_CODE;
     strncpy(host_info->kernel_ver_str, utsname()->version,
@@ -2223,7 +2225,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
     host_info->driver_version =
         (DRV_MODULE_VER_MAJOR) |
         (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
-        (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+        (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
+        ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
+    host_info->num_cpus = num_online_cpus();
     rc = ena_com_set_host_attributes(ena_dev);
     if (rc) {
@@ -2448,7 +2452,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
      */
     ena_com_set_admin_polling_mode(ena_dev, true);
-    ena_config_host_info(ena_dev);
+    ena_config_host_info(ena_dev, pdev);
     /* Get Device Attributes*/
     rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
--
2.17.1
SOURCES/_RHEL7_17-28_net_ena_introduce_Low_Latency_Queues_data_structures_acco.patch
New file
@@ -0,0 +1,283 @@
Date: Tue, 30 Oct 2018 13:25:32 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 17/28] net: ena: introduce Low Latency Queues data structures
 according to ENA spec
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Low Latency Queues(LLQ) allow usage of device's memory for descriptors
and headers. Such queues decrease processing time since data is already
located on the device when driver rings the doorbell.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit a7982b8ec947052df6d4467b3a81571f02f528e0)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 .../net/ethernet/amazon/ena/ena_admin_defs.h  | 90 ++++++++++++++++++-
 drivers/net/ethernet/amazon/ena/ena_com.h     | 38 ++++++++
 drivers/net/ethernet/amazon/ena/ena_netdev.c  |  6 +-
 3 files changed, 128 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index d735164efea3..b439ec1b3edb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -74,6 +74,8 @@ enum ena_admin_aq_feature_id {
     ENA_ADMIN_HW_HINTS            = 3,
+    ENA_ADMIN_LLQ                           = 4,
+
     ENA_ADMIN_RSS_HASH_FUNCTION        = 10,
     ENA_ADMIN_STATELESS_OFFLOAD_CONFIG    = 11,
@@ -485,8 +487,85 @@ struct ena_admin_device_attr_feature_desc {
     u32 max_mtu;
 };
+enum ena_admin_llq_header_location {
+    /* header is in descriptor list */
+    ENA_ADMIN_INLINE_HEADER                     = 1,
+    /* header in a separate ring, implies 16B descriptor list entry */
+    ENA_ADMIN_HEADER_RING                       = 2,
+};
+
+enum ena_admin_llq_ring_entry_size {
+    ENA_ADMIN_LIST_ENTRY_SIZE_128B              = 1,
+    ENA_ADMIN_LIST_ENTRY_SIZE_192B              = 2,
+    ENA_ADMIN_LIST_ENTRY_SIZE_256B              = 4,
+};
+
+enum ena_admin_llq_num_descs_before_header {
+    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0     = 0,
+    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1     = 1,
+    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2     = 2,
+    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4     = 4,
+    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8     = 8,
+};
+
+/* packet descriptor list entry always starts with one or more descriptors,
+ * followed by a header. The rest of the descriptors are located in the
+ * beginning of the subsequent entry. Stride refers to how the rest of the
+ * descriptors are placed. This field is relevant only for inline header
+ * mode
+ */
+enum ena_admin_llq_stride_ctrl {
+    ENA_ADMIN_SINGLE_DESC_PER_ENTRY             = 1,
+    ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY          = 2,
+};
+
+struct ena_admin_feature_llq_desc {
+    u32 max_llq_num;
+
+    u32 max_llq_depth;
+
+    /*  specify the header locations the device supports. bitfield of
+     *    enum ena_admin_llq_header_location.
+     */
+    u16 header_location_ctrl_supported;
+
+    /* the header location the driver selected to use. */
+    u16 header_location_ctrl_enabled;
+
+    /* if inline header is specified - this is the size of descriptor
+     *    list entry. If header in a separate ring is specified - this is
+     *    the size of header ring entry. bitfield of enum
+     *    ena_admin_llq_ring_entry_size. specify the entry sizes the device
+     *    supports
+     */
+    u16 entry_size_ctrl_supported;
+
+    /* the entry size the driver selected to use. */
+    u16 entry_size_ctrl_enabled;
+
+    /* valid only if inline header is specified. First entry associated
+     *    with the packet includes descriptors and header. Rest of the
+     *    entries occupied by descriptors. This parameter defines the max
+     *    number of descriptors precedding the header in the first entry.
+     *    The field is bitfield of enum
+     *    ena_admin_llq_num_descs_before_header and specify the values the
+     *    device supports
+     */
+    u16 desc_num_before_header_supported;
+
+    /* the desire field the driver selected to use */
+    u16 desc_num_before_header_enabled;
+
+    /* valid only if inline was chosen. bitfield of enum
+     *    ena_admin_llq_stride_ctrl
+     */
+    u16 descriptors_stride_ctrl_supported;
+
+    /* the stride control the driver selected to use */
+    u16 descriptors_stride_ctrl_enabled;
+};
+
 struct ena_admin_queue_feature_desc {
-    /* including LLQs */
     u32 max_sq_num;
     u32 max_sq_depth;
@@ -495,9 +574,9 @@ struct ena_admin_queue_feature_desc {
     u32 max_cq_depth;
-    u32 max_llq_num;
+    u32 max_legacy_llq_num;
-    u32 max_llq_depth;
+    u32 max_legacy_llq_depth;
     u32 max_header_size;
@@ -822,6 +901,8 @@ struct ena_admin_get_feat_resp {
         struct ena_admin_device_attr_feature_desc dev_attr;
+        struct ena_admin_feature_llq_desc llq;
+
         struct ena_admin_queue_feature_desc max_queue;
         struct ena_admin_feature_aenq_desc aenq;
@@ -869,6 +950,9 @@ struct ena_admin_set_feat_cmd {
         /* rss indirection table */
         struct ena_admin_feature_rss_ind_table ind_table;
+
+        /* LLQ configuration */
+        struct ena_admin_feature_llq_desc llq;
     } u;
 };
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 7b784f8a06a6..50e6c8f6f138 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -108,6 +108,14 @@ enum ena_intr_moder_level {
     ENA_INTR_MAX_NUM_OF_LEVELS,
 };
+struct ena_llq_configurations {
+    enum ena_admin_llq_header_location llq_header_location;
+    enum ena_admin_llq_ring_entry_size llq_ring_entry_size;
+    enum ena_admin_llq_stride_ctrl  llq_stride_ctrl;
+    enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header;
+    u16 llq_ring_entry_size_value;
+};
+
 struct ena_intr_moder_entry {
     unsigned int intr_moder_interval;
     unsigned int pkts_per_interval;
@@ -142,6 +150,15 @@ struct ena_com_tx_meta {
     u16 l4_hdr_len; /* In words */
 };
+struct ena_com_llq_info {
+    u16 header_location_ctrl;
+    u16 desc_stride_ctrl;
+    u16 desc_list_entry_size_ctrl;
+    u16 desc_list_entry_size;
+    u16 descs_num_before_header;
+    u16 descs_per_entry;
+};
+
 struct ena_com_io_cq {
     struct ena_com_io_desc_addr cdesc_addr;
@@ -179,6 +196,20 @@ struct ena_com_io_cq {
 } ____cacheline_aligned;
+struct ena_com_io_bounce_buffer_control {
+    u8 *base_buffer;
+    u16 next_to_use;
+    u16 buffer_size;
+    u16 buffers_num;  /* Must be a power of 2 */
+};
+
+/* This struct is to keep tracking the current location of the next llq entry */
+struct ena_com_llq_pkt_ctrl {
+    u8 *curr_bounce_buf;
+    u16 idx;
+    u16 descs_left_in_line;
+};
+
 struct ena_com_io_sq {
     struct ena_com_io_desc_addr desc_addr;
@@ -190,6 +221,9 @@ struct ena_com_io_sq {
     u32 msix_vector;
     struct ena_com_tx_meta cached_tx_meta;
+    struct ena_com_llq_info llq_info;
+    struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+    struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
     u16 q_depth;
     u16 qid;
@@ -197,6 +231,7 @@ struct ena_com_io_sq {
     u16 idx;
     u16 tail;
     u16 next_to_comp;
+    u16 llq_last_copy_tail;
     u32 tx_max_header_size;
     u8 phase;
     u8 desc_entry_size;
@@ -334,6 +369,8 @@ struct ena_com_dev {
     u16 intr_delay_resolution;
     u32 intr_moder_tx_interval;
     struct ena_intr_moder_entry *intr_moder_tbl;
+
+    struct ena_com_llq_info llq_info;
 };
 struct ena_com_dev_get_features_ctx {
@@ -342,6 +379,7 @@ struct ena_com_dev_get_features_ctx {
     struct ena_admin_feature_aenq_desc aenq;
     struct ena_admin_feature_offload_desc offload;
     struct ena_admin_ena_hw_hints hw_hints;
+    struct ena_admin_feature_llq_desc llq;
 };
 struct ena_com_create_io_ctx {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 04a70b78091a..61595b8ea7e8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2953,7 +2953,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
     /* In case of LLQ use the llq number in the get feature cmd */
     if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-        io_sq_num = get_feat_ctx->max_queues.max_llq_num;
+        io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num;
         if (io_sq_num == 0) {
             dev_err(&pdev->dev,
@@ -2989,7 +2989,7 @@ static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
     has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
     /* Enable push mode if device supports LLQ */
-    if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
+    if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0)
         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
     else
         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -3125,7 +3125,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev,
     if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
         queue_size = min_t(u32, queue_size,
-                   get_feat_ctx->max_queues.max_llq_depth);
+                   get_feat_ctx->max_queues.max_legacy_llq_depth);
     queue_size = rounddown_pow_of_two(queue_size);
--
2.17.1
SOURCES/_RHEL7_18-28_net_ena_add_functions_for_handling_Low_Latency_Queues_in_.patch
New file
@@ -0,0 +1,844 @@
Date: Tue, 30 Oct 2018 13:25:33 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 18/28] net: ena: add functions for handling Low Latency
 Queues in ena_com
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
This patch introduces APIs for detection, initialization, configuration
and actual usage of low latency queues (LLQ). It extends transmit API with
creation of LLQ descriptors in device memory (which include host buffers
descriptors as well as packet header)
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 689b2bdaaa1480ad2c14bdc4c6eaf38284549022)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c     | 249 +++++++++++++++++-
 drivers/net/ethernet/amazon/ena/ena_com.h     |  28 ++
 drivers/net/ethernet/amazon/ena/ena_eth_com.c | 231 ++++++++++++----
 drivers/net/ethernet/amazon/ena/ena_eth_com.h |  25 +-
 drivers/net/ethernet/amazon/ena/ena_netdev.c  |  21 +-
 5 files changed, 474 insertions(+), 80 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index b6e6a4721931..5220c7578d6b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -58,6 +58,8 @@
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT    4
+
 #define ENA_REGS_ADMIN_INTR_MASK 1
 #define ENA_POLL_MS    5
@@ -352,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                             &io_sq->desc_addr.phys_addr,
                             GFP_KERNEL);
         }
-    } else {
+
+        if (!io_sq->desc_addr.virt_addr) {
+            pr_err("memory allocation failed");
+            return -ENOMEM;
+        }
+    }
+
+    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+        /* Allocate bounce buffers */
+        io_sq->bounce_buf_ctrl.buffer_size =
+            ena_dev->llq_info.desc_list_entry_size;
+        io_sq->bounce_buf_ctrl.buffers_num =
+            ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+        io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+        size = io_sq->bounce_buf_ctrl.buffer_size *
+             io_sq->bounce_buf_ctrl.buffers_num;
+
         dev_node = dev_to_node(ena_dev->dmadev);
         set_dev_node(ena_dev->dmadev, ctx->numa_node);
-        io_sq->desc_addr.virt_addr =
+        io_sq->bounce_buf_ctrl.base_buffer =
             devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
         set_dev_node(ena_dev->dmadev, dev_node);
-        if (!io_sq->desc_addr.virt_addr) {
-            io_sq->desc_addr.virt_addr =
+        if (!io_sq->bounce_buf_ctrl.base_buffer)
+            io_sq->bounce_buf_ctrl.base_buffer =
                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+        if (!io_sq->bounce_buf_ctrl.base_buffer) {
+            pr_err("bounce buffer memory allocation failed");
+            return -ENOMEM;
         }
-    }
-    if (!io_sq->desc_addr.virt_addr) {
-        pr_err("memory allocation failed");
-        return -ENOMEM;
+        memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+               sizeof(io_sq->llq_info));
+
+        /* Initiate the first bounce buffer */
+        io_sq->llq_buf_ctrl.curr_bounce_buf =
+            ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+        memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+               0x0, io_sq->llq_info.desc_list_entry_size);
+        io_sq->llq_buf_ctrl.descs_left_in_line =
+            io_sq->llq_info.descs_num_before_header;
     }
     io_sq->tail = 0;
@@ -554,6 +583,156 @@ err:
     return ret;
 }
+/**
+ * Set the LLQ configurations of the firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+    struct ena_com_admin_queue *admin_queue;
+    struct ena_admin_set_feat_cmd cmd;
+    struct ena_admin_set_feat_resp resp;
+    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+    int ret;
+
+    memset(&cmd, 0x0, sizeof(cmd));
+    admin_queue = &ena_dev->admin_queue;
+
+    cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+    cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+    cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+    cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+    cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+    cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+    ret = ena_com_execute_admin_command(admin_queue,
+                        (struct ena_admin_aq_entry *)&cmd,
+                        sizeof(cmd),
+                        (struct ena_admin_acq_entry *)&resp,
+                        sizeof(resp));
+
+    if (unlikely(ret))
+        pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+    return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                   struct ena_admin_feature_llq_desc *llq_features,
+                   struct ena_llq_configurations *llq_default_cfg)
+{
+    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+    u16 supported_feat;
+    int rc;
+
+    memset(llq_info, 0, sizeof(*llq_info));
+
+    supported_feat = llq_features->header_location_ctrl_supported;
+
+    if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+        llq_info->header_location_ctrl =
+            llq_default_cfg->llq_header_location;
+    } else {
+        pr_err("Invalid header location control, supported: 0x%x\n",
+               supported_feat);
+        return -EINVAL;
+    }
+
+    if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+        supported_feat = llq_features->descriptors_stride_ctrl_supported;
+        if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+            llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+        } else    {
+            if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+                llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+            } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+                llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+            } else {
+                pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+                       supported_feat);
+                return -EINVAL;
+            }
+
+            pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                   llq_default_cfg->llq_stride_ctrl, supported_feat,
+                   llq_info->desc_stride_ctrl);
+        }
+    } else {
+        llq_info->desc_stride_ctrl = 0;
+    }
+
+    supported_feat = llq_features->entry_size_ctrl_supported;
+    if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+        llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+        llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+    } else {
+        if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+            llq_info->desc_list_entry_size = 128;
+        } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+            llq_info->desc_list_entry_size = 192;
+        } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+            llq_info->desc_list_entry_size = 256;
+        } else {
+            pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+                   supported_feat);
+            return -EINVAL;
+        }
+
+        pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+               llq_default_cfg->llq_ring_entry_size, supported_feat,
+               llq_info->desc_list_entry_size);
+    }
+    if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+        /* The desc list entry size should be a whole multiple of 8
+         * This requirement comes from __iowrite64_copy()
+         */
+        pr_err("illegal entry size %d\n",
+               llq_info->desc_list_entry_size);
+        return -EINVAL;
+    }
+
+    if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+        llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+            sizeof(struct ena_eth_io_tx_desc);
+    else
+        llq_info->descs_per_entry = 1;
+
+    supported_feat = llq_features->desc_num_before_header_supported;
+    if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+        llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+    } else {
+        if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+        } else {
+            pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+                   supported_feat);
+            return -EINVAL;
+        }
+
+        pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+               llq_default_cfg->llq_num_decs_before_header,
+               supported_feat, llq_info->descs_num_before_header);
+    }
+
+    rc = ena_com_set_llq(ena_dev);
+    if (rc)
+        pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+    return 0;
+}
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                             struct ena_com_admin_queue *admin_queue)
 {
@@ -725,15 +904,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
     if (io_sq->desc_addr.virt_addr) {
         size = io_sq->desc_entry_size * io_sq->q_depth;
-        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-            dma_free_coherent(ena_dev->dmadev, size,
-                      io_sq->desc_addr.virt_addr,
-                      io_sq->desc_addr.phys_addr);
-        else
-            devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+        dma_free_coherent(ena_dev->dmadev, size,
+                  io_sq->desc_addr.virt_addr,
+                  io_sq->desc_addr.phys_addr);
         io_sq->desc_addr.virt_addr = NULL;
     }
+
+    if (io_sq->bounce_buf_ctrl.base_buffer) {
+        devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+        io_sq->bounce_buf_ctrl.base_buffer = NULL;
+    }
 }
 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1740,6 +1921,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
     else
         return rc;
+    rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+    if (!rc)
+        memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+               sizeof(get_resp.u.llq));
+    else if (rc == -EOPNOTSUPP)
+        memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+    else
+        return rc;
+
     return 0;
 }
@@ -2708,3 +2898,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
     intr_moder_tbl[level].pkts_per_interval;
     entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
 }
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                struct ena_admin_feature_llq_desc *llq_features,
+                struct ena_llq_configurations *llq_default_cfg)
+{
+    int rc;
+    int size;
+
+    if (!llq_features->max_llq_num) {
+        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+        return 0;
+    }
+
+    rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
+    if (rc)
+        return rc;
+
+    /* Validate the descriptor is not too big */
+    size = ena_dev->tx_max_header_size;
+    size += ena_dev->llq_info.descs_num_before_header *
+        sizeof(struct ena_eth_io_tx_desc);
+
+    if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+        pr_err("the size of the LLQ entry is smaller than needed\n");
+        return -EINVAL;
+    }
+
+    ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+    return 0;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 50e6c8f6f138..25af8d025919 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -37,6 +37,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 #include <linux/sched.h>
 #include <linux/sizes.h>
 #include <linux/spinlock.h>
@@ -973,6 +974,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
                        enum ena_intr_moder_level level,
                        struct ena_intr_moder_entry *entry);
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq_features: LLQ feature descriptor, retrieve via
+ *                ena_com_get_dev_attr_feat.
+ * @ena_llq_config: The default driver LLQ parameters configurations
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                struct ena_admin_feature_llq_desc *llq_features,
+                struct ena_llq_configurations *llq_default_config);
+
 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
 {
     return ena_dev->adaptive_coalescing;
@@ -1082,4 +1093,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
         intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
 }
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+    u16 size, buffers_num;
+    u8 *buf;
+
+    size = bounce_buf_ctrl->buffer_size;
+    buffers_num = bounce_buf_ctrl->buffers_num;
+
+    buf = bounce_buf_ctrl->base_buffer +
+        (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+    prefetchw(bounce_buf_ctrl->base_buffer +
+        (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+    return buf;
+}
+
 #endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 9c0511e9f9a2..17107ca107e3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,7 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
     return cdesc;
 }
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
     u16 tail_masked;
     u32 offset;
@@ -71,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
     return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+                             u8 *bounce_buffer)
 {
-    u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-    u32 offset = tail_masked * io_sq->desc_entry_size;
+    struct ena_com_llq_info *llq_info = &io_sq->llq_info;
-    /* In case this queue isn't a LLQ */
-    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-        return;
+    u16 dst_tail_mask;
+    u32 dst_offset;
-    memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
-            io_sq->desc_addr.virt_addr + offset,
-            io_sq->desc_entry_size);
-}
+    dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+    dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+    /* Make sure everything was written into the bounce buffer before
+     * writing the bounce buffer to the device
+     */
+    wmb();
+
+    /* The line is completed. Copy it to dev */
+    __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+             bounce_buffer, (llq_info->desc_list_entry_size) / 8);
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
     io_sq->tail++;
     /* Switch phase bit in case of wrap around */
     if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
         io_sq->phase ^= 1;
+
+    return 0;
 }
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
-                       u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+                         u8 *header_src,
+                         u16 header_len)
 {
-    u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-    u8 __iomem *dev_head_addr =
-        io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+    struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+    struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+    u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+    u16 header_offset;
-    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+    if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
         return 0;
-    if (unlikely(!io_sq->header_addr)) {
-        pr_err("Push buffer header ptr is NULL\n");
-        return -EINVAL;
+    header_offset =
+        llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+    if (unlikely((header_offset + header_len) >
+             llq_info->desc_list_entry_size)) {
+        pr_err("trying to write header larger than llq entry can accommodate\n");
+        return -EFAULT;
+    }
+
+    if (unlikely(!bounce_buffer)) {
+        pr_err("bounce buffer is NULL\n");
+        return -EFAULT;
+    }
+
+    memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+    return 0;
+}
+
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+    struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+    u8 *bounce_buffer;
+    void *sq_desc;
+
+    bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+    if (unlikely(!bounce_buffer)) {
+        pr_err("bounce buffer is NULL\n");
+        return NULL;
+    }
+
+    sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+    pkt_ctrl->idx++;
+    pkt_ctrl->descs_left_in_line--;
+
+    return sq_desc;
+}
+
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+    struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+    struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+    int rc;
+
+    if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+        return 0;
+
+    /* bounce buffer was used, so write it and get a new one */
+    if (pkt_ctrl->idx) {
+        rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+                            pkt_ctrl->curr_bounce_buf);
+        if (unlikely(rc))
+            return rc;
+
+        pkt_ctrl->curr_bounce_buf =
+            ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+        memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+               0x0, llq_info->desc_list_entry_size);
+    }
+
+    pkt_ctrl->idx = 0;
+    pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+    return 0;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+        return get_sq_desc_llq(io_sq);
+
+    return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+    struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+    struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+    int rc;
+
+    if (!pkt_ctrl->descs_left_in_line) {
+        rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+                            pkt_ctrl->curr_bounce_buf);
+        if (unlikely(rc))
+            return rc;
+
+        pkt_ctrl->curr_bounce_buf =
+            ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+            memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                   0x0, llq_info->desc_list_entry_size);
+
+        pkt_ctrl->idx = 0;
+        if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+            pkt_ctrl->descs_left_in_line = 1;
+        else
+            pkt_ctrl->descs_left_in_line =
+            llq_info->desc_list_entry_size / io_sq->desc_entry_size;
     }
-    memcpy_toio(dev_head_addr, head_src, header_len);
+    return 0;
+}
+
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+        return ena_com_sq_update_llq_tail(io_sq);
+
+    io_sq->tail++;
+
+    /* Switch phase bit in case of wrap around */
+    if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+        io_sq->phase ^= 1;
     return 0;
 }
@@ -177,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
     return false;
 }
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-                             struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+                            struct ena_com_tx_ctx *ena_tx_ctx)
 {
     struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
     struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -223,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
     memcpy(&io_sq->cached_tx_meta, ena_meta,
            sizeof(struct ena_com_tx_meta));
-    ena_com_copy_curr_sq_desc_to_dev(io_sq);
-    ena_com_sq_update_tail(io_sq);
+    return ena_com_sq_update_tail(io_sq);
 }
 static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -262,18 +375,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 {
     struct ena_eth_io_tx_desc *desc = NULL;
     struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
-    void *push_header = ena_tx_ctx->push_header;
+    void *buffer_to_push = ena_tx_ctx->push_header;
     u16 header_len = ena_tx_ctx->header_len;
     u16 num_bufs = ena_tx_ctx->num_bufs;
-    int total_desc, i, rc;
+    u16 start_tail = io_sq->tail;
+    int i, rc;
     bool have_meta;
     u64 addr_hi;
     WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
     /* num_bufs +1 for potential meta desc */
-    if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
-        pr_err("Not enough space in the tx queue\n");
+    if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+        pr_debug("Not enough space in the tx queue\n");
         return -ENOMEM;
     }
@@ -283,23 +397,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
         return -EINVAL;
     }
-    /* start with pushing the header (if needed) */
-    rc = ena_com_write_header(io_sq, push_header, header_len);
+    if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+             !buffer_to_push))
+        return -EINVAL;
+
+    rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
     if (unlikely(rc))
         return rc;
     have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
             ena_tx_ctx);
-    if (have_meta)
-        ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+    if (have_meta) {
+        rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+        if (unlikely(rc))
+            return rc;
+    }
-    /* If the caller doesn't want send packets */
+    /* If the caller doesn't want to send packets */
     if (unlikely(!num_bufs && !header_len)) {
-        *nb_hw_desc = have_meta ? 0 : 1;
-        return 0;
+        rc = ena_com_close_bounce_buffer(io_sq);
+        *nb_hw_desc = io_sq->tail - start_tail;
+        return rc;
     }
     desc = get_sq_desc(io_sq);
+    if (unlikely(!desc))
+        return -EFAULT;
     memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
     /* Set first desc when we don't have meta descriptor */
@@ -351,10 +474,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
     for (i = 0; i < num_bufs; i++) {
         /* The first desc share the same desc as the header */
         if (likely(i != 0)) {
-            ena_com_copy_curr_sq_desc_to_dev(io_sq);
-            ena_com_sq_update_tail(io_sq);
+            rc = ena_com_sq_update_tail(io_sq);
+            if (unlikely(rc))
+                return rc;
             desc = get_sq_desc(io_sq);
+            if (unlikely(!desc))
+                return -EFAULT;
+
             memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
             desc->len_ctrl |= (io_sq->phase <<
@@ -377,15 +504,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
     /* set the last desc indicator */
     desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
-    ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
-    ena_com_sq_update_tail(io_sq);
+    rc = ena_com_sq_update_tail(io_sq);
+    if (unlikely(rc))
+        return rc;
-    total_desc = max_t(u16, num_bufs, 1);
-    total_desc += have_meta ? 1 : 0;
+    rc = ena_com_close_bounce_buffer(io_sq);
-    *nb_hw_desc = total_desc;
-    return 0;
+    *nb_hw_desc = io_sq->tail - start_tail;
+    return rc;
 }
 int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -444,15 +570,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
     WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
-    if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+    if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
         return -ENOSPC;
     desc = get_sq_desc(io_sq);
+    if (unlikely(!desc))
+        return -EFAULT;
+
     memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
     desc->length = ena_buf->len;
-    desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+    desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
     desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
     desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
     desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -463,9 +592,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
     desc->buff_addr_hi =
         ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
-    ena_com_sq_update_tail(io_sq);
-
-    return 0;
+    return ena_com_sq_update_tail(io_sq);
 }
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 4930324e9d8d..bcc84072367d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -94,7 +94,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
     writel(intr_reg->intr_control, io_cq->unmask_reg);
 }
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
 {
     u16 tail, next_to_comp, cnt;
@@ -105,11 +105,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
     return io_sq->q_depth - 1 - cnt;
 }
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+                        u16 required_buffers)
 {
-    u16 tail;
+    int temp;
-    tail = io_sq->tail;
+    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+        return ena_com_free_desc(io_sq) >= required_buffers;
+
+    /* This calculation doesn't need to be 100% accurate. So to reduce
+     * the calculation overhead just Subtract 2 lines from the free descs
+     * (one for the header line and one to compensate the devision
+     * down calculation.
+     */
+    temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+    return ena_com_free_desc(io_sq) > temp;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+    u16 tail = io_sq->tail;
     pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
          io_sq->qid, tail);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 61595b8ea7e8..7b0820d7cf6a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -806,12 +806,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
      */
     smp_mb();
-    above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
-        ENA_TX_WAKEUP_THRESH;
+    above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                            ENA_TX_WAKEUP_THRESH);
     if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
         __netif_tx_lock(txq, smp_processor_id());
-        above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
-            ENA_TX_WAKEUP_THRESH;
+        above_thresh =
+            ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                             ENA_TX_WAKEUP_THRESH);
         if (netif_tx_queue_stopped(txq) && above_thresh) {
             netif_tx_wake_queue(txq);
             u64_stats_update_begin(&tx_ring->syncp);
@@ -1103,7 +1104,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
     rx_ring->next_to_clean = next_to_clean;
-    refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+    refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
     refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
     /* Optimization, try to batch new rx buffers */
@@ -2109,8 +2110,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
      * to sgl_size + 2. one for the meta descriptor and one for header
      * (if the header is larger than tx_max_header_size).
      */
-    if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
-             (tx_ring->sgl_size + 2))) {
+    if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                           tx_ring->sgl_size + 2))) {
         netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
               __func__, qid);
@@ -2129,8 +2130,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
          */
         smp_mb();
-        if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
-                > ENA_TX_WAKEUP_THRESH) {
+        if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                         ENA_TX_WAKEUP_THRESH)) {
             netif_tx_wake_queue(txq);
             u64_stats_update_begin(&tx_ring->syncp);
             tx_ring->tx_stats.queue_wakeup++;
@@ -2807,7 +2808,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
         rx_ring = &adapter->rx_ring[i];
         refill_required =
-            ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+            ena_com_free_desc(rx_ring->ena_com_io_sq);
         if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
             rx_ring->empty_rx_queue++;
--
2.17.1
SOURCES/_RHEL7_19-28_net_ena_add_functions_for_handling_Low_Latency_Queues_in_.patch
New file
@@ -0,0 +1,667 @@
Date: Tue, 30 Oct 2018 13:25:34 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 19/28] net: ena: add functions for handling Low Latency
 Queues in ena_netdev
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
This patch includes all code changes necessary in ena_netdev to enable
packet sending via the LLQ placement mode.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 38005ca816a7ef5516dc8e59ae95716739aa75b0)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_ethtool.c |   1 +
 drivers/net/ethernet/amazon/ena/ena_netdev.c  | 387 +++++++++++-------
 drivers/net/ethernet/amazon/ena/ena_netdev.h  |   6 +
 3 files changed, 251 insertions(+), 143 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 060cb18fa659..7643faaebb42 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
     ENA_STAT_TX_ENTRY(doorbells),
     ENA_STAT_TX_ENTRY(prepare_ctx_err),
     ENA_STAT_TX_ENTRY(bad_req_id),
+    ENA_STAT_TX_ENTRY(llq_buffer_copy),
     ENA_STAT_TX_ENTRY(missed_tx),
 };
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7b0820d7cf6a..3a1f7a89cf29 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -238,6 +238,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
         }
     }
+    size = tx_ring->tx_max_header_size;
+    tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+    if (!tx_ring->push_buf_intermediate_buf) {
+        tx_ring->push_buf_intermediate_buf = vzalloc(size);
+        if (!tx_ring->push_buf_intermediate_buf) {
+            vfree(tx_ring->tx_buffer_info);
+            vfree(tx_ring->free_tx_ids);
+            return -ENOMEM;
+        }
+    }
+
     /* Req id ring for TX out of order completions */
     for (i = 0; i < tx_ring->ring_size; i++)
         tx_ring->free_tx_ids[i] = i;
@@ -266,6 +277,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
     vfree(tx_ring->free_tx_ids);
     tx_ring->free_tx_ids = NULL;
+
+    vfree(tx_ring->push_buf_intermediate_buf);
+    tx_ring->push_buf_intermediate_buf = NULL;
 }
 /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -603,6 +617,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
         ena_free_rx_bufs(adapter, i);
 }
+static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+                    struct ena_tx_buffer *tx_info)
+{
+    struct ena_com_buf *ena_buf;
+    u32 cnt;
+    int i;
+
+    ena_buf = tx_info->bufs;
+    cnt = tx_info->num_of_bufs;
+
+    if (unlikely(!cnt))
+        return;
+
+    if (tx_info->map_linear_data) {
+        dma_unmap_single(tx_ring->dev,
+                 dma_unmap_addr(ena_buf, paddr),
+                 dma_unmap_len(ena_buf, len),
+                 DMA_TO_DEVICE);
+        ena_buf++;
+        cnt--;
+    }
+
+    /* unmap remaining mapped pages */
+    for (i = 0; i < cnt; i++) {
+        dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+                   dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+        ena_buf++;
+    }
+}
+
 /* ena_free_tx_bufs - Free Tx Buffers per Queue
  * @tx_ring: TX ring for which buffers be freed
  */
@@ -613,9 +657,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
     for (i = 0; i < tx_ring->ring_size; i++) {
         struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
-        struct ena_com_buf *ena_buf;
-        int nr_frags;
-        int j;
         if (!tx_info->skb)
             continue;
@@ -631,21 +672,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
                    tx_ring->qid, i);
         }
-        ena_buf = tx_info->bufs;
-        dma_unmap_single(tx_ring->dev,
-                 ena_buf->paddr,
-                 ena_buf->len,
-                 DMA_TO_DEVICE);
-
-        /* unmap remaining mapped pages */
-        nr_frags = tx_info->num_of_bufs - 1;
-        for (j = 0; j < nr_frags; j++) {
-            ena_buf++;
-            dma_unmap_page(tx_ring->dev,
-                       ena_buf->paddr,
-                       ena_buf->len,
-                       DMA_TO_DEVICE);
-        }
+        ena_unmap_tx_skb(tx_ring, tx_info);
         dev_kfree_skb_any(tx_info->skb);
     }
@@ -737,8 +764,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
     while (tx_pkts < budget) {
         struct ena_tx_buffer *tx_info;
         struct sk_buff *skb;
-        struct ena_com_buf *ena_buf;
-        int i, nr_frags;
         rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
                         &req_id);
@@ -758,24 +783,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
         tx_info->skb = NULL;
         tx_info->last_jiffies = 0;
-        if (likely(tx_info->num_of_bufs != 0)) {
-            ena_buf = tx_info->bufs;
-
-            dma_unmap_single(tx_ring->dev,
-                     dma_unmap_addr(ena_buf, paddr),
-                     dma_unmap_len(ena_buf, len),
-                     DMA_TO_DEVICE);
-
-            /* unmap remaining mapped pages */
-            nr_frags = tx_info->num_of_bufs - 1;
-            for (i = 0; i < nr_frags; i++) {
-                ena_buf++;
-                dma_unmap_page(tx_ring->dev,
-                           dma_unmap_addr(ena_buf, paddr),
-                           dma_unmap_len(ena_buf, len),
-                           DMA_TO_DEVICE);
-            }
-        }
+        ena_unmap_tx_skb(tx_ring, tx_info);
         netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
               "tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -1285,7 +1293,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
     /* Reserved the max msix vectors we might need */
     msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
-
     netif_dbg(adapter, probe, adapter->netdev,
           "trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -1585,7 +1592,7 @@ static int ena_up_complete(struct ena_adapter *adapter)
 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 {
-    struct ena_com_create_io_ctx ctx = { 0 };
+    struct ena_com_create_io_ctx ctx;
     struct ena_com_dev *ena_dev;
     struct ena_ring *tx_ring;
     u32 msix_vector;
@@ -1598,6 +1605,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
     msix_vector = ENA_IO_IRQ_IDX(qid);
     ena_qid = ENA_IO_TXQ_IDX(qid);
+    memset(&ctx, 0x0, sizeof(ctx));
+
     ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
     ctx.qid = ena_qid;
     ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1651,7 +1660,7 @@ create_err:
 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 {
     struct ena_com_dev *ena_dev;
-    struct ena_com_create_io_ctx ctx = { 0 };
+    struct ena_com_create_io_ctx ctx;
     struct ena_ring *rx_ring;
     u32 msix_vector;
     u16 ena_qid;
@@ -1663,6 +1672,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
     msix_vector = ENA_IO_IRQ_IDX(qid);
     ena_qid = ENA_IO_RXQ_IDX(qid);
+    memset(&ctx, 0x0, sizeof(ctx));
+
     ctx.qid = ena_qid;
     ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
     ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1980,73 +1991,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
     return rc;
 }
-/* Called with netif_tx_lock. */
-static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ena_tx_map_skb(struct ena_ring *tx_ring,
+              struct ena_tx_buffer *tx_info,
+              struct sk_buff *skb,
+              void **push_hdr,
+              u16 *header_len)
 {
-    struct ena_adapter *adapter = netdev_priv(dev);
-    struct ena_tx_buffer *tx_info;
-    struct ena_com_tx_ctx ena_tx_ctx;
-    struct ena_ring *tx_ring;
-    struct netdev_queue *txq;
+    struct ena_adapter *adapter = tx_ring->adapter;
     struct ena_com_buf *ena_buf;
-    void *push_hdr;
-    u32 len, last_frag;
-    u16 next_to_use;
-    u16 req_id;
-    u16 push_len;
-    u16 header_len;
     dma_addr_t dma;
-    int qid, rc, nb_hw_desc;
-    int i = -1;
-
-    netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
-    /*  Determine which tx ring we will be placed on */
-    qid = skb_get_queue_mapping(skb);
-    tx_ring = &adapter->tx_ring[qid];
-    txq = netdev_get_tx_queue(dev, qid);
-
-    rc = ena_check_and_linearize_skb(tx_ring, skb);
-    if (unlikely(rc))
-        goto error_drop_packet;
-
-    skb_tx_timestamp(skb);
-    len = skb_headlen(skb);
+    u32 skb_head_len, frag_len, last_frag;
+    u16 push_len = 0;
+    u16 delta = 0;
+    int i = 0;
-    next_to_use = tx_ring->next_to_use;
-    req_id = tx_ring->free_tx_ids[next_to_use];
-    tx_info = &tx_ring->tx_buffer_info[req_id];
-    tx_info->num_of_bufs = 0;
-
-    WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
-    ena_buf = tx_info->bufs;
+    skb_head_len = skb_headlen(skb);
     tx_info->skb = skb;
+    ena_buf = tx_info->bufs;
     if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-        /* prepared the push buffer */
-        push_len = min_t(u32, len, tx_ring->tx_max_header_size);
-        header_len = push_len;
-        push_hdr = skb->data;
+        /* When the device is LLQ mode, the driver will copy
+         * the header into the device memory space.
+         * the ena_com layer assume the header is in a linear
+         * memory space.
+         * This assumption might be wrong since part of the header
+         * can be in the fragmented buffers.
+         * Use skb_header_pointer to make sure the header is in a
+         * linear memory space.
+         */
+
+        push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
+        *push_hdr = skb_header_pointer(skb, 0, push_len,
+                           tx_ring->push_buf_intermediate_buf);
+        *header_len = push_len;
+        if (unlikely(skb->data != *push_hdr)) {
+            u64_stats_update_begin(&tx_ring->syncp);
+            tx_ring->tx_stats.llq_buffer_copy++;
+            u64_stats_update_end(&tx_ring->syncp);
+
+            delta = push_len - skb_head_len;
+        }
     } else {
-        push_len = 0;
-        header_len = min_t(u32, len, tx_ring->tx_max_header_size);
-        push_hdr = NULL;
+        *push_hdr = NULL;
+        *header_len = min_t(u32, skb_head_len,
+                    tx_ring->tx_max_header_size);
     }
-    netif_dbg(adapter, tx_queued, dev,
+    netif_dbg(adapter, tx_queued, adapter->netdev,
           "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
-          push_hdr, push_len);
+          *push_hdr, push_len);
-    if (len > push_len) {
+    if (skb_head_len > push_len) {
         dma = dma_map_single(tx_ring->dev, skb->data + push_len,
-                     len - push_len, DMA_TO_DEVICE);
-        if (dma_mapping_error(tx_ring->dev, dma))
+                     skb_head_len - push_len, DMA_TO_DEVICE);
+        if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
             goto error_report_dma_error;
         ena_buf->paddr = dma;
-        ena_buf->len = len - push_len;
+        ena_buf->len = skb_head_len - push_len;
         ena_buf++;
         tx_info->num_of_bufs++;
+        tx_info->map_linear_data = 1;
+    } else {
+        tx_info->map_linear_data = 0;
     }
     last_frag = skb_shinfo(skb)->nr_frags;
@@ -2054,18 +2062,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     for (i = 0; i < last_frag; i++) {
         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-        len = skb_frag_size(frag);
-        dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
-                       DMA_TO_DEVICE);
-        if (dma_mapping_error(tx_ring->dev, dma))
+        frag_len = skb_frag_size(frag);
+
+        if (unlikely(delta >= frag_len)) {
+            delta -= frag_len;
+            continue;
+        }
+
+        dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
+                       frag_len - delta, DMA_TO_DEVICE);
+        if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
             goto error_report_dma_error;
         ena_buf->paddr = dma;
-        ena_buf->len = len;
+        ena_buf->len = frag_len - delta;
         ena_buf++;
+        tx_info->num_of_bufs++;
+        delta = 0;
     }
-    tx_info->num_of_bufs += last_frag;
+    return 0;
+
+error_report_dma_error:
+    u64_stats_update_begin(&tx_ring->syncp);
+    tx_ring->tx_stats.dma_mapping_err++;
+    u64_stats_update_end(&tx_ring->syncp);
+    netdev_warn(adapter->netdev, "failed to map skb\n");
+
+    tx_info->skb = NULL;
+
+    tx_info->num_of_bufs += i;
+    ena_unmap_tx_skb(tx_ring, tx_info);
+
+    return -EINVAL;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+    struct ena_adapter *adapter = netdev_priv(dev);
+    struct ena_tx_buffer *tx_info;
+    struct ena_com_tx_ctx ena_tx_ctx;
+    struct ena_ring *tx_ring;
+    struct netdev_queue *txq;
+    void *push_hdr;
+    u16 next_to_use, req_id, header_len;
+    int qid, rc, nb_hw_desc;
+
+    netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+    /*  Determine which tx ring we will be placed on */
+    qid = skb_get_queue_mapping(skb);
+    tx_ring = &adapter->tx_ring[qid];
+    txq = netdev_get_tx_queue(dev, qid);
+
+    rc = ena_check_and_linearize_skb(tx_ring, skb);
+    if (unlikely(rc))
+        goto error_drop_packet;
+
+    skb_tx_timestamp(skb);
+
+    next_to_use = tx_ring->next_to_use;
+    req_id = tx_ring->free_tx_ids[next_to_use];
+    tx_info = &tx_ring->tx_buffer_info[req_id];
+    tx_info->num_of_bufs = 0;
+
+    WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+
+    rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
+    if (unlikely(rc))
+        goto error_drop_packet;
     memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
     ena_tx_ctx.ena_bufs = tx_info->bufs;
@@ -2081,14 +2146,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
                 &nb_hw_desc);
+    /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
+     * since the number of free descriptors in the queue is checked
+     * after sending the previous packet. In case there isn't enough
+     * space in the queue for the next packet, it is stopped
+     * until there is again enough available space in the queue.
+     * All other failure reasons of ena_com_prepare_tx() are fatal
+     * and therefore require a device reset.
+     */
     if (unlikely(rc)) {
         netif_err(adapter, tx_queued, dev,
               "failed to prepare tx bufs\n");
         u64_stats_update_begin(&tx_ring->syncp);
-        tx_ring->tx_stats.queue_stop++;
         tx_ring->tx_stats.prepare_ctx_err++;
         u64_stats_update_end(&tx_ring->syncp);
-        netif_tx_stop_queue(txq);
+        adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+        set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
         goto error_unmap_dma;
     }
@@ -2151,35 +2224,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
     return NETDEV_TX_OK;
-error_report_dma_error:
-    u64_stats_update_begin(&tx_ring->syncp);
-    tx_ring->tx_stats.dma_mapping_err++;
-    u64_stats_update_end(&tx_ring->syncp);
-    netdev_warn(adapter->netdev, "failed to map skb\n");
-
-    tx_info->skb = NULL;
-
 error_unmap_dma:
-    if (i >= 0) {
-        /* save value of frag that failed */
-        last_frag = i;
-
-        /* start back at beginning and unmap skb */
-        tx_info->skb = NULL;
-        ena_buf = tx_info->bufs;
-        dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
-                 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-
-        /* unmap remaining mapped pages */
-        for (i = 0; i < last_frag; i++) {
-            ena_buf++;
-            dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
-                       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-        }
-    }
+    ena_unmap_tx_skb(tx_ring, tx_info);
+    tx_info->skb = NULL;
 error_drop_packet:
-
     dev_kfree_skb(skb);
     return NETDEV_TX_OK;
 }
@@ -2615,7 +2664,9 @@ static int ena_restore_device(struct ena_adapter *adapter)
     set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
     mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
-    dev_err(&pdev->dev, "Device reset completed successfully\n");
+    dev_err(&pdev->dev,
+        "Device reset completed successfully, Driver info: %s\n",
+        version);
     return rc;
 err_disable_msix:
@@ -2982,18 +3033,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
     return io_queue_num;
 }
-static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
-                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+                       struct ena_com_dev *ena_dev,
+                       struct ena_admin_feature_llq_desc *llq,
+                       struct ena_llq_configurations *llq_default_configurations)
 {
     bool has_mem_bar;
+    int rc;
+    u32 llq_feature_mask;
+
+    llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+    if (!(ena_dev->supported_features & llq_feature_mask)) {
+        dev_err(&pdev->dev,
+            "LLQ is not supported Fallback to host mode policy.\n");
+        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+        return 0;
+    }
     has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
-    /* Enable push mode if device supports LLQ */
-    if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0)
-        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
-    else
+    rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+    if (unlikely(rc)) {
+        dev_err(&pdev->dev,
+            "Failed to configure the device mode.  Fallback to host mode policy.\n");
         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+        return 0;
+    }
+
+    /* Nothing to config, exit */
+    if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+        return 0;
+
+    if (!has_mem_bar) {
+        dev_err(&pdev->dev,
+            "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+        return 0;
+    }
+
+    ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+                       pci_resource_start(pdev, ENA_MEM_BAR),
+                       pci_resource_len(pdev, ENA_MEM_BAR));
+
+    if (!ena_dev->mem_bar)
+        return -EFAULT;
+
+    return 0;
 }
 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
@@ -3111,6 +3196,15 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
     pci_release_selected_regions(pdev, release_bars);
 }
+static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+    llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+    llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+    llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+    llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+    llq_config->llq_ring_entry_size_value = 128;
+}
+
 static int ena_calc_queue_size(struct pci_dev *pdev,
                    struct ena_com_dev *ena_dev,
                    u16 *max_tx_sgl_size,
@@ -3159,7 +3253,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     static int version_printed;
     struct net_device *netdev;
     struct ena_adapter *adapter;
+    struct ena_llq_configurations llq_config;
     struct ena_com_dev *ena_dev = NULL;
+    char *queue_type_str;
     static int adapters_found;
     int io_queue_num, bars, rc;
     int queue_size;
@@ -3213,16 +3309,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         goto err_free_region;
     }
-    ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+    set_default_llq_configurations(&llq_config);
-    if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-        ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
-                           pci_resource_start(pdev, ENA_MEM_BAR),
-                           pci_resource_len(pdev, ENA_MEM_BAR));
-        if (!ena_dev->mem_bar) {
-            rc = -EFAULT;
-            goto err_device_destroy;
-        }
+    rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
+                         &llq_config);
+    if (rc) {
+        dev_err(&pdev->dev, "ena device init failed\n");
+        goto err_device_destroy;
     }
     /* initial Tx interrupt delay, Assumes 1 usec granularity.
@@ -3237,8 +3330,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         goto err_device_destroy;
     }
-    dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
-         io_queue_num, queue_size);
+    dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
+         io_queue_num, queue_size,
+         (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
+         "ENABLED" : "DISABLED");
     /* dev zeroed in init_etherdev */
     netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
@@ -3329,9 +3424,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
             (unsigned long)adapter);
     mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
-    dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+    if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+        queue_type_str = "Regular";
+    else
+        queue_type_str = "Low Latency";
+
+    dev_info(&pdev->dev,
+         "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
          DEVICE_NAME, (long)pci_resource_start(pdev, 0),
-         netdev->dev_addr, io_queue_num);
+         netdev->dev_addr, io_queue_num, queue_type_str);
     set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index c9c85332d134..39b52db1de72 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -148,6 +148,9 @@ struct ena_tx_buffer {
     /* num of buffers used by this skb */
     u32 num_of_bufs;
+    /* Indicate if bufs[0] map the linear data of the skb. */
+    u8 map_linear_data;
+
     /* Used for detect missing tx packets to limit the number of prints */
     u32 print_once;
     /* Save the last jiffies to detect missing tx packets
@@ -183,6 +186,7 @@ struct ena_stats_tx {
     u64 tx_poll;
     u64 doorbells;
     u64 bad_req_id;
+    u64 llq_buffer_copy;
     u64 missed_tx;
 };
@@ -254,6 +258,8 @@ struct ena_ring {
         struct ena_stats_tx tx_stats;
         struct ena_stats_rx rx_stats;
     };
+
+    u8 *push_buf_intermediate_buf;
     int empty_rx_queue;
 } ____cacheline_aligned;
--
2.17.1
SOURCES/_RHEL7_20-28_net_ena_use_CSUM_CHECKED_device_indication_to_report_skb..patch
New file
@@ -0,0 +1,137 @@
Date: Tue, 30 Oct 2018 13:25:35 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 20/28] net: ena: use CSUM_CHECKED device indication to report
 skb's checksum status
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Set skb->ip_summed to the correct value as reported by the device.
Add counter for the case where rx csum offload is enabled but
device didn't check it.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit cb36bb36e1f17d2a7b9a9751e5cfec4235b46c93)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_eth_com.c     |  3 +++
 drivers/net/ethernet/amazon/ena/ena_eth_com.h     |  1 +
 drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h | 10 ++++++++--
 drivers/net/ethernet/amazon/ena/ena_ethtool.c     |  1 +
 drivers/net/ethernet/amazon/ena/ena_netdev.c      | 13 ++++++++++++-
 drivers/net/ethernet/amazon/ena/ena_netdev.h      |  1 +
 6 files changed, 26 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 17107ca107e3..f6c2d3855be8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -354,6 +354,9 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
     ena_rx_ctx->l4_csum_err =
         !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
         ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
+    ena_rx_ctx->l4_csum_checked =
+        !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
+        ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
     ena_rx_ctx->hash = cdesc->hash;
     ena_rx_ctx->frag =
         (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index bcc84072367d..340d02b64ca6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -67,6 +67,7 @@ struct ena_com_rx_ctx {
     enum ena_eth_io_l4_proto_index l4_proto;
     bool l3_csum_err;
     bool l4_csum_err;
+    u8 l4_csum_checked;
     /* fragmented packet */
     bool frag;
     u32 hash;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index f320c58793a5..4c5ccaa13c42 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -242,9 +242,13 @@ struct ena_eth_io_rx_cdesc_base {
      *    checksum error detected, or, the controller didn't
      *    validate the checksum. This bit is valid only when
      *    l4_proto_idx indicates TCP/UDP packet, and,
-     *    ipv4_frag is not set
+     *    ipv4_frag is not set. This bit is valid only when
+     *    l4_csum_checked below is set.
      * 15 : ipv4_frag - Indicates IPv4 fragmented packet
-     * 23:16 : reserved16
+     * 16 : l4_csum_checked - L4 checksum was verified
+     *    (could be OK or error), when cleared the status of
+     *    checksum is unknown
+     * 23:17 : reserved17 - MBZ
      * 24 : phase
      * 25 : l3_csum2 - second checksum engine result
      * 26 : first - Indicates first descriptor in
@@ -390,6 +394,8 @@ struct ena_eth_io_numa_node_cfg_reg {
 #define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14)
 #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15
 #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15)
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16)
 #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24
 #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24)
 #define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 7643faaebb42..fd07122532f1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -97,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
     ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
     ENA_STAT_RX_ENTRY(bad_req_id),
     ENA_STAT_RX_ENTRY(empty_rx_ring),
+    ENA_STAT_RX_ENTRY(csum_unchecked),
 };
 static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 3a1f7a89cf29..4a318f351ab2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -996,8 +996,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
             return;
         }
-        skb->ip_summed = CHECKSUM_UNNECESSARY;
+        if (likely(ena_rx_ctx->l4_csum_checked)) {
+            skb->ip_summed = CHECKSUM_UNNECESSARY;
+        } else {
+            u64_stats_update_begin(&rx_ring->syncp);
+            rx_ring->rx_stats.csum_unchecked++;
+            u64_stats_update_end(&rx_ring->syncp);
+            skb->ip_summed = CHECKSUM_NONE;
+        }
+    } else {
+        skb->ip_summed = CHECKSUM_NONE;
+        return;
     }
+
 }
 static void ena_set_rx_hash(struct ena_ring *rx_ring,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 39b52db1de72..d9fc9c71215c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -202,6 +202,7 @@ struct ena_stats_rx {
     u64 rx_copybreak_pkt;
     u64 bad_req_id;
     u64 empty_rx_ring;
+    u64 csum_unchecked;
 };
 struct ena_ring {
--
2.17.1
SOURCES/_RHEL7_21-28_net_ena_explicit_casting_and_initialization,_and_clearer_.patch
New file
@@ -0,0 +1,240 @@
Date: Tue, 30 Oct 2018 13:25:36 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 21/28] net: ena: explicit casting and initialization, and
 clearer error handling
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit bd791175a6432d24fc5d7b348304276027372545)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c    | 39 ++++++++++++--------
 drivers/net/ethernet/amazon/ena/ena_netdev.c |  5 +--
 drivers/net/ethernet/amazon/ena/ena_netdev.h | 27 ++++++++------
 3 files changed, 40 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 5220c7578d6b..5c468b28723b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -235,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
     tail_masked = admin_queue->sq.tail & queue_size_mask;
     /* In case of queue FULL */
-    cnt = atomic_read(&admin_queue->outstanding_cmds);
+    cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
     if (cnt >= admin_queue->q_depth) {
         pr_debug("admin queue is full.\n");
         admin_queue->stats.out_of_space++;
@@ -304,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
                              struct ena_admin_acq_entry *comp,
                              size_t comp_size_in_bytes)
 {
-    unsigned long flags;
+    unsigned long flags = 0;
     struct ena_comp_ctx *comp_ctx;
     spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -332,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
     memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
-    io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+    io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
     io_sq->desc_entry_size =
         (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
         sizeof(struct ena_eth_io_tx_desc) :
@@ -486,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
     /* Go over all the completions */
     while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
-            ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+        ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
         /* Do not read the rest of the completion entry before the
          * phase bit was validated
          */
@@ -537,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                              struct ena_com_admin_queue *admin_queue)
 {
-    unsigned long flags, timeout;
+    unsigned long flags = 0;
+    unsigned long timeout;
     int ret;
     timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
@@ -736,7 +737,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                             struct ena_com_admin_queue *admin_queue)
 {
-    unsigned long flags;
+    unsigned long flags = 0;
     int ret;
     wait_for_completion_timeout(&comp_ctx->wait_event,
@@ -782,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
     volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
         mmio_read->read_resp;
     u32 mmio_read_reg, ret, i;
-    unsigned long flags;
+    unsigned long flags = 0;
     u32 timeout = mmio_read->reg_read_to;
     might_sleep();
@@ -1426,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 {
     struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-    unsigned long flags;
+    unsigned long flags = 0;
     spin_lock_irqsave(&admin_queue->q_lock, flags);
     while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
@@ -1470,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
 {
     struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-    unsigned long flags;
+    unsigned long flags = 0;
     spin_lock_irqsave(&admin_queue->q_lock, flags);
     ena_dev->admin_queue.running_state = state;
@@ -1504,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
     }
     if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-        pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+        pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
             get_resp.u.aenq.supported_groups, groups_flag);
         return -EOPNOTSUPP;
     }
@@ -1652,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
                     sizeof(*mmio_read->read_resp),
                     &mmio_read->read_resp_dma_addr, GFP_KERNEL);
     if (unlikely(!mmio_read->read_resp))
-        return -ENOMEM;
+        goto err;
     ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
@@ -1661,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
     mmio_read->readless_supported = true;
     return 0;
+
+err:
+
+    return -ENOMEM;
 }
 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
@@ -1961,6 +1966,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
     struct ena_admin_aenq_entry *aenq_e;
     struct ena_admin_aenq_common_desc *aenq_common;
     struct ena_com_aenq *aenq  = &dev->aenq;
+    unsigned long long timestamp;
     ena_aenq_handler handler_cb;
     u16 masked_head, processed = 0;
     u8 phase;
@@ -1978,10 +1984,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
          */
         dma_rmb();
+        timestamp =
+            (unsigned long long)aenq_common->timestamp_low |
+            ((unsigned long long)aenq_common->timestamp_high << 32);
         pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
-             aenq_common->group, aenq_common->syndrom,
-             (u64)aenq_common->timestamp_low +
-                 ((u64)aenq_common->timestamp_high << 32));
+             aenq_common->group, aenq_common->syndrom, timestamp);
         /* Handle specific event*/
         handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -2623,8 +2630,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
     if (unlikely(!host_attr->host_info))
         return -ENOMEM;
-    host_attr->host_info->ena_spec_version =
-        ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+    host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
+        ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
         (ENA_COMMON_SPEC_VERSION_MINOR));
     return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4a318f351ab2..b11bb6f52d16 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2598,15 +2598,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
     dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
     adapter->dev_up_before_reset = dev_up;
-
     if (!graceful)
         ena_com_set_admin_running_state(ena_dev, false);
     if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
         ena_down(adapter);
-    /* Before releasing the ENA resources, a device reset is required.
-     * (to prevent the device from accessing them).
+    /* Stop the device from sending AENQ events (in case reset flag is set
+     *  and device is up, ena_close already reset the device
      * In case the reset flag is set and the device is up, ena_down()
      * already perform the reset, so it can be skipped.
      */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index d9fc9c71215c..727d62cdfa90 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -58,7 +58,21 @@
 #define DEVICE_NAME    "Elastic Network Adapter (ENA)"
 /* 1 for AENQ + ADMIN */
-#define ENA_MAX_MSIX_VEC(io_queues)    (1 + (io_queues))
+#define ENA_ADMIN_MSIX_VEC        1
+#define ENA_MAX_MSIX_VEC(io_queues)    (ENA_ADMIN_MSIX_VEC + (io_queues))
+
+/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
+#define ENA_MIN_MSIX_VEC        2
 #define ENA_REG_BAR            0
 #define ENA_MEM_BAR            2
@@ -360,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 int ena_get_sset_count(struct net_device *netdev, int sset);
-/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
- * driver passas 0.
- * Since the max packet size the ENA handles is ~9kB limit the buffer length to
- * 16kB.
- */
-#if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
-#else
-#define ENA_PAGE_SIZE PAGE_SIZE
-#endif
-
 #endif /* !(ENA_H) */
--
2.17.1
SOURCES/_RHEL7_22-28_net_ena_limit_refill_Rx_threshold_to_256_to_avoid_latency.patch
New file
@@ -0,0 +1,66 @@
Date: Tue, 30 Oct 2018 13:25:37 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 22/28] net: ena: limit refill Rx threshold to 256 to avoid
 latency issues
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Currently Rx refill is done when the number of required descriptors is
above 1/8 queue size. With a default of 1024 entries per queue the
threshold is 128 descriptors.
There is intention to increase the queue size to 8196 entries.
In this case threshold of 1024 descriptors is too large and can hurt
latency.
Add another limitation to Rx threshold to be at most 256 descriptors.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 0574bb806dad29a3dada0ee42b01645477d48282)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 4 +++-
 drivers/net/ethernet/amazon/ena/ena_netdev.h | 5 +++--
 2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b11bb6f52d16..203047d24d5b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1124,7 +1124,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
     rx_ring->next_to_clean = next_to_clean;
     refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
-    refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
+    refill_threshold =
+        min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
+              ENA_RX_REFILL_THRESH_PACKET);
     /* Optimization, try to batch new rx buffers */
     if (refill_required > refill_threshold) {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 727d62cdfa90..0d792d76d2a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -106,10 +106,11 @@
  */
 #define ENA_TX_POLL_BUDGET_DIVIDER    4
-/* Refill Rx queue when number of available descriptors is below
- * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER
+/* Refill Rx queue when number of required descriptors is above
+ * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET
  */
 #define ENA_RX_REFILL_THRESH_DIVIDER    8
+#define ENA_RX_REFILL_THRESH_PACKET    256
 /* Number of queues to check for missing queues per timer service */
 #define ENA_MONITORED_TX_QUEUES    4
--
2.17.1
SOURCES/_RHEL7_23-28_net_ena_change_rx_copybreak_default_to_reduce_kernel_memo.patch
New file
@@ -0,0 +1,40 @@
Date: Tue, 30 Oct 2018 13:25:38 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 23/28] net: ena: change rx copybreak default to reduce kernel
 memory pressure
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Improves socket memory utilization when receiving packets larger
than 128 bytes (the previous rx copybreak) and smaller than 256 bytes.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 87731f0c681c9682c5521e5197d89e561b7da395)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0d792d76d2a2..95c20882b57c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -81,7 +81,7 @@
 #define ENA_DEFAULT_RING_SIZE    (1024)
 #define ENA_TX_WAKEUP_THRESH        (MAX_SKB_FRAGS + 2)
-#define ENA_DEFAULT_RX_COPYBREAK    (128 - NET_IP_ALIGN)
+#define ENA_DEFAULT_RX_COPYBREAK    (256 - NET_IP_ALIGN)
 /* limit the buffer size to 600 bytes to handle MTU changes from very
  * small to very large, in which case the number of buffers per packet
--
2.17.1
SOURCES/_RHEL7_24-28_net_ena_remove_redundant_parameter_in_ena_com_admin_init_.patch
New file
@@ -0,0 +1,88 @@
Date: Tue, 30 Oct 2018 13:25:39 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 24/28] net: ena: remove redundant parameter in
 ena_com_admin_init()
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Remove redundant spinlock acquire parameter from ena_com_admin_init()
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit f1e90f6e2c1fb0e491f910540314015324fed1e2)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_com.c    | 6 ++----
 drivers/net/ethernet/amazon/ena/ena_com.h    | 5 +----
 drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +-
 3 files changed, 4 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 5c468b28723b..420cede41ca4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1701,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
 }
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
-               struct ena_aenq_handlers *aenq_handlers,
-               bool init_spinlock)
+               struct ena_aenq_handlers *aenq_handlers)
 {
     struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
     u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
@@ -1728,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
     atomic_set(&admin_queue->outstanding_cmds, 0);
-    if (init_spinlock)
-        spin_lock_init(&admin_queue->q_lock);
+    spin_lock_init(&admin_queue->q_lock);
     ret = ena_com_init_comp_ctxt(admin_queue);
     if (ret)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 25af8d025919..ae8b4857fce3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -436,8 +436,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
 /* ena_com_admin_init - Init the admin and the async queues
  * @ena_dev: ENA communication layer struct
  * @aenq_handlers: Those handlers to be called upon event.
- * @init_spinlock: Indicate if this method should init the admin spinlock or
- * the spinlock was init before (for example, in a case of FLR).
  *
  * Initialize the admin submission and completion queues.
  * Initialize the asynchronous events notification queues.
@@ -445,8 +443,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev);
  * @return - 0 on success, negative value on failure.
  */
 int ena_com_admin_init(struct ena_com_dev *ena_dev,
-               struct ena_aenq_handlers *aenq_handlers,
-               bool init_spinlock);
+               struct ena_aenq_handlers *aenq_handlers);
 /* ena_com_admin_destroy - Destroy the admin and the async events queues.
  * @ena_dev: ENA communication layer struct
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 203047d24d5b..56088a906ab2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2502,7 +2502,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
     }
     /* ENA admin level init */
-    rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
+    rc = ena_com_admin_init(ena_dev, &aenq_handlers);
     if (rc) {
         dev_err(dev,
             "Can not initialize ena admin queue with device\n");
--
2.17.1
SOURCES/_RHEL7_25-28_net_ena_update_driver_version_to_2.0.1.patch
New file
@@ -0,0 +1,40 @@
Date: Tue, 30 Oct 2018 13:25:40 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 25/28] net: ena: update driver version to 2.0.1
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 3a7b9d8ddd200bdafaa3ef75b8544d2403eaa03b)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 drivers/net/ethernet/amazon/ena/ena_netdev.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 95c20882b57c..6407112594a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -43,9 +43,9 @@
 #include "ena_com.h"
 #include "ena_eth_com.h"
-#define DRV_MODULE_VER_MAJOR    1
-#define DRV_MODULE_VER_MINOR    5
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_VER_MAJOR    2
+#define DRV_MODULE_VER_MINOR    0
+#define DRV_MODULE_VER_SUBMINOR 1
 #define DRV_MODULE_NAME        "ena"
 #ifndef DRV_MODULE_VERSION
--
2.17.1
SOURCES/_RHEL7_26-28_net_ena_fix_indentations_in_ena_defs_for_better_readabili.patch
New file
@@ -0,0 +1,1012 @@
Date: Tue, 30 Oct 2018 13:25:41 -0400
From: linville@redhat.com
To: rhkernel-list@redhat.com
Cc: Lin Liu <linl@redhat.com>
Subject: [RHEL7 26/28] net: ena: fix indentations in ena_defs for better
 readability
From: "John W. Linville" <linville@redhat.com>
BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1633418
Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=18850510
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit be26667cb3947c90322467f1d15ad86b02350e00)
Signed-off-by: John W. Linville <linville@redhat.com>
---
 .../net/ethernet/amazon/ena/ena_admin_defs.h  | 334 +++++++-----------
 .../net/ethernet/amazon/ena/ena_eth_io_defs.h | 223 ++++++------
 .../net/ethernet/amazon/ena/ena_regs_defs.h   | 206 +++++------
 3 files changed, 338 insertions(+), 425 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index b439ec1b3edb..9f80b73f90b1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -32,119 +32,81 @@
 #ifndef _ENA_ADMIN_H_
 #define _ENA_ADMIN_H_
-enum ena_admin_aq_opcode {
-    ENA_ADMIN_CREATE_SQ    = 1,
-
-    ENA_ADMIN_DESTROY_SQ    = 2,
-
-    ENA_ADMIN_CREATE_CQ    = 3,
-
-    ENA_ADMIN_DESTROY_CQ    = 4,
-
-    ENA_ADMIN_GET_FEATURE    = 8,
-    ENA_ADMIN_SET_FEATURE    = 9,
-
-    ENA_ADMIN_GET_STATS    = 11,
+enum ena_admin_aq_opcode {
+    ENA_ADMIN_CREATE_SQ                         = 1,
+    ENA_ADMIN_DESTROY_SQ                        = 2,
+    ENA_ADMIN_CREATE_CQ                         = 3,
+    ENA_ADMIN_DESTROY_CQ                        = 4,
+    ENA_ADMIN_GET_FEATURE                       = 8,
+    ENA_ADMIN_SET_FEATURE                       = 9,
+    ENA_ADMIN_GET_STATS                         = 11,
 };
 enum ena_admin_aq_completion_status {
-    ENA_ADMIN_SUCCESS            = 0,
-
-    ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE    = 1,
-
-    ENA_ADMIN_BAD_OPCODE            = 2,
-
-    ENA_ADMIN_UNSUPPORTED_OPCODE        = 3,
-
-    ENA_ADMIN_MALFORMED_REQUEST        = 4,
-
+    ENA_ADMIN_SUCCESS                           = 0,
+    ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE       = 1,
+    ENA_ADMIN_BAD_OPCODE                        = 2,
+    ENA_ADMIN_UNSUPPORTED_OPCODE                = 3,
+    ENA_ADMIN_MALFORMED_REQUEST                 = 4,
     /* Additional status is provided in ACQ entry extended_status */
-    ENA_ADMIN_ILLEGAL_PARAMETER        = 5,
-
-    ENA_ADMIN_UNKNOWN_ERROR            = 6,
-
-    ENA_ADMIN_RESOURCE_BUSY                 = 7,
+    ENA_ADMIN_ILLEGAL_PARAMETER                 = 5,
+    ENA_ADMIN_UNKNOWN_ERROR                     = 6,
+    ENA_ADMIN_RESOURCE_BUSY                     = 7,
 };
 enum ena_admin_aq_feature_id {
-    ENA_ADMIN_DEVICE_ATTRIBUTES        = 1,
-
-    ENA_ADMIN_MAX_QUEUES_NUM        = 2,
-
-    ENA_ADMIN_HW_HINTS            = 3,
-
-    ENA_ADMIN_LLQ                           = 4,
-
-    ENA_ADMIN_RSS_HASH_FUNCTION        = 10,
-
-    ENA_ADMIN_STATELESS_OFFLOAD_CONFIG    = 11,
-
-    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG    = 12,
-
-    ENA_ADMIN_MTU                = 14,
-
-    ENA_ADMIN_RSS_HASH_INPUT        = 18,
-
-    ENA_ADMIN_INTERRUPT_MODERATION        = 20,
-
-    ENA_ADMIN_AENQ_CONFIG            = 26,
-
-    ENA_ADMIN_LINK_CONFIG            = 27,
-
-    ENA_ADMIN_HOST_ATTR_CONFIG        = 28,
-
-    ENA_ADMIN_FEATURES_OPCODE_NUM        = 32,
+    ENA_ADMIN_DEVICE_ATTRIBUTES                 = 1,
+    ENA_ADMIN_MAX_QUEUES_NUM                    = 2,
+    ENA_ADMIN_HW_HINTS                          = 3,
+    ENA_ADMIN_LLQ                               = 4,
+    ENA_ADMIN_RSS_HASH_FUNCTION                 = 10,
+    ENA_ADMIN_STATELESS_OFFLOAD_CONFIG          = 11,
+    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG      = 12,
+    ENA_ADMIN_MTU                               = 14,
+    ENA_ADMIN_RSS_HASH_INPUT                    = 18,
+    ENA_ADMIN_INTERRUPT_MODERATION              = 20,
+    ENA_ADMIN_AENQ_CONFIG                       = 26,
+    ENA_ADMIN_LINK_CONFIG                       = 27,
+    ENA_ADMIN_HOST_ATTR_CONFIG                  = 28,
+    ENA_ADMIN_FEATURES_OPCODE_NUM               = 32,
 };
 enum ena_admin_placement_policy_type {
     /* descriptors and headers are in host memory */
-    ENA_ADMIN_PLACEMENT_POLICY_HOST    = 1,
-
+    ENA_ADMIN_PLACEMENT_POLICY_HOST             = 1,
     /* descriptors and headers are in device memory (a.k.a Low Latency
      * Queue)
      */
-    ENA_ADMIN_PLACEMENT_POLICY_DEV    = 3,
+    ENA_ADMIN_PLACEMENT_POLICY_DEV              = 3,
 };
 enum ena_admin_link_types {
-    ENA_ADMIN_LINK_SPEED_1G        = 0x1,
-
-    ENA_ADMIN_LINK_SPEED_2_HALF_G    = 0x2,
-
-    ENA_ADMIN_LINK_SPEED_5G        = 0x4,
-
-    ENA_ADMIN_LINK_SPEED_10G    = 0x8,
-
-    ENA_ADMIN_LINK_SPEED_25G    = 0x10,
-
-    ENA_ADMIN_LINK_SPEED_40G    = 0x20,
-
-    ENA_ADMIN_LINK_SPEED_50G    = 0x40,
-
-    ENA_ADMIN_LINK_SPEED_100G    = 0x80,
-
-    ENA_ADMIN_LINK_SPEED_200G    = 0x100,
-
-    ENA_ADMIN_LINK_SPEED_400G    = 0x200,
+    ENA_ADMIN_LINK_SPEED_1G                     = 0x1,
+    ENA_ADMIN_LINK_SPEED_2_HALF_G               = 0x2,
+    ENA_ADMIN_LINK_SPEED_5G                     = 0x4,
+    ENA_ADMIN_LINK_SPEED_10G                    = 0x8,
+    ENA_ADMIN_LINK_SPEED_25G                    = 0x10,
+    ENA_ADMIN_LINK_SPEED_40G                    = 0x20,
+    ENA_ADMIN_LINK_SPEED_50G                    = 0x40,
+    ENA_ADMIN_LINK_SPEED_100G                   = 0x80,
+    ENA_ADMIN_LINK_SPEED_200G                   = 0x100,
+    ENA_ADMIN_LINK_SPEED_400G                   = 0x200,
 };
 enum ena_admin_completion_policy_type {
     /* completion queue entry for each sq descriptor */
-    ENA_ADMIN_COMPLETION_POLICY_DESC        = 0,
-
+    ENA_ADMIN_COMPLETION_POLICY_DESC            = 0,
     /* completion queue entry upon request in sq descriptor */
-    ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND    = 1,
-
+    ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND  = 1,
     /* current queue head pointer is updated in OS memory upon sq
      * descriptor request
      */
-    ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND    = 2,
-
+    ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND  = 2,
     /* current queue head pointer is updated in OS memory for each sq
      * descriptor
      */
-    ENA_ADMIN_COMPLETION_POLICY_HEAD        = 3,
+    ENA_ADMIN_COMPLETION_POLICY_HEAD            = 3,
 };
 /* basic stats return ena_admin_basic_stats while extanded stats return a
@@ -152,15 +114,13 @@ enum ena_admin_completion_policy_type {
  * device id
  */
 enum ena_admin_get_stats_type {
-    ENA_ADMIN_GET_STATS_TYPE_BASIC        = 0,
-
-    ENA_ADMIN_GET_STATS_TYPE_EXTENDED    = 1,
+    ENA_ADMIN_GET_STATS_TYPE_BASIC              = 0,
+    ENA_ADMIN_GET_STATS_TYPE_EXTENDED           = 1,
 };
 enum ena_admin_get_stats_scope {
-    ENA_ADMIN_SPECIFIC_QUEUE    = 0,
-
-    ENA_ADMIN_ETH_TRAFFIC        = 1,
+    ENA_ADMIN_SPECIFIC_QUEUE                    = 0,
+    ENA_ADMIN_ETH_TRAFFIC                       = 1,
 };
 struct ena_admin_aq_common_desc {
@@ -231,7 +191,9 @@ struct ena_admin_acq_common_desc {
     u16 extended_status;
-    /* serves as a hint what AQ entries can be revoked */
+    /* indicates to the driver which AQ entry has been consumed by the
+     *    device and could be reused
+     */
     u16 sq_head_indx;
 };
@@ -300,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd {
 };
 enum ena_admin_sq_direction {
-    ENA_ADMIN_SQ_DIRECTION_TX    = 1,
-
-    ENA_ADMIN_SQ_DIRECTION_RX    = 2,
+    ENA_ADMIN_SQ_DIRECTION_TX                   = 1,
+    ENA_ADMIN_SQ_DIRECTION_RX                   = 2,
 };
 struct ena_admin_acq_create_sq_resp_desc {
@@ -664,9 +625,8 @@ struct ena_admin_feature_offload_desc {
 };
 enum ena_admin_hash_functions {
-    ENA_ADMIN_TOEPLITZ    = 1,
-
-    ENA_ADMIN_CRC32        = 2,
+    ENA_ADMIN_TOEPLITZ                          = 1,
+    ENA_ADMIN_CRC32                             = 2,
 };
 struct ena_admin_feature_rss_flow_hash_control {
@@ -692,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function {
 /* RSS flow hash protocols */
 enum ena_admin_flow_hash_proto {
-    ENA_ADMIN_RSS_TCP4    = 0,
-
-    ENA_ADMIN_RSS_UDP4    = 1,
-
-    ENA_ADMIN_RSS_TCP6    = 2,
-
-    ENA_ADMIN_RSS_UDP6    = 3,
-
-    ENA_ADMIN_RSS_IP4    = 4,
-
-    ENA_ADMIN_RSS_IP6    = 5,
-
-    ENA_ADMIN_RSS_IP4_FRAG    = 6,
-
-    ENA_ADMIN_RSS_NOT_IP    = 7,
-
+    ENA_ADMIN_RSS_TCP4                          = 0,
+    ENA_ADMIN_RSS_UDP4                          = 1,
+    ENA_ADMIN_RSS_TCP6                          = 2,
+    ENA_ADMIN_RSS_UDP6                          = 3,
+    ENA_ADMIN_RSS_IP4                           = 4,
+    ENA_ADMIN_RSS_IP6                           = 5,
+    ENA_ADMIN_RSS_IP4_FRAG                      = 6,
+    ENA_ADMIN_RSS_NOT_IP                        = 7,
     /* TCPv6 with extension header */
-    ENA_ADMIN_RSS_TCP6_EX    = 8,
-
+    ENA_ADMIN_RSS_TCP6_EX                       = 8,
     /* IPv6 with extension header */
-    ENA_ADMIN_RSS_IP6_EX    = 9,
-
-    ENA_ADMIN_RSS_PROTO_NUM    = 16,
+    ENA_ADMIN_RSS_IP6_EX                        = 9,
+    ENA_ADMIN_RSS_PROTO_NUM                     = 16,
 };
 /* RSS flow hash fields */
 enum ena_admin_flow_hash_fields {
     /* Ethernet Dest Addr */
-    ENA_ADMIN_RSS_L2_DA    = BIT(0),
-
+    ENA_ADMIN_RSS_L2_DA                         = BIT(0),
     /* Ethernet Src Addr */
-    ENA_ADMIN_RSS_L2_SA    = BIT(1),
-
+    ENA_ADMIN_RSS_L2_SA                         = BIT(1),
     /* ipv4/6 Dest Addr */
-    ENA_ADMIN_RSS_L3_DA    = BIT(2),
-
+    ENA_ADMIN_RSS_L3_DA                         = BIT(2),
     /* ipv4/6 Src Addr */
-    ENA_ADMIN_RSS_L3_SA    = BIT(3),
-
+    ENA_ADMIN_RSS_L3_SA                         = BIT(3),
     /* tcp/udp Dest Port */
-    ENA_ADMIN_RSS_L4_DP    = BIT(4),
-
+    ENA_ADMIN_RSS_L4_DP                         = BIT(4),
     /* tcp/udp Src Port */
-    ENA_ADMIN_RSS_L4_SP    = BIT(5),
+    ENA_ADMIN_RSS_L4_SP                         = BIT(5),
 };
 struct ena_admin_proto_input {
@@ -774,19 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input {
 };
 enum ena_admin_os_type {
-    ENA_ADMIN_OS_LINUX    = 1,
-
-    ENA_ADMIN_OS_WIN    = 2,
-
-    ENA_ADMIN_OS_DPDK    = 3,
-
-    ENA_ADMIN_OS_FREEBSD    = 4,
-
-    ENA_ADMIN_OS_IPXE    = 5,
-
-    ENA_ADMIN_OS_ESXI       = 6,
-
-    ENA_ADMIN_OS_GROUPS_NUM = 6,
+    ENA_ADMIN_OS_LINUX                          = 1,
+    ENA_ADMIN_OS_WIN                            = 2,
+    ENA_ADMIN_OS_DPDK                           = 3,
+    ENA_ADMIN_OS_FREEBSD                        = 4,
+    ENA_ADMIN_OS_IPXE                           = 5,
+    ENA_ADMIN_OS_ESXI                           = 6,
+    ENA_ADMIN_OS_GROUPS_NUM                     = 6,
 };
 struct ena_admin_host_info {
@@ -981,25 +920,18 @@ struct ena_admin_aenq_common_desc {
 /* asynchronous event notification groups */
 enum ena_admin_aenq_group {
-    ENA_ADMIN_LINK_CHANGE        = 0,
-
-    ENA_ADMIN_FATAL_ERROR        = 1,
-
-    ENA_ADMIN_WARNING        = 2,
-
-    ENA_ADMIN_NOTIFICATION        = 3,
-
-    ENA_ADMIN_KEEP_ALIVE        = 4,
-
-    ENA_ADMIN_AENQ_GROUPS_NUM    = 5,
+    ENA_ADMIN_LINK_CHANGE                       = 0,
+    ENA_ADMIN_FATAL_ERROR                       = 1,
+    ENA_ADMIN_WARNING                           = 2,
+    ENA_ADMIN_NOTIFICATION                      = 3,
+    ENA_ADMIN_KEEP_ALIVE                        = 4,
+    ENA_ADMIN_AENQ_GROUPS_NUM                   = 5,
 };
 enum ena_admin_aenq_notification_syndrom {
-    ENA_ADMIN_SUSPEND    = 0,
-
-    ENA_ADMIN_RESUME    = 1,
-
-    ENA_ADMIN_UPDATE_HINTS    = 2,
+    ENA_ADMIN_SUSPEND                           = 0,
+    ENA_ADMIN_RESUME                            = 1,
+    ENA_ADMIN_UPDATE_HINTS                      = 2,
 };
 struct ena_admin_aenq_entry {
@@ -1034,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp {
 };
 /* aq_common_desc */
-#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1)
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2
-#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2)
+#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK            GENMASK(11, 0)
+#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK                 BIT(0)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT            1
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK             BIT(1)
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT   2
+#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK    BIT(2)
 /* sq */
-#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5)
+#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT                     5
+#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK                      GENMASK(7, 5)
 /* acq_common_desc */
-#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
-#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK           GENMASK(11, 0)
+#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK                BIT(0)
 /* aq_create_sq_cmd */
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0)
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4
-#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4)
+#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT       5