From 746679f876254905a83b0c7ad192d4cc9c149315 Mon Sep 17 00:00:00 2001
From: Jonathan Toppins <jtoppins@redhat.com>
Date: Wed, 2 Oct 2019 18:22:57 -0400
Subject: [PATCH 42/96] [netdrv] bnxt_en: Refactor TPA logic

Message-id: <b6a806b7d4b8e826e928a731708ab23ed16a4e2d.1570027456.git.jtoppins@redhat.com>
Patchwork-id: 276460
O-Subject: [RHEL-8.2 PATCH 35/78] bnxt_en: Refactor TPA logic.
Bugzilla: 1724766
RH-Acked-by: John Linville <linville@redhat.com>
RH-Acked-by: Jarod Wilson <jarod@redhat.com>

Refactor the TPA logic slightly so that the code can be more easily
extended to support TPA on the new 57500 chips.  In particular, the
logic to get the next aggregation completion is refactored into a new
function, bnxt_get_agg(), to make that operation more general; it will
be different on the new chips in TPA mode.  The logic to recycle the
aggregation buffers gains a start index parameter for the same reason.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 4a228a3a5e58e5c05c6ffb5b430e5cb936865a8b)
Bugzilla: 1724766
Build Info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=23809532
Tested: build, boot, basic ping
Signed-off-by: Jonathan Toppins <jtoppins@redhat.com>
Signed-off-by: Bruno Meneguele <bmeneg@redhat.com>
---
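For illustration of the refactor (not part of the upstream change): the
new scheme looks up the Nth aggregation completion from a fixed starting
index instead of advancing a cp_cons cursor with NEXT_CMP() on every
loop iteration.  A minimal, self-contained sketch of that indexing
follows; the macro definitions, ring sizes, simplified types and the
get_agg() name are stand-ins for the sketch, not the driver's.

#include <stdint.h>

#define CP_DESC_CNT		64			/* assumed completions per ring page */
#define CP_RING_SIZE		(4 * CP_DESC_CNT)	/* assumed total entries, power of 2 */
#define RING_CMP(raw)		((raw) & (CP_RING_SIZE - 1))	/* raw counter -> ring index */
#define ADV_RAW_CMP(raw, n)	((raw) + (n))			/* advance raw counter by n */
#define CP_RING(idx)		((idx) / CP_DESC_CNT)		/* which ring page */
#define CP_IDX(idx)		((idx) % CP_DESC_CNT)		/* offset within that page */

struct rx_agg_cmp {
	uint32_t rx_agg_cmp_len_flags_type;
	uint32_t rx_agg_cmp_opaque;
};

/* Return the "curr"-th aggregation completion at or after cp_cons. */
struct rx_agg_cmp *get_agg(struct rx_agg_cmp ring[][CP_DESC_CNT],
			   uint16_t cp_cons, uint16_t curr)
{
	uint16_t idx = RING_CMP(ADV_RAW_CMP(cp_cons, curr));

	return &ring[CP_RING(idx)][CP_IDX(idx)];
}

With this shape, a caller recycling agg_bufs buffers starting at offset
"start" simply asks for entry (start + i) on iteration i, which is the
pattern the bnxt_reuse_rx_agg_bufs() and bnxt_rx_pages() hunks below
switch to.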
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 117 ++++++++++++++++++------------
 1 file changed, 69 insertions(+), 48 deletions(-)

Index: src/drivers/net/ethernet/broadcom/bnxt/bnxt.c
===================================================================
--- src.orig/drivers/net/ethernet/broadcom/bnxt/bnxt.c	2020-02-06 16:23:15.092518825 +0100
+++ src/drivers/net/ethernet/broadcom/bnxt/bnxt.c	2020-02-06 16:23:15.989510591 +0100
@@ -830,8 +830,20 @@
 	return 0;
 }
 
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
-				   u32 agg_bufs)
+static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
+				       struct bnxt_cp_ring_info *cpr,
+				       u16 cp_cons, u16 curr)
+{
+	struct rx_agg_cmp *agg;
+
+	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
+	agg = (struct rx_agg_cmp *)
+		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+	return agg;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
+				   u16 start, u32 agg_bufs, bool tpa)
 {
 	struct bnxt_napi *bnapi = cpr->bnapi;
 	struct bnxt *bp = bnapi->bp;
@@ -847,8 +859,7 @@
 		struct rx_bd *prod_bd;
 		struct page *page;
 
-		agg = (struct rx_agg_cmp *)
-			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		agg = bnxt_get_agg(bp, cpr, idx, start + i);
 		cons = agg->rx_agg_cmp_opaque;
 		__clear_bit(cons, rxr->rx_agg_bmap);
 
@@ -876,7 +887,6 @@
 
 		prod = NEXT_RX_AGG(prod);
 		sw_prod = NEXT_RX_AGG(sw_prod);
-		cp_cons = NEXT_CMP(cp_cons);
 	}
 	rxr->rx_agg_prod = prod;
 	rxr->rx_sw_agg_prod = sw_prod;
@@ -959,8 +969,8 @@
 
 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
 				     struct bnxt_cp_ring_info *cpr,
-				     struct sk_buff *skb, u16 cp_cons,
-				     u32 agg_bufs)
+				     struct sk_buff *skb, u16 idx,
+				     u32 agg_bufs, bool tpa)
 {
 	struct bnxt_napi *bnapi = cpr->bnapi;
 	struct pci_dev *pdev = bp->pdev;
@@ -975,8 +985,7 @@
 		struct page *page;
 		dma_addr_t mapping;
 
-		agg = (struct rx_agg_cmp *)
-			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		agg = bnxt_get_agg(bp, cpr, idx, i);
 		cons = agg->rx_agg_cmp_opaque;
 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
@@ -1010,7 +1019,7 @@
 			 * allocated already.
 			 */
 			rxr->rx_agg_prod = prod;
-			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
+			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
 			return NULL;
 		}
 
@@ -1023,7 +1032,6 @@
 		skb->truesize += PAGE_SIZE;
 
 		prod = NEXT_RX_AGG(prod);
-		cp_cons = NEXT_CMP(cp_cons);
 	}
 	rxr->rx_agg_prod = prod;
 	return skb;
@@ -1083,9 +1091,7 @@
 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
 		struct rx_tpa_end_cmp *tpa_end = cmp;
 
-		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-			    RX_TPA_END_CMP_AGG_BUFS) >>
-			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
 	}
 
 	if (agg_bufs) {
@@ -1197,11 +1203,10 @@
 	cons_rx_buf->data = NULL;
 }
 
-static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
-			   u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
 {
 	if (agg_bufs)
-		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
 }
 
 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
@@ -1373,9 +1378,7 @@
 	skb_shinfo(skb)->gso_size =
 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
-	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
-		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+	payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
 	if (likely(skb))
 		tcp_gro_complete(skb);
@@ -1405,11 +1408,11 @@
 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 	u8 agg_id = TPA_END_AGG_ID(tpa_end);
 	u8 *data_ptr, agg_bufs;
-	u16 cp_cons = RING_CMP(*raw_cons);
 	unsigned int len;
 	struct bnxt_tpa_info *tpa_info;
 	dma_addr_t mapping;
 	struct sk_buff *skb;
+	u16 idx = 0;
 	void *data;
 
 	if (unlikely(bnapi->in_reset)) {
@@ -1427,19 +1430,19 @@
 	len = tpa_info->len;
 	mapping = tpa_info->mapping;
 
-	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
-		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+	agg_bufs = TPA_END_AGG_BUFS(tpa_end);
 
 	if (agg_bufs) {
+		idx = RING_CMP(*raw_cons);
 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
 			return ERR_PTR(-EBUSY);
 
 		*event |= BNXT_AGG_EVENT;
-		cp_cons = NEXT_CMP(cp_cons);
+		idx = NEXT_CMP(idx);
 	}
 
 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
-		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+		bnxt_abort_tpa(cpr, idx, agg_bufs);
 		if (agg_bufs > MAX_SKB_FRAGS)
 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
 				    agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1449,7 +1452,7 @@
 	if (len <= bp->rx_copy_thresh) {
 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
 		if (!skb) {
-			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+			bnxt_abort_tpa(cpr, idx, agg_bufs);
 			return NULL;
 		}
 	} else {
@@ -1458,7 +1461,7 @@
 
 		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
 		if (!new_data) {
-			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+			bnxt_abort_tpa(cpr, idx, agg_bufs);
 			return NULL;
 		}
 
@@ -1473,7 +1476,7 @@
 
 		if (!skb) {
 			kfree(data);
-			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+			bnxt_abort_tpa(cpr, idx, agg_bufs);
 			return NULL;
 		}
 		skb_reserve(skb, bp->rx_offset);
@@ -1481,7 +1484,7 @@
 	}
 
 	if (agg_bufs) {
-		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
 		if (!skb) {
 			/* Page reuse already handled by bnxt_rx_pages(). */
 			return NULL;
@@ -1625,7 +1628,8 @@
 
 		bnxt_reuse_rx_data(rxr, cons, data);
 		if (agg_bufs)
-			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+					       false);
 
 		rc = -EIO;
 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
@@ -1648,7 +1652,8 @@
 		bnxt_reuse_rx_data(rxr, cons, data);
 		if (!skb) {
 			if (agg_bufs)
-				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+						       agg_bufs, false);
 			rc = -ENOMEM;
 			goto next_rx;
 		}
@@ -1668,7 +1673,7 @@
 	}
 
 	if (agg_bufs) {
-		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
 		if (!skb) {
 			rc = -ENOMEM;
 			goto next_rx;
@@ -2486,6 +2491,33 @@
 	return 0;
 }
 
+static void bnxt_free_tpa_info(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+		kfree(rxr->rx_tpa);
+		rxr->rx_tpa = NULL;
+	}
+}
+
+static int bnxt_alloc_tpa_info(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+		rxr->rx_tpa = kcalloc(MAX_TPA, sizeof(struct bnxt_tpa_info),
+				      GFP_KERNEL);
+		if (!rxr->rx_tpa)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
 static void bnxt_free_rx_rings(struct bnxt *bp)
 {
 	int i;
@@ -2493,6 +2525,7 @@
 	if (!bp->rx_ring)
 		return;
 
+	bnxt_free_tpa_info(bp);
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring;
@@ -2506,9 +2539,6 @@
 		page_pool_destroy(rxr->page_pool);
 		rxr->page_pool = NULL;
 
-		kfree(rxr->rx_tpa);
-		rxr->rx_tpa = NULL;
-
 		kfree(rxr->rx_agg_bmap);
 		rxr->rx_agg_bmap = NULL;
 
@@ -2542,7 +2572,7 @@
 
 static int bnxt_alloc_rx_rings(struct bnxt *bp)
 {
-	int i, rc, agg_rings = 0, tpa_rings = 0;
+	int i, rc = 0, agg_rings = 0;
 
 	if (!bp->rx_ring)
 		return -ENOMEM;
@@ -2550,9 +2580,6 @@
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		agg_rings = 1;
 
-	if (bp->flags & BNXT_FLAG_TPA)
-		tpa_rings = 1;
-
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 		struct bnxt_ring_struct *ring;
@@ -2594,17 +2621,11 @@
 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
 			if (!rxr->rx_agg_bmap)
 				return -ENOMEM;
-
-			if (tpa_rings) {
-				rxr->rx_tpa = kcalloc(MAX_TPA,
-						sizeof(struct bnxt_tpa_info),
-						GFP_KERNEL);
-				if (!rxr->rx_tpa)
-					return -ENOMEM;
-			}
 		}
 	}
-	return 0;
+	if (bp->flags & BNXT_FLAG_TPA)
+		rc = bnxt_alloc_tpa_info(bp);
+	return rc;
 }
 
 static void bnxt_free_tx_rings(struct bnxt *bp)