diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3b10995 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +SOURCES/mlx5_core-redhat-5.0_0_dup8.2.tar.bz2 diff --git a/.kmod-redhat-mlx5_core.metadata b/.kmod-redhat-mlx5_core.metadata new file mode 100644 index 0000000..047b2a3 --- /dev/null +++ b/.kmod-redhat-mlx5_core.metadata @@ -0,0 +1 @@ +c6b12c77b647c399d937311a20a55a2ba2d7fcc5 SOURCES/mlx5_core-redhat-5.0_0_dup8.2.tar.bz2 diff --git a/SOURCES/0001-netdrv-mlx5e-allow-TSO-on-VXLAN-over-VLAN-topologies.patch b/SOURCES/0001-netdrv-mlx5e-allow-TSO-on-VXLAN-over-VLAN-topologies.patch new file mode 100644 index 0000000..5e10712 --- /dev/null +++ b/SOURCES/0001-netdrv-mlx5e-allow-TSO-on-VXLAN-over-VLAN-topologies.patch @@ -0,0 +1,58 @@ +From d03ac6626a42264e3b6a0cea3ec19e8c7a83f326 Mon Sep 17 00:00:00 2001 +From: Davide Caratti +Date: Tue, 28 Jan 2020 09:13:33 -0500 +Subject: [PATCH 001/312] [netdrv] mlx5e: allow TSO on VXLAN over VLAN + topologies + +Message-id: <92832a2adaee9760b05b903f7b15c4b107dab620.1580148241.git.dcaratti@redhat.com> +Patchwork-id: 294141 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 netdrv] net/mlx5e: allow TSO on VXLAN over VLAN topologies +Bugzilla: 1780643 +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Paolo Abeni +RH-Acked-by: David S. Miller + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1780643 +Upstream Status: net-next.git commit a1718505d7f6 +Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=26037831 +Tested: using a variant of the script used to verify bz1626213 +Conflicts: none + +commit a1718505d7f67ee0ab051322f1cbc7ac42b5da82 +Author: Davide Caratti +Date: Thu Jan 9 12:07:59 2020 +0100 + + net/mlx5e: allow TSO on VXLAN over VLAN topologies + + since mlx5 hardware can segment correctly TSO packets on VXLAN over VLAN + topologies, CPU usage can improve significantly if we enable tunnel + offloads in dev->vlan_features, like it was done in the past with other + NIC drivers (e.g. mlx4, be2net and ixgbe). 
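The two-line change below is sufficient because the stack derives an 802.1q upper device's offload set from the lower device's vlan_features (the "features inheritable by VLAN devices" mask). A minimal conceptual sketch of that derivation follows; it uses the kernel's vlan_dev_priv() and netdev_intersect_features() helpers, but vlan_upper_features_sketch() is a made-up name and this is not the verbatim 8021q code:

    #include <linux/netdevice.h>
    #include <linux/if_vlan.h>

    /* Sketch: a VLAN upper can only keep the offloads that its real (lower)
     * device advertises in vlan_features, so exposing the UDP tunnel GSO
     * bits there is what lets TSO stay enabled for VXLAN-over-VLAN traffic
     * egressing through mlx5e.
     */
    static netdev_features_t vlan_upper_features_sketch(struct net_device *vlan_dev,
                                                        netdev_features_t requested)
    {
            struct net_device *real_dev = vlan_dev_priv(vlan_dev)->real_dev;

            return netdev_intersect_features(requested, real_dev->vlan_features);
    }
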
+ + Signed-off-by: Davide Caratti + Signed-off-by: Saeed Mahameed + +Signed-off-by: Davide Caratti +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 00ef0cd3ca13..7447b84e2d44 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4855,6 +4855,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; ++ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | ++ NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + + if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { +-- +2.13.6 + diff --git a/SOURCES/0002-netdrv-net-reject-PTP-periodic-output-requests-with-.patch b/SOURCES/0002-netdrv-net-reject-PTP-periodic-output-requests-with-.patch new file mode 100644 index 0000000..2fc99f0 --- /dev/null +++ b/SOURCES/0002-netdrv-net-reject-PTP-periodic-output-requests-with-.patch @@ -0,0 +1,75 @@ +From 2c53f8c40495fbe39613f8cf3a800474846fa96b Mon Sep 17 00:00:00 2001 +From: Petr Oros +Date: Mon, 24 Feb 2020 16:46:48 -0500 +Subject: [PATCH 002/312] [netdrv] net: reject PTP periodic output requests + with unsupported flags + +Message-id: +Patchwork-id: 295286 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 net PATCH 03/14] net: reject PTP periodic output requests with unsupported flags +Bugzilla: 1795192 +RH-Acked-by: Neil Horman +RH-Acked-by: Prarit Bhargava +RH-Acked-by: Corinna Vinschen +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/show_bug.cgi?id=1795192 + +Conflicts: \ +- Unmerged path drivers/net/ethernet/microchip/lan743x_ptp.c + +Upstream commit(s): +commit 7f9048f1df6f0c1c7a74a15c8b4ce033a753f274 +Author: Jacob Keller +Date: Thu Nov 14 10:44:56 2019 -0800 + + net: reject PTP periodic output requests with unsupported flags + + Commit 823eb2a3c4c7 ("PTP: add support for one-shot output") introduced + a new flag for the PTP periodic output request ioctl. This flag is not + currently supported by any driver. + + Fix all drivers which implement the periodic output request ioctl to + explicitly reject any request with flags they do not understand. This + ensures that the driver does not accidentally misinterpret the + PTP_PEROUT_ONE_SHOT flag, or any new flag introduced in the future. + + This is important for forward compatibility: if a new flag is + introduced, the driver should reject requests to enable the flag until + the driver has actually been modified to support the flag in question. + + Cc: Felipe Balbi + Cc: David S. Miller + Cc: Christopher Hall + Signed-off-by: Jacob Keller + Signed-off-by: Richard Cochran + Tested-by: Aaron Brown + Reviewed-by: Saeed Mahameed + Signed-off-by: David S. 
Miller + +Signed-off-by: Petr Oros +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +index 0059b290e095..cff6b60de304 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +@@ -290,6 +290,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, + if (!MLX5_PPS_CAP(mdev)) + return -EOPNOTSUPP; + ++ /* Reject requests with unsupported flags */ ++ if (rq->perout.flags) ++ return -EOPNOTSUPP; ++ + if (rq->perout.index >= clock->ptp_info.n_pins) + return -EINVAL; + +-- +2.13.6 + diff --git a/SOURCES/0003-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch b/SOURCES/0003-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch new file mode 100644 index 0000000..7722f9b --- /dev/null +++ b/SOURCES/0003-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch @@ -0,0 +1,78 @@ +From 87d65423773d32028e88214dbbb13e147b0388ac Mon Sep 17 00:00:00 2001 +From: Petr Oros +Date: Mon, 24 Feb 2020 16:46:52 -0500 +Subject: [PATCH 003/312] [netdrv] mlx5: reject unsupported external timestamp + flags + +Message-id: <37f4742ef0d140155bdf2a2761983f6b886c9289.1582559430.git.poros@redhat.com> +Patchwork-id: 295290 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 net PATCH 07/14] mlx5: reject unsupported external timestamp flags +Bugzilla: 1795192 +RH-Acked-by: Neil Horman +RH-Acked-by: Prarit Bhargava +RH-Acked-by: Corinna Vinschen +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/show_bug.cgi?id=1795192 + +Upstream commit(s): +commit 2e0645a00e25f7122cad6da57ce3cc855df49ddd +Author: Jacob Keller +Date: Thu Nov 14 10:45:00 2019 -0800 + + mlx5: reject unsupported external timestamp flags + + Fix the mlx5 core PTP support to explicitly reject any future flags that + get added to the external timestamp request ioctl. + + In order to maintain currently functioning code, this patch accepts all + three current flags. This is because the PTP_RISING_EDGE and + PTP_FALLING_EDGE flags have unclear semantics and each driver seems to + have interpreted them slightly differently. + + [ RC: I'm not 100% sure what this driver does, but if I'm not wrong it + follows the dp83640: + + flags Meaning + ---------------------------------------------------- -------------------------- + PTP_ENABLE_FEATURE Time stamp rising edge + PTP_ENABLE_FEATURE|PTP_RISING_EDGE Time stamp rising edge + PTP_ENABLE_FEATURE|PTP_FALLING_EDGE Time stamp falling edge + PTP_ENABLE_FEATURE|PTP_RISING_EDGE|PTP_FALLING_EDGE Time stamp falling edge + ] + + Cc: Feras Daoud + Cc: Eugenia Emantayev + Signed-off-by: Jacob Keller + Reviewed-by: Richard Cochran + Reviewed-by: Saeed Mahameed + Signed-off-by: David S. 
Miller + +Signed-off-by: Petr Oros +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +index cff6b60de304..9a40f24e3193 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +@@ -236,6 +236,12 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, + if (!MLX5_PPS_CAP(mdev)) + return -EOPNOTSUPP; + ++ /* Reject requests with unsupported flags */ ++ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | ++ PTP_RISING_EDGE | ++ PTP_FALLING_EDGE)) ++ return -EOPNOTSUPP; ++ + if (rq->extts.index >= clock->ptp_info.n_pins) + return -EINVAL; + +-- +2.13.6 + diff --git a/SOURCES/0004-netdrv-mlx5e-Reorder-mirrer-action-parsing-to-check-.patch b/SOURCES/0004-netdrv-mlx5e-Reorder-mirrer-action-parsing-to-check-.patch new file mode 100644 index 0000000..a4a5a0f --- /dev/null +++ b/SOURCES/0004-netdrv-mlx5e-Reorder-mirrer-action-parsing-to-check-.patch @@ -0,0 +1,103 @@ +From 1ee524cc59988f1b56d8bc6f1f49ba56223852fe Mon Sep 17 00:00:00 2001 +From: Ivan Vecera +Date: Fri, 27 Mar 2020 19:44:24 -0400 +Subject: [PATCH 004/312] [netdrv] mlx5e: Reorder mirrer action parsing to + check for encap first + +Message-id: <20200327194424.1643094-20-ivecera@redhat.com> +Patchwork-id: 298090 +Patchwork-instance: patchwork +O-Subject: [RHEL-8.3 net PATCH 19/19] net/mlx5e: Reorder mirrer action parsing to check for encap first +Bugzilla: 1818074 +RH-Acked-by: Tony Camuso +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Petr Oros + +Bugzilla: http://bugzilla.redhat.com/show_bug.cgi?id=1818074 + +Upstream commit(s): +commit b6a4ac24c14be1247b0fd896737a01b8fa121318 +Author: Vlad Buslov +Date: Thu Nov 7 13:37:57 2019 +0200 + + net/mlx5e: Reorder mirrer action parsing to check for encap first + + Mirred action parsing code in parse_tc_fdb_actions() first checks if + out_dev has same parent id, and only verifies that there is a pending encap + action that was parsed before. Recent change in vxlan module made function + netdev_port_same_parent_id() to return true when called for mlx5 eswitch + representor and vxlan device created explicitly on mlx5 representor + device (vxlan devices created with "external" flag without explicitly + specifying parent interface are not affected). With call to + netdev_port_same_parent_id() returning true, incorrect code path is chosen + and encap rules fail to offload because vxlan dev is not a valid eswitch + forwarding dev. Dmesg log of error: + + [ 1784.389797] devices ens1f0_0 vxlan1 not on same switch HW, can't offload forwarding + + In order to fix the issue, rearrange conditional in parse_tc_fdb_actions() + to check for pending encap action before checking if out_dev has the same + parent id. 
+ + Fixes: 0ce1822c2a08 ("vxlan: add adjacent link to limit depth level") + Signed-off-by: Vlad Buslov + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Ivan Vecera +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 28 ++++++++++++------------- + 1 file changed, 14 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index db960e3ea3cd..f06e99eb06b9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3270,7 +3270,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; +- if (netdev_port_same_parent_id(priv->netdev, out_dev)) { ++ if (encap) { ++ parse_attr->mirred_ifindex[attr->out_count] = ++ out_dev->ifindex; ++ parse_attr->tun_info[attr->out_count] = dup_tun_info(info); ++ if (!parse_attr->tun_info[attr->out_count]) ++ return -ENOMEM; ++ encap = false; ++ attr->dests[attr->out_count].flags |= ++ MLX5_ESW_DEST_ENCAP; ++ attr->out_count++; ++ /* attr->dests[].rep is resolved when we ++ * handle encap ++ */ ++ } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + struct net_device *uplink_upper; +@@ -3312,19 +3325,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + attr->dests[attr->out_count].rep = rpriv->rep; + attr->dests[attr->out_count].mdev = out_priv->mdev; + attr->out_count++; +- } else if (encap) { +- parse_attr->mirred_ifindex[attr->out_count] = +- out_dev->ifindex; +- parse_attr->tun_info[attr->out_count] = dup_tun_info(info); +- if (!parse_attr->tun_info[attr->out_count]) +- return -ENOMEM; +- encap = false; +- attr->dests[attr->out_count].flags |= +- MLX5_ESW_DEST_ENCAP; +- attr->out_count++; +- /* attr->dests[].rep is resolved when we +- * handle encap +- */ + } else if (parse_attr->filter_dev != priv->netdev) { + /* All mlx5 devices are called to configure + * high level device filters. 
Therefore, the +-- +2.13.6 + diff --git a/SOURCES/0005-netdrv-net-mlx5e-Move-the-SW-XSK-code-from-NAPI-poll.patch b/SOURCES/0005-netdrv-net-mlx5e-Move-the-SW-XSK-code-from-NAPI-poll.patch new file mode 100644 index 0000000..bbc886c --- /dev/null +++ b/SOURCES/0005-netdrv-net-mlx5e-Move-the-SW-XSK-code-from-NAPI-poll.patch @@ -0,0 +1,84 @@ +From edf3630554bc462e0bee93faa5685e8e11a5a936 Mon Sep 17 00:00:00 2001 +From: Jiri Benc +Date: Wed, 22 Apr 2020 18:18:00 -0400 +Subject: [PATCH 005/312] [netdrv] net/mlx5e: Move the SW XSK code from NAPI + poll to a separate function +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Message-id: <6bb2443d30349d894a710f787928942121ac29dc.1587578778.git.jbenc@redhat.com> +Patchwork-id: 304519 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 net 09/46] net/mlx5e: Move the SW XSK code from NAPI poll to a separate function +Bugzilla: 1819630 +RH-Acked-by: Hangbin Liu +RH-Acked-by: Toke Høiland-Jørgensen +RH-Acked-by: Ivan Vecera + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1819630 + +commit 871aa189a69f7bbe6254459d17b78e1cce65c9ae +Author: Maxim Mikityanskiy +Date: Wed Aug 14 09:27:22 2019 +0200 + + net/mlx5e: Move the SW XSK code from NAPI poll to a separate function + + Two XSK tasks are performed during NAPI polling, that are not bound to + hardware interrupts: TXing packets and polling for frames in the Fill + Ring. They are special in a way that the hardware doesn't know about + these tasks, so it doesn't trigger interrupts if there is still some + work to be done, it's our driver's responsibility to ensure NAPI will be + rescheduled if needed. + + Create a new function to handle these tasks and move the corresponding + code from mlx5e_napi_poll to the new function to improve modularity and + prepare for the changes in the following patch. 
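The point about rescheduling is the standard NAPI contract: work that no hardware interrupt will signal (XSK TX, Fill Ring posting) must keep the poll routine scheduled by returning the full budget. A generic sketch of that pattern follows; it is not the mlx5e code, do_irq_driven_work(), do_sw_only_work() and re_arm_irqs() are hypothetical helpers, and only napi_complete_done() is a real kernel API:

    #include <linux/netdevice.h>

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = do_irq_driven_work(napi, budget); /* CQEs etc. */
            bool busy = do_sw_only_work(napi);  /* no interrupt will follow this */

            if (work_done == budget || busy)
                    return budget;          /* stay scheduled, poll again */

            if (napi_complete_done(napi, work_done))
                    re_arm_irqs(napi);      /* hardware will wake us next time */

            return work_done;
    }
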
+ + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Reviewed-by: Saeed Mahameed + Acked-by: Jonathan Lemon + Signed-off-by: Daniel Borkmann + +Signed-off-by: Jiri Benc +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +index 49b06b256c92..6d16dee38ede 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +@@ -81,6 +81,16 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); + } + ++static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq) ++{ ++ bool busy_xsk = false; ++ ++ busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); ++ busy_xsk |= xskrq->post_wqes(xskrq); ++ ++ return busy_xsk; ++} ++ + int mlx5e_napi_poll(struct napi_struct *napi, int budget) + { + struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, +@@ -122,8 +132,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) + if (xsk_open) { + mlx5e_poll_ico_cq(&c->xskicosq.cq); + busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); +- busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); +- busy_xsk |= xskrq->post_wqes(xskrq); ++ busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); + } + + busy |= busy_xsk; +-- +2.13.6 + diff --git a/SOURCES/0006-netdrv-mlx5e-Allow-XSK-frames-smaller-than-a-page.patch b/SOURCES/0006-netdrv-mlx5e-Allow-XSK-frames-smaller-than-a-page.patch new file mode 100644 index 0000000..f66f411 --- /dev/null +++ b/SOURCES/0006-netdrv-mlx5e-Allow-XSK-frames-smaller-than-a-page.patch @@ -0,0 +1,163 @@ +From d1ac1b641ea39e946e94c155520c590a5a27e23a Mon Sep 17 00:00:00 2001 +From: Jiri Benc +Date: Wed, 22 Apr 2020 18:18:11 -0400 +Subject: [PATCH 006/312] [netdrv] mlx5e: Allow XSK frames smaller than a page +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Message-id: <5c0604430537e1022e4424f8683b5611f3ccceb3.1587578778.git.jbenc@redhat.com> +Patchwork-id: 304531 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 net 20/46] net/mlx5e: Allow XSK frames smaller than a page +Bugzilla: 1819630 +RH-Acked-by: Hangbin Liu +RH-Acked-by: Toke Høiland-Jørgensen +RH-Acked-by: Ivan Vecera + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1819630 + +commit 282c0c798f8ec883c2ac2f1ce2dc06ef9421731c +Author: Maxim Mikityanskiy +Date: Tue Aug 27 02:25:26 2019 +0000 + + net/mlx5e: Allow XSK frames smaller than a page + + Relax the requirements to the XSK frame size to allow it to be smaller + than a page and even not a power of two. The current implementation can + work in this mode, both with Striding RQ and without it. + + The code that checks `mtu + headroom <= XSK frame size` is modified + accordingly. Any frame size between 2048 and PAGE_SIZE is accepted. + + Functions that worked with pages only now work with XSK frames, even if + their size is different from PAGE_SIZE. + + With XSK queues, regardless of the frame size, Striding RQ uses the + stride size of PAGE_SIZE, and UMR MTTs are posted using starting + addresses of frames, but PAGE_SIZE as page size. MTU guarantees that no + packet data will overlap with other frames. 
UMR MTT size is made equal + to the stride size of the RQ, because UMEM frames may come in random + order, and we need to handle them one by one. PAGE_SIZE is just a power + of two that is bigger than any allowed XSK frame size, and also it + doesn't require making additional changes to the code. + + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Saeed Mahameed + Acked-by: Jonathan Lemon + Signed-off-by: Daniel Borkmann + +Signed-off-by: Jiri Benc +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/params.c | 23 ++++++++++++++++++---- + .../net/ethernet/mellanox/mlx5/core/en/params.h | 2 ++ + .../net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 2 +- + .../net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 15 +++++++++----- + 4 files changed, 32 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +index 79301d116667..eb2e1f2138e4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +@@ -25,18 +25,33 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, + return headroom; + } + +-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, +- struct mlx5e_xsk_param *xsk) ++u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, ++ struct mlx5e_xsk_param *xsk) + { + u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk); +- u32 frag_sz = linear_rq_headroom + hw_mtu; ++ ++ return linear_rq_headroom + hw_mtu; ++} ++ ++u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, ++ struct mlx5e_xsk_param *xsk) ++{ ++ u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk); + + /* AF_XDP doesn't build SKBs in place. */ + if (!xsk) + frag_sz = MLX5_SKB_FRAG_SZ(frag_sz); + +- /* XDP in mlx5e doesn't support multiple packets per page. */ ++ /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a ++ * special case. It can run with frames smaller than a page, as it ++ * doesn't allocate pages dynamically. However, here we pretend that ++ * fragments are page-sized: it allows to treat XSK frames like pages ++ * by redirecting alloc and free operations to XSK rings and by using ++ * the fact there are no multiple packets per "page" (which is a frame). ++ * The latter is important, because frames may come in a random order, ++ * and we will have trouble assemblying a real page of multiple frames. 
++ */ + if (mlx5e_rx_is_xdp(params, xsk)) + frag_sz = max_t(u32, frag_sz, PAGE_SIZE); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +index 3a615d663d84..989d8f429438 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +@@ -76,6 +76,8 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile, + + u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk); ++u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, ++ struct mlx5e_xsk_param *xsk); + u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk); + u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +index 6a55573ec8f2..3783776b6d70 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +@@ -104,7 +104,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, + + /* head_offset is not used in this function, because di->xsk.data and + * di->addr point directly to the necessary place. Furthermore, in the +- * current implementation, one page = one packet = one frame, so ++ * current implementation, UMR pages are mapped to XSK frames, so + * head_offset should always be 0. + */ + WARN_ON_ONCE(head_offset); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +index d3a173e88e24..81efd2fbc75d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +@@ -4,18 +4,23 @@ + #include "setup.h" + #include "en/params.h" + ++/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may ++ * change unexpectedly, and mlx5e has a minimum valid stride size for striding ++ * RQ, keep this check in the driver. ++ */ ++#define MLX5E_MIN_XSK_CHUNK_SIZE 2048 ++ + bool mlx5e_validate_xsk_param(struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, + struct mlx5_core_dev *mdev) + { +- /* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current +- * mlx5e XDP implementation doesn't support multiple packets per page. +- */ +- if (xsk->chunk_size != PAGE_SIZE) ++ /* AF_XDP doesn't support frames larger than PAGE_SIZE. */ ++ if (xsk->chunk_size > PAGE_SIZE || ++ xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) + return false; + + /* Current MTU and XSK headroom don't allow packets to fit the frames. 
*/ +- if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size) ++ if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size) + return false; + + /* frag_sz is different for regular and XSK RQs, so ensure that linear +-- +2.13.6 + diff --git a/SOURCES/0007-netdrv-net-Use-skb-accessors-in-network-drivers.patch b/SOURCES/0007-netdrv-net-Use-skb-accessors-in-network-drivers.patch new file mode 100644 index 0000000..7ddc8c9 --- /dev/null +++ b/SOURCES/0007-netdrv-net-Use-skb-accessors-in-network-drivers.patch @@ -0,0 +1,55 @@ +From a0952a05dcb2a18564f90d1181591f7682cc9728 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:24 -0400 +Subject: [PATCH 007/312] [netdrv] net: Use skb accessors in network drivers + +Message-id: <20200510145245.10054-2-ahleihel@redhat.com> +Patchwork-id: 306543 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 01/82] net: Use skb accessors in network drivers +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 +Conflicts: + - Take mlx5 changes only. + +commit d7840976e3915669382c62ddd1700960f348328e +Author: Matthew Wilcox (Oracle) +Date: Mon Jul 22 20:08:25 2019 -0700 + + net: Use skb accessors in network drivers + + In preparation for unifying the skb_frag and bio_vec, use the fine + accessors which already exist and use skb_frag_t instead of + struct skb_frag_struct. + + Signed-off-by: Matthew Wilcox (Oracle) + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 79f891c627da..5be0bad6d359 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -211,7 +211,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { +- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, +-- +2.13.6 + diff --git a/SOURCES/0008-netdrv-net-mlx5e-xsk-dynamically-allocate-mlx5e_chan.patch b/SOURCES/0008-netdrv-net-mlx5e-xsk-dynamically-allocate-mlx5e_chan.patch new file mode 100644 index 0000000..95a3de6 --- /dev/null +++ b/SOURCES/0008-netdrv-net-mlx5e-xsk-dynamically-allocate-mlx5e_chan.patch @@ -0,0 +1,123 @@ +From bd1ba9688ed45fe25f151e33657b2c50c0b4f424 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:25 -0400 +Subject: [PATCH 008/312] [netdrv] net/mlx5e: xsk: dynamically allocate + mlx5e_channel_param + +Message-id: <20200510145245.10054-3-ahleihel@redhat.com> +Patchwork-id: 306542 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 02/82] net/mlx5e: xsk: dynamically allocate mlx5e_channel_param +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 658688ce6c936254c34ea1f31549ec62439574aa +Author: Arnd Bergmann +Date: Tue Jul 23 12:02:26 2019 +0000 + + net/mlx5e: xsk: dynamically allocate mlx5e_channel_param + + The structure is too large to put on the stack, 
resulting in a + warning on 32-bit ARM: + + drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c:59:5: error: stack frame size of 1344 bytes in function + 'mlx5e_open_xsk' [-Werror,-Wframe-larger-than=] + + Use kvzalloc() instead. + + Fixes: a038e9794541 ("net/mlx5e: Add XSK zero-copy support") + Signed-off-by: Arnd Bergmann + Signed-off-by: Maxim Mikityanskiy + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 27 ++++++++++++++-------- + 1 file changed, 18 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +index 81efd2fbc75d..79060ee60c98 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +@@ -65,24 +65,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, struct xdp_umem *umem, + struct mlx5e_channel *c) + { +- struct mlx5e_channel_param cparam = {}; ++ struct mlx5e_channel_param *cparam; + struct dim_cq_moder icocq_moder = {}; + int err; + + if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) + return -EINVAL; + +- mlx5e_build_xsk_cparam(priv, params, xsk, &cparam); ++ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL); ++ if (!cparam) ++ return -ENOMEM; + +- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq); ++ mlx5e_build_xsk_cparam(priv, params, xsk, cparam); ++ ++ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq); + if (unlikely(err)) +- return err; ++ goto err_free_cparam; + +- err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq); ++ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq); + if (unlikely(err)) + goto err_close_rx_cq; + +- err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq); ++ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq); + if (unlikely(err)) + goto err_close_rq; + +@@ -92,21 +96,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, + * is disabled and then reenabled, but the SQ continues receiving CQEs + * from the old UMEM. + */ +- err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true); ++ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true); + if (unlikely(err)) + goto err_close_tx_cq; + +- err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq); ++ err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq); + if (unlikely(err)) + goto err_close_sq; + + /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be + * triggered and NAPI to be called on the correct CPU. 
+ */ +- err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq); ++ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq); + if (unlikely(err)) + goto err_close_icocq; + ++ kvfree(cparam); ++ + spin_lock_init(&c->xskicosq_lock); + + set_bit(MLX5E_CHANNEL_STATE_XSK, c->state); +@@ -128,6 +134,9 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, + err_close_rx_cq: + mlx5e_close_cq(&c->xskrq.cq); + ++err_free_cparam: ++ kvfree(cparam); ++ + return err; + } + +-- +2.13.6 + diff --git a/SOURCES/0009-netdrv-net-mlx5-E-Switch-add-ingress-rate-support.patch b/SOURCES/0009-netdrv-net-mlx5-E-Switch-add-ingress-rate-support.patch new file mode 100644 index 0000000..8790b33 --- /dev/null +++ b/SOURCES/0009-netdrv-net-mlx5-E-Switch-add-ingress-rate-support.patch @@ -0,0 +1,276 @@ +From ef56ac3b60e0e366983a421b51afc0e980c7cb1d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:29 -0400 +Subject: [PATCH 009/312] [netdrv] net/mlx5: E-Switch, add ingress rate support + +Message-id: <20200510145245.10054-7-ahleihel@redhat.com> +Patchwork-id: 306545 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 06/82] net/mlx5: E-Switch, add ingress rate support +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit fcb64c0f5640e629bd77c2cb088f9fd70ff5bde7 +Author: Eli Cohen +Date: Wed May 8 11:44:56 2019 +0300 + + net/mlx5: E-Switch, add ingress rate support + + Use the scheduling elements to implement ingress rate limiter on an + eswitch ports ingress traffic. Since the ingress of eswitch port is the + egress of VF port, we control eswitch ingress by controlling VF egress. + + Configuration is done using the ports' representor net devices. + + Please note that burst size configuration is not supported by devices + ConnectX-5 and earlier generations. 
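As an aside on the apply_police_params() hunk further down in this patch: it converts the policer rate from bytes/sec (what FLOW_ACTION_POLICE delivers) to whole Mbit/sec, rounding to nearest and flooring any non-zero rate at 1 Mbit/s. A quick worked check of that arithmetic in plain userspace C; rate_to_mbps() is only an illustrative mirror of the expression shown in the diff, not driver code:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of: rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0 */
    static uint32_t rate_to_mbps(uint64_t rate_bytes_ps)
    {
            uint64_t mbps;

            if (!rate_bytes_ps)
                    return 0;
            mbps = (rate_bytes_ps * 8 + 500000) / 1000000; /* bits, round to nearest Mbit */
            return mbps > 1 ? (uint32_t)mbps : 1;
    }

    int main(void)
    {
            printf("%u\n", rate_to_mbps(125000));   /* tc "rate 1mbit" -> 1          */
            printf("%u\n", rate_to_mbps(37500));    /* 300 kbit/s      -> 1 (floored) */
            printf("%u\n", rate_to_mbps(12500000)); /* 100 Mbit/s      -> 100         */
            return 0;
    }
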
+ + Configuration examples: + tc: + tc filter add dev enp59s0f0_0 root protocol ip matchall action police rate 1mbit burst 20k + + ovs: + ovs-vsctl set interface eth0 ingress_policing_rate=1000 + + Signed-off-by: Eli Cohen + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 19 ++++ + drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1 + + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 100 ++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 7 ++ + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 16 ++++ + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 + + 6 files changed, 145 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index c8ebd93ad5ac..66c8c2ace4b9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1179,6 +1179,23 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, + } + } + ++static ++int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *ma) ++{ ++ switch (ma->command) { ++ case TC_CLSMATCHALL_REPLACE: ++ return mlx5e_tc_configure_matchall(priv, ma); ++ case TC_CLSMATCHALL_DESTROY: ++ return mlx5e_tc_delete_matchall(priv, ma); ++ case TC_CLSMATCHALL_STATS: ++ mlx5e_tc_stats_matchall(priv, ma); ++ return 0; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ + static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) + { +@@ -1188,6 +1205,8 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags); ++ case TC_SETUP_CLSMATCHALL: ++ return mlx5e_rep_setup_tc_cls_matchall(priv, type_data); + default: + return -EOPNOTSUPP; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +index fcc5e52023ef..c8f3bbdc1ffb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +@@ -90,6 +90,7 @@ struct mlx5e_rep_priv { + struct mlx5_flow_handle *vport_rx_rule; + struct list_head vport_sqs_list; + struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */ ++ struct rtnl_link_stats64 prev_vf_vport_stats; + struct devlink_port dl_port; + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index f06e99eb06b9..1f76974dc946 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3932,6 +3932,106 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, + return err; + } + ++static int apply_police_params(struct mlx5e_priv *priv, u32 rate, ++ struct netlink_ext_ack *extack) ++{ ++ struct mlx5e_rep_priv *rpriv = priv->ppriv; ++ struct mlx5_eswitch *esw; ++ u16 vport_num; ++ u32 rate_mbps; ++ int err; ++ ++ esw = priv->mdev->priv.eswitch; ++ /* rate is given in bytes/sec. ++ * First convert to bits/sec and then round to the nearest mbit/secs. ++ * mbit means million bits. ++ * Moreover, if rate is non zero we choose to configure to a minimum of ++ * 1 mbit/sec. ++ */ ++ rate_mbps = rate ? 
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; ++ vport_num = rpriv->rep->vport; ++ ++ err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); ++ if (err) ++ NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); ++ ++ return err; ++} ++ ++static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, ++ struct flow_action *flow_action, ++ struct netlink_ext_ack *extack) ++{ ++ struct mlx5e_rep_priv *rpriv = priv->ppriv; ++ const struct flow_action_entry *act; ++ int err; ++ int i; ++ ++ if (!flow_action_has_entries(flow_action)) { ++ NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); ++ return -EINVAL; ++ } ++ ++ if (!flow_offload_has_one_action(flow_action)) { ++ NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action"); ++ return -EOPNOTSUPP; ++ } ++ ++ flow_action_for_each(i, act, flow_action) { ++ switch (act->id) { ++ case FLOW_ACTION_POLICE: ++ err = apply_police_params(priv, act->police.rate_bytes_ps, extack); ++ if (err) ++ return err; ++ ++ rpriv->prev_vf_vport_stats = priv->stats.vf_vport; ++ break; ++ default: ++ NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); ++ return -EOPNOTSUPP; ++ } ++ } ++ ++ return 0; ++} ++ ++int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *ma) ++{ ++ struct netlink_ext_ack *extack = ma->common.extack; ++ int prio = TC_H_MAJ(ma->common.prio) >> 16; ++ ++ if (prio != 1) { ++ NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); ++ return -EINVAL; ++ } ++ ++ return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); ++} ++ ++int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *ma) ++{ ++ struct netlink_ext_ack *extack = ma->common.extack; ++ ++ return apply_police_params(priv, 0, extack); ++} ++ ++void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *ma) ++{ ++ struct mlx5e_rep_priv *rpriv = priv->ppriv; ++ struct rtnl_link_stats64 cur_stats; ++ u64 dbytes; ++ u64 dpkts; ++ ++ cur_stats = priv->stats.vf_vport; ++ dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; ++ dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; ++ rpriv->prev_vf_vport_stats = cur_stats; ++ flow_stats_update(&ma->stats, dpkts, dbytes, jiffies); ++} ++ + static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, + struct mlx5e_priv *peer_priv) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 876a78a09dd6..924c6ef86a14 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -63,6 +63,13 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, + int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, + struct flow_cls_offload *f, unsigned long flags); + ++int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *f); ++int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *f); ++void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, ++ struct tc_cls_matchall_offload *ma); ++ + struct mlx5e_encap_entry; + void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 691f5e27e389..386e82850ed5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c 
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1580,6 +1580,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, + return 0; + } + ++int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, ++ u32 rate_mbps) ++{ ++ u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; ++ struct mlx5_vport *vport; ++ ++ vport = mlx5_eswitch_get_vport(esw, vport_num); ++ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); ++ ++ return mlx5_modify_scheduling_element_cmd(esw->dev, ++ SCHEDULING_HIERARCHY_E_SWITCH, ++ ctx, ++ vport->qos.esw_tsar_ix, ++ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); ++} ++ + static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) + { + ((u8 *)node_guid)[7] = mac[0]; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 1747b6616e66..436c633407d6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -263,6 +263,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); ++int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, ++ u32 rate_mbps); + + /* E-Switch API */ + int mlx5_eswitch_init(struct mlx5_core_dev *dev); +-- +2.13.6 + diff --git a/SOURCES/0010-netdrv-net-mlx5e-Tx-Strict-the-room-needed-for-SQ-ed.patch b/SOURCES/0010-netdrv-net-mlx5e-Tx-Strict-the-room-needed-for-SQ-ed.patch new file mode 100644 index 0000000..5fa2517 --- /dev/null +++ b/SOURCES/0010-netdrv-net-mlx5e-Tx-Strict-the-room-needed-for-SQ-ed.patch @@ -0,0 +1,56 @@ +From 20db6bb321f335b527ccf7befb50c50696e37ebf Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:30 -0400 +Subject: [PATCH 010/312] [netdrv] net/mlx5e: Tx, Strict the room needed for SQ + edge NOPs + +Message-id: <20200510145245.10054-8-ahleihel@redhat.com> +Patchwork-id: 306547 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 07/82] net/mlx5e: Tx, Strict the room needed for SQ edge NOPs +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 68865419ba1bf502a5bd279a500deda64000249d +Author: Tariq Toukan +Date: Thu Jul 11 11:20:22 2019 +0300 + + net/mlx5e: Tx, Strict the room needed for SQ edge NOPs + + We use NOPs to populate the WQ fragment edge if the WQE does not fit + in frag, to avoid WQEs crossing a page boundary (or wrap-around the WQ). + + The upper bound on the needed number of NOPs is one WQEBB less than + the largest possible WQE, for otherwise the WQE would certainly fit. 
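Concretely, taking MLX5_SEND_WQE_MAX_WQEBBS = 16 (the value implied by the "16 * 4 == 64" comment quoted later in this series), the reserved stop room drops from 16 + 16 = 32 to 16 + 15 = 31 WQEBBs. A tiny self-check of that bound, with the value assumed here rather than read from the driver headers:

    #include <assert.h>
    #include <stdio.h>

    enum { MAX_WQEBBS = 16 };   /* assumed largest WQE size, in basic blocks */

    int main(void)
    {
            int worst_nops = 0;

            /* If a WQE of w WQEBBs does not fit in the r WQEBBs left in the
             * fragment (r < w), all r of them are filled with NOPs, so the
             * worst case is r = MAX_WQEBBS - 1.
             */
            for (int w = 1; w <= MAX_WQEBBS; w++)
                    for (int r = 0; r < w; r++)
                            if (r > worst_nops)
                                    worst_nops = r;

            assert(worst_nops == MAX_WQEBBS - 1);
            printf("stop room: old %d, new %d\n",
                   2 * MAX_WQEBBS, MAX_WQEBBS + worst_nops); /* 32 vs 31 */
            return 0;
    }
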
+ + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index b495e6a976a1..a7a2cd415e69 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -6,7 +6,7 @@ + + #include "en.h" + +-#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS ++#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1) + #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ + MLX5E_SQ_NOPS_ROOM) + +-- +2.13.6 + diff --git a/SOURCES/0011-netdrv-net-mlx5e-XDP-Close-TX-MPWQE-session-when-no-.patch b/SOURCES/0011-netdrv-net-mlx5e-XDP-Close-TX-MPWQE-session-when-no-.patch new file mode 100644 index 0000000..d892cc3 --- /dev/null +++ b/SOURCES/0011-netdrv-net-mlx5e-XDP-Close-TX-MPWQE-session-when-no-.patch @@ -0,0 +1,328 @@ +From eee2fd0e4f3d4d9f833a2eec6169c8c46c9388c2 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:31 -0400 +Subject: [PATCH 011/312] [netdrv] net/mlx5e: XDP, Close TX MPWQE session when + no room for inline packet left + +Message-id: <20200510145245.10054-9-ahleihel@redhat.com> +Patchwork-id: 306548 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 08/82] net/mlx5e: XDP, Close TX MPWQE session when no room for inline packet left +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 6c085a8aab5183d8658c9a692bcfda3e24195b7a +Author: Shay Agroskin +Date: Sun May 12 18:28:27 2019 +0300 + + net/mlx5e: XDP, Close TX MPWQE session when no room for inline packet left + + In MPWQE mode, when transmitting packets with XDP, a packet that is smaller + than a certain size (set to 256 bytes) would be sent inline within its WQE + TX descriptor (mem-copied), in case the hardware tx queue is congested + beyond a pre-defined water-mark. + + If a MPWQE cannot contain an additional inline packet, we close this + MPWQE session, and send the packet inlined within the next MPWQE. + To save some MPWQE session close+open operations, we don't open MPWQE + sessions that are contiguously smaller than certain size (set to the + HW MPWQE maximum size). If there isn't enough contiguous room in the + send queue, we fill it with NOPs and wrap the send queue index around. + + This way, qualified packets are always sent inline. + + Perf tests: + Tested packet rate for UDP 64Byte multi-stream + over two dual port ConnectX-5 100Gbps NICs. + CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz + + XDP_TX: + + With 24 channels: + | ------ | bounced packets | inlined packets | inline ratio | + | before | 113.6Mpps | 96.3Mpps | 84% | + | after | 115Mpps | 99.5Mpps | 86% | + + With one channel: + + | ------ | bounced packets | inlined packets | inline ratio | + | before | 6.7Mpps | 0pps | 0% | + | after | 6.8Mpps | 0pps | 0% | + + As we can see, there is improvement in both inline ratio and overall + packet rate for 24 channels. Also, we see no degradation for the + one-channel case. 
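The mlx5e_xdp_no_room_for_inline_pkt() check added below is a worst-case reservation: with inlining on, the next packet may need up to MLX5E_XDP_INLINE_WQE_MAX_DS_CNT data segments, so the session is closed once that many can no longer fit within MLX5E_XDP_MPW_MAX_NUM_DS. A rough worked example with assumed sizes (16-byte data segment, 4-byte inline segment header, 64-byte cachelines; none of these numbers are taken from this diff):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    enum {
            SEND_WQE_DS           = 16,                   /* assumed DS size, bytes   */
            INLINE_SEG_HDR        = 4,                    /* assumed inline seg header */
            INLINE_WQE_SZ_THRSD   = 256 - INLINE_SEG_HDR, /* 252 */
            INLINE_WQE_MAX_DS_CNT = DIV_ROUND_UP(INLINE_WQE_SZ_THRSD, SEND_WQE_DS), /* 16 */
            MPW_MAX_WQEBBS        = 16 - 1,               /* 64-byte cacheline case */
            MPW_MAX_NUM_DS        = MPW_MAX_WQEBBS * 4,   /* 60 */
    };

    int main(void)
    {
            for (int ds_count = 0; ds_count <= MPW_MAX_NUM_DS; ds_count++)
                    if (ds_count + INLINE_WQE_MAX_DS_CNT > MPW_MAX_NUM_DS) {
                            /* prints 45 of 60 with the assumptions above */
                            printf("close MPWQE session at ds_count >= %d (of %d)\n",
                                   ds_count, MPW_MAX_NUM_DS);
                            break;
                    }
            return 0;
    }
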
+ + Signed-off-by: Shay Agroskin + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 - + drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 32 ++++--------- + drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 53 ++++++++++++++++++---- + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 6 +++ + drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 3 ++ + 5 files changed, 63 insertions(+), 33 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 3b77b43db748..bc2c38faadc8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -488,8 +488,6 @@ struct mlx5e_xdp_mpwqe { + struct mlx5e_tx_wqe *wqe; + u8 ds_count; + u8 pkt_count; +- u8 max_ds_count; +- u8 complete; + u8 inline_on; + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +index b0b982cf69bb..8cb98326531f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +@@ -179,34 +179,22 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) + struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_xdpsq_stats *stats = sq->stats; + struct mlx5_wq_cyc *wq = &sq->wq; +- u8 wqebbs; +- u16 pi; ++ u16 pi, contig_wqebbs; ++ ++ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); ++ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); ++ ++ if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS)) ++ mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs); + + mlx5e_xdpsq_fetch_wqe(sq, &session->wqe); + + prefetchw(session->wqe->data); + session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + session->pkt_count = 0; +- session->complete = 0; + + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + +-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS +- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. +- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a +- * full-session WQE be cache-aligned. 
+- */ +-#if L1_CACHE_BYTES < 128 +-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) +-#else +-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) +-#endif +- +- wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi), +- MLX5E_XDP_MPW_MAX_WQEBBS); +- +- session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs; +- + mlx5e_xdp_update_inline_state(sq); + + stats->mpwqe++; +@@ -244,7 +232,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) + { + if (unlikely(!sq->mpwqe.wqe)) { + if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, +- MLX5_SEND_WQE_MAX_WQEBBS))) { ++ MLX5E_XDPSQ_STOP_ROOM))) { + /* SQ is full, ring doorbell */ + mlx5e_xmit_xdp_doorbell(sq); + sq->stats->full++; +@@ -285,8 +273,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, + + mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats); + +- if (unlikely(session->complete || +- session->ds_count == session->max_ds_count)) ++ if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) || ++ session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS)) + mlx5e_xdp_mpwqe_complete(sq); + + mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +index d5b0d55d434b..c52f72062b33 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +@@ -40,6 +40,26 @@ + (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) + ++#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM) ++ ++#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) ++#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \ ++ DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS) ++ ++/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS ++ * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. ++ * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a ++ * full-session WQE be cache-aligned. 
++ */ ++#if L1_CACHE_BYTES < 128 ++#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) ++#else ++#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) ++#endif ++ ++#define MLX5E_XDP_MPW_MAX_NUM_DS \ ++ (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) ++ + struct mlx5e_xsk_param; + int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); + bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, +@@ -110,6 +130,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq) + session->inline_on = 1; + } + ++static inline bool ++mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session) ++{ ++ return session->inline_on && ++ session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS; ++} ++ ++static inline void ++mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq, ++ u16 pi, u16 nnops) ++{ ++ struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; ++ ++ edge_wi = wi + nnops; ++ /* fill sq frag edge with nops to avoid wqe wrapping two pages */ ++ for (; wi < edge_wi; wi++) { ++ wi->num_wqebbs = 1; ++ wi->num_pkts = 0; ++ mlx5e_post_nop(wq, sq->sqn, &sq->pc); ++ } ++ ++ sq->stats->nops += nnops; ++} ++ + static inline void + mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, + struct mlx5e_xdp_xmit_data *xdptxd, +@@ -122,20 +166,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, + + session->pkt_count++; + +-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg)) +- + if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) { + struct mlx5_wqe_inline_seg *inline_dseg = + (struct mlx5_wqe_inline_seg *)dseg; + u16 ds_len = sizeof(*inline_dseg) + dma_len; + u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS); + +- if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) { +- /* Not enough space for inline wqe, send with memory pointer */ +- session->complete = true; +- goto no_inline; +- } +- + inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG); + memcpy(inline_dseg->data, xdptxd->data, dma_len); + +@@ -144,7 +180,6 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, + return; + } + +-no_inline: + dseg->addr = cpu_to_be64(xdptxd->dma_addr); + dseg->byte_count = cpu_to_be32(dma_len); + dseg->lkey = sq->mkey_be; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index b4f5ae30dae2..3d993e2e7bea 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -126,6 +126,7 @@ static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, +@@ -142,6 +143,7 @@ static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, + { 
MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, +@@ -252,6 +254,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) + s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; + s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; ++ s->rx_xdp_tx_nops += xdpsq_stats->nops; + s->rx_xdp_tx_full += xdpsq_stats->full; + s->rx_xdp_tx_err += xdpsq_stats->err; + s->rx_xdp_tx_cqe += xdpsq_stats->cqes; +@@ -279,6 +282,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) + s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; + s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; ++ s->tx_xdp_nops += xdpsq_red_stats->nops; + s->tx_xdp_full += xdpsq_red_stats->full; + s->tx_xdp_err += xdpsq_red_stats->err; + s->tx_xdp_cqes += xdpsq_red_stats->cqes; +@@ -1517,6 +1521,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = { + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, ++ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +@@ -1526,6 +1531,7 @@ static const struct counter_desc xdpsq_stats_desc[] = { + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, ++ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +index 0f9fa22a955e..a4a43613d026 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +@@ -132,6 +132,7 @@ struct mlx5e_sw_stats { + u64 rx_xdp_tx_xmit; + u64 rx_xdp_tx_mpwqe; + u64 rx_xdp_tx_inlnw; ++ u64 rx_xdp_tx_nops; + u64 rx_xdp_tx_full; + u64 rx_xdp_tx_err; + u64 rx_xdp_tx_cqe; +@@ -148,6 +149,7 @@ struct mlx5e_sw_stats { + u64 tx_xdp_xmit; + u64 tx_xdp_mpwqe; + u64 tx_xdp_inlnw; ++ u64 tx_xdp_nops; + u64 tx_xdp_full; + u64 tx_xdp_err; + u64 tx_xdp_cqes; +@@ -341,6 +343,7 @@ struct mlx5e_xdpsq_stats { + u64 xmit; + u64 mpwqe; + u64 inlnw; ++ u64 nops; + u64 full; + u64 err; + /* dirtied @completion */ +-- +2.13.6 + diff --git a/SOURCES/0012-netdrv-net-mlx5e-XDP-Slight-enhancement-for-WQE-fetc.patch b/SOURCES/0012-netdrv-net-mlx5e-XDP-Slight-enhancement-for-WQE-fetc.patch new file mode 100644 index 0000000..42665ed --- /dev/null +++ b/SOURCES/0012-netdrv-net-mlx5e-XDP-Slight-enhancement-for-WQE-fetc.patch @@ -0,0 +1,90 @@ +From b881433dfe615d066de735fd8b7e49db22fd4460 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:32 -0400 +Subject: [PATCH 012/312] [netdrv] net/mlx5e: XDP, Slight enhancement for WQE + fetch function + +Message-id: <20200510145245.10054-10-ahleihel@redhat.com> +Patchwork-id: 306549 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 09/82] net/mlx5e: XDP, Slight enhancement for WQE fetch function +Bugzilla: 1789378 +RH-Acked-by: Kamal 
Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 7cf6f811b72aced0c48e1065fe059d604ef6363d +Author: Tariq Toukan +Date: Sun Jul 14 17:50:51 2019 +0300 + + net/mlx5e: XDP, Slight enhancement for WQE fetch function + + Instead of passing an output param, let function return the + WQE pointer. + In addition, pass &pi so it gets its value in the function, + and save the redundant assignment that comes after it. + + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 4 +--- + drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 13 ++++++++----- + 2 files changed, 9 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +index 8cb98326531f..1ed5c33e022f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +@@ -187,14 +187,12 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) + if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS)) + mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs); + +- mlx5e_xdpsq_fetch_wqe(sq, &session->wqe); ++ session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi); + + prefetchw(session->wqe->data); + session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + session->pkt_count = 0; + +- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); +- + mlx5e_xdp_update_inline_state(sq); + + stats->mpwqe++; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +index c52f72062b33..d7587f40ecae 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +@@ -186,14 +186,17 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, + session->ds_count++; + } + +-static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, +- struct mlx5e_tx_wqe **wqe) ++static inline struct mlx5e_tx_wqe * ++mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi) + { + struct mlx5_wq_cyc *wq = &sq->wq; +- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); ++ struct mlx5e_tx_wqe *wqe; + +- *wqe = mlx5_wq_cyc_get_wqe(wq, pi); +- memset(*wqe, 0, sizeof(**wqe)); ++ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); ++ wqe = mlx5_wq_cyc_get_wqe(wq, *pi); ++ memset(wqe, 0, sizeof(*wqe)); ++ ++ return wqe; + } + + static inline void +-- +2.13.6 + diff --git a/SOURCES/0013-netdrv-net-mlx5e-Tx-Soften-inline-mode-VLAN-dependen.patch b/SOURCES/0013-netdrv-net-mlx5e-Tx-Soften-inline-mode-VLAN-dependen.patch new file mode 100644 index 0000000..e6c3521 --- /dev/null +++ b/SOURCES/0013-netdrv-net-mlx5e-Tx-Soften-inline-mode-VLAN-dependen.patch @@ -0,0 +1,222 @@ +From a1c13dde2d8edd63949ada1ee41cd9d88b328aaa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:33 -0400 +Subject: [PATCH 013/312] [netdrv] net/mlx5e: Tx, Soften inline mode VLAN + dependencies + +Message-id: <20200510145245.10054-11-ahleihel@redhat.com> +Patchwork-id: 306551 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 10/82] net/mlx5e: Tx, Soften inline mode VLAN dependencies +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit b431302e92f00b7acd5617a4d289f8006394bfc2 +Author: Tariq 
Toukan +Date: Mon Jul 1 12:08:08 2019 +0300 + + net/mlx5e: Tx, Soften inline mode VLAN dependencies + + If capable, use zero inline mode in TX WQE for non-VLAN packets. + For VLAN ones, keep the enforcement of at least L2 inline mode, + unless the WQE VLAN insertion offload cap is on. + + Performance: + Tested single core packet rate of 64Bytes. + + NIC: ConnectX-5 + CPU: Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz + + pktgen: + Before: 12.46 Mpps + After: 14.65 Mpps (+17.5%) + + XDP_TX: + The MPWQE flow is not affected, as it already has this optimization. + So we test with priv-flag xdp_tx_mpwqe: off. + + Before: 9.90 Mpps + After: 10.20 Mpps (+3%) + + Signed-off-by: Tariq Toukan + Tested-by: Noam Stolero + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 22 ++++++++++++++++++++-- + .../net/ethernet/mellanox/mlx5/core/en_common.c | 12 ------------ + drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +++- + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 7 ++++--- + drivers/net/ethernet/mellanox/mlx5/core/vport.c | 7 ++++--- + 7 files changed, 33 insertions(+), 23 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index bc2c38faadc8..84575c0bcca6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -364,6 +364,7 @@ enum { + MLX5E_SQ_STATE_IPSEC, + MLX5E_SQ_STATE_AM, + MLX5E_SQ_STATE_TLS, ++ MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, + }; + + struct mlx5e_sq_wqe_info { +@@ -1151,7 +1152,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); + void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, + u16 num_channels); +-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); + void mlx5e_rx_dim_work(struct work_struct *work); + void mlx5e_tx_dim_work(struct work_struct *work); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index a7a2cd415e69..182d5c5664eb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, + mlx5_write64((__be32 *)ctrl, uar_map); + } + +-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe) ++static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg) + { +- return !!wqe->ctrl.tisn; ++ return cseg && !!cseg->tisn; ++} ++ ++static inline u8 ++mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, ++ struct sk_buff *skb) ++{ ++ u8 mode; ++ ++ if (mlx5e_transport_inline_tx_wqe(cseg)) ++ return MLX5_INLINE_MODE_TCP_UDP; ++ ++ mode = sq->min_inline_mode; ++ ++ if (skb_vlan_tag_present(skb) && ++ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state)) ++ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode); ++ ++ return mode; + } + + static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +index 1539cf3de5dc..f7890e0ce96c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +@@ -180,15 +180,3 @@ int 
mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) + + return err; + } +- +-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev) +-{ +- u8 min_inline_mode; +- +- mlx5_query_min_inline(mdev, &min_inline_mode); +- if (min_inline_mode == MLX5_INLINE_MODE_NONE && +- !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) +- min_inline_mode = MLX5_INLINE_MODE_L2; +- +- return min_inline_mode; +-} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +index 8dd31b5c740c..01f2918063af 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +@@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) + static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv, + struct mlx5e_params *params) + { +- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev); ++ mlx5_query_min_inline(priv->mdev, ¶ms->tx_min_inline_mode); + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP && + params->tx_min_inline_mode == MLX5_INLINE_MODE_L2) + params->tx_min_inline_mode = MLX5_INLINE_MODE_IP; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 7447b84e2d44..5be38cf34551 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -1121,6 +1121,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + sq->stop_room = MLX5E_SQ_STOP_ROOM; + INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); ++ if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) ++ set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); + if (MLX5_IPSEC_DEV(c->priv->mdev)) + set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); + if (mlx5_accel_is_tls_device(c->priv->mdev)) { +@@ -4772,7 +4774,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, + mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); + + /* TX inline */ +- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); ++ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); + + /* RSS */ + mlx5e_build_rss_params(rss_params, params->num_channels); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 5be0bad6d359..9cc22b62d73d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -293,8 +293,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + stats->packets += skb_shinfo(skb)->gso_segs; + } else { +- u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ? 
+- MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode; ++ u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb); + + opcode = MLX5_OPCODE_SEND; + mss = 0; +@@ -612,9 +611,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + stats->packets += skb_shinfo(skb)->gso_segs; + } else { ++ u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb); ++ + opcode = MLX5_OPCODE_SEND; + mss = 0; +- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); ++ ihs = mlx5e_calc_min_inline(mode, skb); + num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + stats->packets++; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +index c912d82ca64b..30f7848a6f88 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +@@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) + { + switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) { ++ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: ++ if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode)) ++ break; ++ /* fall through */ + case MLX5_CAP_INLINE_MODE_L2: + *min_inline_mode = MLX5_INLINE_MODE_L2; + break; +- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: +- mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode); +- break; + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; +-- +2.13.6 + diff --git a/SOURCES/0014-netdrv-net-mlx5e-Rx-checksum-handling-refactoring.patch b/SOURCES/0014-netdrv-net-mlx5e-Rx-checksum-handling-refactoring.patch new file mode 100644 index 0000000..7cc7849 --- /dev/null +++ b/SOURCES/0014-netdrv-net-mlx5e-Rx-checksum-handling-refactoring.patch @@ -0,0 +1,88 @@ +From e31d980ab9849683588324b04f8596e901b3721e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:34 -0400 +Subject: [PATCH 014/312] [netdrv] net/mlx5e: Rx, checksum handling refactoring + +Message-id: <20200510145245.10054-12-ahleihel@redhat.com> +Patchwork-id: 306554 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 11/82] net/mlx5e: Rx, checksum handling refactoring +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 8c7698d5caa7852bebae0cf7402b7d3a1f30423b +Author: Saeed Mahameed +Date: Fri May 3 15:12:46 2019 -0700 + + net/mlx5e: Rx, checksum handling refactoring + + Move vlan checksum fixup flow into mlx5e_skb_padding_csum(), which is + supposed to fixup SKB checksum if needed. And rename + mlx5e_skb_padding_csum() to mlx5e_skb_csum_fixup(). 
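The fixup described above relies on one property of the Internet checksum: bytes the device skipped (here, a VLAN tag sitting between the Ethernet and IP headers) can simply be summed into the existing partial checksum afterwards. Below is a minimal, self-contained C sketch of that idea only; csum_accumulate() and csum_fold() are illustrative stand-ins, not the kernel's csum_partial()/csum_fold(), and the byte arrays are made up.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins: accumulate a byte range into a 32-bit
 * one's-complement sum, then fold it down to 16 bits. */
static uint32_t csum_accumulate(const uint8_t *data, size_t len, uint32_t sum)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)data[i] << 8 | data[i + 1];
        if (len & 1)                    /* trailing odd byte */
                sum += (uint32_t)data[len - 1] << 8;
        return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)               /* fold carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t vlan_hdr[4] = { 0x81, 0x00, 0x00, 0x64 };  /* pretend VLAN tag bytes */
        uint8_t payload[6]  = { 1, 2, 3, 4, 5, 6 };        /* pretend IP-and-beyond bytes */

        /* Checksum reported by "hardware": covers the payload only. */
        uint32_t hw_sum = csum_accumulate(payload, sizeof(payload), 0);

        /* Fixup: also feed in the header bytes the hardware skipped. */
        uint32_t fixed = csum_accumulate(vlan_hdr, sizeof(vlan_hdr), hw_sum);

        printf("payload only: 0x%04x, with vlan fixup: 0x%04x\n",
               csum_fold(hw_sum), csum_fold(fixed));
        return 0;
}

Running the sketch prints the folded checksum with and without the extra header bytes, which is all the driver-side fixup amounts to.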
+ + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 26 +++++++++++++------------ + 1 file changed, 14 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 6518d1101de0..a22b3a3db253 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -860,13 +860,24 @@ tail_padding_csum(struct sk_buff *skb, int offset, + } + + static void +-mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, +- struct mlx5e_rq_stats *stats) ++mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto, ++ struct mlx5e_rq_stats *stats) + { + struct ipv6hdr *ip6; + struct iphdr *ip4; + int pkt_len; + ++ /* Fixup vlan headers, if any */ ++ if (network_depth > ETH_HLEN) ++ /* CQE csum is calculated from the IP header and does ++ * not cover VLAN headers (if present). This will add ++ * the checksum manually. ++ */ ++ skb->csum = csum_partial(skb->data + ETH_HLEN, ++ network_depth - ETH_HLEN, ++ skb->csum); ++ ++ /* Fixup tail padding, if any */ + switch (proto) { + case htons(ETH_P_IP): + ip4 = (struct iphdr *)(skb->data + network_depth); +@@ -932,16 +943,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, + return; /* CQE csum covers all received bytes */ + + /* csum might need some fixups ...*/ +- if (network_depth > ETH_HLEN) +- /* CQE csum is calculated from the IP header and does +- * not cover VLAN headers (if present). This will add +- * the checksum manually. +- */ +- skb->csum = csum_partial(skb->data + ETH_HLEN, +- network_depth - ETH_HLEN, +- skb->csum); +- +- mlx5e_skb_padding_csum(skb, network_depth, proto, stats); ++ mlx5e_skb_csum_fixup(skb, network_depth, proto, stats); + return; + } + +-- +2.13.6 + diff --git a/SOURCES/0015-netdrv-net-mlx5e-Set-tx-reporter-only-on-successful-.patch b/SOURCES/0015-netdrv-net-mlx5e-Set-tx-reporter-only-on-successful-.patch new file mode 100644 index 0000000..d4f27a0 --- /dev/null +++ b/SOURCES/0015-netdrv-net-mlx5e-Set-tx-reporter-only-on-successful-.patch @@ -0,0 +1,103 @@ +From ac0e05eab5ead240e977ff6b629bfddf78c5c2c6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:35 -0400 +Subject: [PATCH 015/312] [netdrv] net/mlx5e: Set tx reporter only on + successful creation + +Message-id: <20200510145245.10054-13-ahleihel@redhat.com> +Patchwork-id: 306553 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 12/82] net/mlx5e: Set tx reporter only on successful creation +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit baf6dfdb10e9695637d72429159fd26fc36d30c3 +Author: Aya Levin +Date: Mon Jun 24 19:34:42 2019 +0300 + + net/mlx5e: Set tx reporter only on successful creation + + When failing to create tx reporter, don't set the reporter's pointer. + Creating a reporter is not mandatory for driver load, avoid + garbage/error pointer. 
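The pattern described above is: create into a local variable and publish to the long-lived pointer only after the error check, so the pointer is either valid or NULL, never an error-encoded value. A small self-contained C sketch of that pattern follows; err_ptr()/is_err(), struct reporter and create_reporter() are illustrative stand-ins, not kernel or driver APIs.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long  ptr_err(const void *p) { return (long)p; }

struct reporter { int dummy; };
struct priv     { struct reporter *tx_reporter; };

/* Hypothetical constructor that can fail. */
static struct reporter *create_reporter(int fail)
{
        static struct reporter r;

        return fail ? err_ptr(-ENOMEM) : &r;
}

static int reporter_attach(struct priv *priv, int fail)
{
        struct reporter *reporter = create_reporter(fail);

        if (is_err(reporter)) {
                /* On failure the long-lived pointer is never written, so
                 * priv->tx_reporter stays NULL rather than holding an
                 * error-encoded pointer. */
                fprintf(stderr, "create failed: %ld\n", ptr_err(reporter));
                return (int)ptr_err(reporter);
        }
        priv->tx_reporter = reporter;   /* publish only on success */
        return 0;
}

int main(void)
{
        struct priv priv = { 0 };

        reporter_attach(&priv, 1);
        printf("after failed create, tx_reporter is %s\n",
               priv.tx_reporter ? "set" : "NULL");
        return 0;
}

With this shape, later code can safely use a plain NULL check instead of IS_ERR_OR_NULL(), which is exactly the simplification the diff below makes.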
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 14 ++++++++------ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- + 2 files changed, 9 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index 2b3d2292b8c5..d9116e77ef68 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -116,7 +116,7 @@ static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, + char *err_str, + struct mlx5e_tx_err_ctx *err_ctx) + { +- if (IS_ERR_OR_NULL(tx_reporter)) { ++ if (!tx_reporter) { + netdev_err(err_ctx->sq->channel->netdev, err_str); + return err_ctx->recover(err_ctx->sq); + } +@@ -288,25 +288,27 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { + + int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) + { ++ struct devlink_health_reporter *reporter; + struct mlx5_core_dev *mdev = priv->mdev; + struct devlink *devlink = priv_to_devlink(mdev); + +- priv->tx_reporter = ++ reporter = + devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, + MLX5_REPORTER_TX_GRACEFUL_PERIOD, + true, priv); +- if (IS_ERR(priv->tx_reporter)) { ++ if (IS_ERR(reporter)) { + netdev_warn(priv->netdev, + "Failed to create tx reporter, err = %ld\n", +- PTR_ERR(priv->tx_reporter)); +- return PTR_ERR(priv->tx_reporter); ++ PTR_ERR(reporter)); ++ return PTR_ERR(reporter); + } ++ priv->tx_reporter = reporter; + return 0; + } + + void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv) + { +- if (IS_ERR_OR_NULL(priv->tx_reporter)) ++ if (!priv->tx_reporter) + return; + + devlink_health_reporter_destroy(priv->tx_reporter); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 5be38cf34551..9ffcfa017d4f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2323,7 +2323,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, + goto err_close_channels; + } + +- if (!IS_ERR_OR_NULL(priv->tx_reporter)) ++ if (priv->tx_reporter) + devlink_health_reporter_state_update(priv->tx_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); + +-- +2.13.6 + diff --git a/SOURCES/0016-netdrv-net-mlx5e-TX-reporter-cleanup.patch b/SOURCES/0016-netdrv-net-mlx5e-TX-reporter-cleanup.patch new file mode 100644 index 0000000..3d77bac --- /dev/null +++ b/SOURCES/0016-netdrv-net-mlx5e-TX-reporter-cleanup.patch @@ -0,0 +1,65 @@ +From a9b583f090f95ac9fe24ef0906c897f216014da3 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:36 -0400 +Subject: [PATCH 016/312] [netdrv] net/mlx5e: TX reporter cleanup + +Message-id: <20200510145245.10054-14-ahleihel@redhat.com> +Patchwork-id: 306552 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 13/82] net/mlx5e: TX reporter cleanup +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit c9e6c7209a9a26a0281b311c6880b9e2382ad635 +Author: Aya Levin +Date: Mon Jun 24 20:33:52 2019 +0300 + + net/mlx5e: TX reporter cleanup + + Remove redundant include files. 
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h | 1 - + drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 1 - + 2 files changed, 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h +index e78e92753d73..ed7a3881d2c5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h +@@ -4,7 +4,6 @@ + #ifndef __MLX5E_EN_REPORTER_H + #define __MLX5E_EN_REPORTER_H + +-#include + #include "en.h" + + int mlx5e_tx_reporter_create(struct mlx5e_priv *priv); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index d9116e77ef68..817c6ea7e349 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -1,7 +1,6 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + /* Copyright (c) 2019 Mellanox Technologies. */ + +-#include + #include "reporter.h" + #include "lib/eq.h" + +-- +2.13.6 + diff --git a/SOURCES/0017-netdrv-net-mlx5e-Allow-dropping-specific-tunnel-pack.patch b/SOURCES/0017-netdrv-net-mlx5e-Allow-dropping-specific-tunnel-pack.patch new file mode 100644 index 0000000..30952fa --- /dev/null +++ b/SOURCES/0017-netdrv-net-mlx5e-Allow-dropping-specific-tunnel-pack.patch @@ -0,0 +1,63 @@ +From e8913d1bb5b7a35a1ddc3d58fb18ec240b2d2110 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:37 -0400 +Subject: [PATCH 017/312] [netdrv] net/mlx5e: Allow dropping specific tunnel + packets + +Message-id: <20200510145245.10054-15-ahleihel@redhat.com> +Patchwork-id: 306555 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 14/82] net/mlx5e: Allow dropping specific tunnel packets +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 6830b468259b45e3b73070474b8cec9388aa8c11 +Author: Tonghao Zhang +Date: Thu Aug 1 16:40:59 2019 +0800 + + net/mlx5e: Allow dropping specific tunnel packets + + In some case, we don't want to allow specific tunnel packets + to host that can avoid to take up high CPU (e.g network attacks). + But other tunnel packets which not matched in hardware will be + sent to host too. 
+ + $ tc filter add dev vxlan_sys_4789 \ + protocol ip chain 0 parent ffff: prio 1 handle 1 \ + flower dst_ip 1.1.1.100 ip_proto tcp dst_port 80 \ + enc_dst_ip 2.2.2.100 enc_key_id 100 enc_dst_port 4789 \ + action tunnel_key unset pipe action drop + + Signed-off-by: Tonghao Zhang + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 1f76974dc946..d7d2151d1ef3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2715,7 +2715,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + + if (flow_flag_test(flow, EGRESS) && + !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || +- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP))) ++ (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || ++ (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) + return false; + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) +-- +2.13.6 + diff --git a/SOURCES/0018-netdrv-mlx5-no-need-to-check-return-value-of-debugfs.patch b/SOURCES/0018-netdrv-mlx5-no-need-to-check-return-value-of-debugfs.patch new file mode 100644 index 0000000..797260e --- /dev/null +++ b/SOURCES/0018-netdrv-mlx5-no-need-to-check-return-value-of-debugfs.patch @@ -0,0 +1,436 @@ +From fffd2ca5a253c4a49aa53caa87e833bd0d56e78a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:39 -0400 +Subject: [PATCH 018/312] [netdrv] mlx5: no need to check return value of + debugfs_create functions + +Message-id: <20200510145245.10054-17-ahleihel@redhat.com> +Patchwork-id: 306556 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 16/82] mlx5: no need to check return value of debugfs_create functions +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 9f818c8a7388ad1a5c60ace50be6f658c058a5f2 +Author: Greg Kroah-Hartman +Date: Sat Aug 10 12:17:18 2019 +0200 + + mlx5: no need to check return value of debugfs_create functions + + When calling debugfs functions, there is no need to ever check the + return value. The function can work or not, but the code logic should + never do something different based on this. + + This cleans up a lot of unneeded code and logic around the debugfs + files, making all of this much simpler and easier to understand as we + don't need to keep the dentries saved anymore. + + Cc: Saeed Mahameed + Cc: Leon Romanovsky + Cc: netdev@vger.kernel.org + Signed-off-by: Greg Kroah-Hartman + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 51 ++--------- + drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 102 ++------------------- + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 11 +-- + drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 7 +- + .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 +- + include/linux/mlx5/driver.h | 4 +- + 7 files changed, 24 insertions(+), 155 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 8da5a1cd87af..4b7ca04ae25e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -1368,49 +1368,19 @@ static void clean_debug_files(struct mlx5_core_dev *dev) + debugfs_remove_recursive(dbg->dbg_root); + } + +-static int create_debugfs_files(struct mlx5_core_dev *dev) ++static void create_debugfs_files(struct mlx5_core_dev *dev) + { + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; +- int err = -ENOMEM; +- +- if (!mlx5_debugfs_root) +- return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); +- if (!dbg->dbg_root) +- return err; +- +- dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, +- dev, &dfops); +- if (!dbg->dbg_in) +- goto err_dbg; + +- dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, +- dev, &dfops); +- if (!dbg->dbg_out) +- goto err_dbg; +- +- dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, +- dev, &olfops); +- if (!dbg->dbg_outlen) +- goto err_dbg; +- +- dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root, +- &dbg->status); +- if (!dbg->dbg_status) +- goto err_dbg; +- +- dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); +- if (!dbg->dbg_run) +- goto err_dbg; ++ debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); ++ debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); ++ debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops); ++ debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); ++ debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); + + mlx5_cmdif_debugfs_init(dev); +- +- return 0; +- +-err_dbg: +- clean_debug_files(dev); +- return err; + } + + static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) +@@ -2007,17 +1977,10 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) + goto err_cache; + } + +- err = create_debugfs_files(dev); +- if (err) { +- err = -ENOMEM; +- goto err_wq; +- } ++ create_debugfs_files(dev); + + return 0; + +-err_wq: +- destroy_workqueue(cmd->wq); +- + err_cache: + destroy_msg_cache(dev); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +index a11e22d0b0cc..04854e5fbcd7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +@@ -92,8 +92,6 @@ EXPORT_SYMBOL(mlx5_debugfs_root); + void mlx5_register_debugfs(void) + { + mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); +- if (IS_ERR_OR_NULL(mlx5_debugfs_root)) +- mlx5_debugfs_root = NULL; + } + + void mlx5_unregister_debugfs(void) +@@ -101,45 +99,25 @@ void mlx5_unregister_debugfs(void) + debugfs_remove(mlx5_debugfs_root); + } + +-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) ++void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return 0; +- + atomic_set(&dev->num_qps, 0); 
+ + dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); +- if (!dev->priv.qp_debugfs) +- return -ENOMEM; +- +- return 0; + } + + void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return; +- + debugfs_remove_recursive(dev->priv.qp_debugfs); + } + +-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) ++void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return 0; +- + dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); +- if (!dev->priv.eq_debugfs) +- return -ENOMEM; +- +- return 0; + } + + void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return; +- + debugfs_remove_recursive(dev->priv.eq_debugfs); + } + +@@ -183,85 +161,41 @@ static const struct file_operations stats_fops = { + .write = average_write, + }; + +-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) ++void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) + { + struct mlx5_cmd_stats *stats; + struct dentry **cmd; + const char *namep; +- int err; + int i; + +- if (!mlx5_debugfs_root) +- return 0; +- + cmd = &dev->priv.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); +- if (!*cmd) +- return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { + stats = &dev->cmd.stats[i]; + namep = mlx5_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = debugfs_create_dir(namep, *cmd); +- if (!stats->root) { +- mlx5_core_warn(dev, "failed adding command %d\n", +- i); +- err = -ENOMEM; +- goto out; +- } +- +- stats->avg = debugfs_create_file("average", 0400, +- stats->root, stats, +- &stats_fops); +- if (!stats->avg) { +- mlx5_core_warn(dev, "failed creating debugfs file\n"); +- err = -ENOMEM; +- goto out; +- } +- +- stats->count = debugfs_create_u64("n", 0400, +- stats->root, +- &stats->n); +- if (!stats->count) { +- mlx5_core_warn(dev, "failed creating debugfs file\n"); +- err = -ENOMEM; +- goto out; +- } ++ ++ debugfs_create_file("average", 0400, stats->root, stats, ++ &stats_fops); ++ debugfs_create_u64("n", 0400, stats->root, &stats->n); + } + } +- +- return 0; +-out: +- debugfs_remove_recursive(dev->priv.cmdif_debugfs); +- return err; + } + + void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return; +- + debugfs_remove_recursive(dev->priv.cmdif_debugfs); + } + +-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) ++void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return 0; +- + dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); +- if (!dev->priv.cq_debugfs) +- return -ENOMEM; +- +- return 0; + } + + void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) + { +- if (!mlx5_debugfs_root) +- return; +- + debugfs_remove_recursive(dev->priv.cq_debugfs); + } + +@@ -484,7 +418,6 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, + { + struct mlx5_rsc_debug *d; + char resn[32]; +- int err; + int i; + + d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL); +@@ -496,30 +429,15 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); +- if (!d->root) { +- err = -ENOMEM; +- goto out_free; +- } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; +- d->fields[i].dent = debugfs_create_file(field[i], 0400, +- d->root, &d->fields[i], +- &fops); +- if (!d->fields[i].dent) { +- err = -ENOMEM; +- goto 
out_rem; +- } ++ debugfs_create_file(field[i], 0400, d->root, &d->fields[i], ++ &fops); + } + *dbg = d; + + return 0; +-out_rem: +- debugfs_remove_recursive(d->root); +- +-out_free: +- kfree(d); +- return err; + } + + static void rem_res_tree(struct mlx5_rsc_debug *d) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +index 2df9aaa421c6..09d4c64b6e73 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +@@ -411,7 +411,7 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) + int mlx5_eq_table_init(struct mlx5_core_dev *dev) + { + struct mlx5_eq_table *eq_table; +- int i, err; ++ int i; + + eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL); + if (!eq_table) +@@ -419,9 +419,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) + + dev->priv.eq_table = eq_table; + +- err = mlx5_eq_debugfs_init(dev); +- if (err) +- goto kvfree_eq_table; ++ mlx5_eq_debugfs_init(dev); + + mutex_init(&eq_table->lock); + for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++) +@@ -429,11 +427,6 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) + + eq_table->irq_table = dev->priv.irq_table; + return 0; +- +-kvfree_eq_table: +- kvfree(eq_table); +- dev->priv.eq_table = NULL; +- return err; + } + + void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +index 3dfab91ae5f2..4be4d2d36218 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h +@@ -87,7 +87,7 @@ void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev); + + int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); + void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); ++void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); + void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); + + /* This function should only be called after mlx5_cmd_force_teardown_hca */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index a5bae398a9e7..568d973725b6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -855,11 +855,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) + goto err_eq_cleanup; + } + +- err = mlx5_cq_debugfs_init(dev); +- if (err) { +- mlx5_core_err(dev, "failed to initialize cq debugfs\n"); +- goto err_events_cleanup; +- } ++ mlx5_cq_debugfs_init(dev); + + mlx5_init_qp_table(dev); + +@@ -924,7 +920,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) + mlx5_cleanup_mkey_table(dev); + mlx5_cleanup_qp_table(dev); + mlx5_cq_debugfs_cleanup(dev); +-err_events_cleanup: + mlx5_events_cleanup(dev); + err_eq_cleanup: + mlx5_eq_table_cleanup(dev); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +index bbcf4ee40ad5..b100489dc85c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +@@ -146,7 +146,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, + + void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); + void mlx5_cmd_flush(struct mlx5_core_dev *dev); +-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); ++void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); + void 
mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); + + int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index 2b8b0ef2e425..904d864f7259 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -982,7 +982,7 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, + int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); + int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); + +-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); ++void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); + void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); + int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, +@@ -994,7 +994,7 @@ int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, + void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); + + const char *mlx5_command_str(int command); +-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); ++void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); + void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); + int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, + int npsvs, u32 *sig_index); +-- +2.13.6 + diff --git a/SOURCES/0019-netdrv-net-mlx5-Use-debug-message-instead-of-warn.patch b/SOURCES/0019-netdrv-net-mlx5-Use-debug-message-instead-of-warn.patch new file mode 100644 index 0000000..caa3620 --- /dev/null +++ b/SOURCES/0019-netdrv-net-mlx5-Use-debug-message-instead-of-warn.patch @@ -0,0 +1,53 @@ +From 371f9058e2e9ac34a7db38b39dbf6f64593c0905 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:40 -0400 +Subject: [PATCH 019/312] [netdrv] net/mlx5: Use debug message instead of warn + +Message-id: <20200510145245.10054-18-ahleihel@redhat.com> +Patchwork-id: 306559 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 17/82] net/mlx5: Use debug message instead of warn +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 647d58a989b3b0b788c721a08394aec825e3438c +Author: Yishai Hadas +Date: Thu Aug 8 11:43:55 2019 +0300 + + net/mlx5: Use debug message instead of warn + + As QP may be created by DEVX, it may be valid to not find the rsn in + mlx5 core tree, change the level to be debug. 
+ + Signed-off-by: Yishai Hadas + Reviewed-by: Saeed Mahameed + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/qp.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c +index b8ba74de9555..f0f3abe331da 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c +@@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb, + + common = mlx5_get_rsc(table, rsn); + if (!common) { +- mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn); ++ mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn); + return NOTIFY_OK; + } + +-- +2.13.6 + diff --git a/SOURCES/0020-netdrv-net-mlx5-Add-XRQ-legacy-commands-opcodes.patch b/SOURCES/0020-netdrv-net-mlx5-Add-XRQ-legacy-commands-opcodes.patch new file mode 100644 index 0000000..4f3e62c --- /dev/null +++ b/SOURCES/0020-netdrv-net-mlx5-Add-XRQ-legacy-commands-opcodes.patch @@ -0,0 +1,75 @@ +From b1e3c3ee5f0ae27994321ff5513aba666bcc5813 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:41 -0400 +Subject: [PATCH 020/312] [netdrv] net/mlx5: Add XRQ legacy commands opcodes + +Message-id: <20200510145245.10054-19-ahleihel@redhat.com> +Patchwork-id: 306558 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 18/82] net/mlx5: Add XRQ legacy commands opcodes +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit b1635ee6120cbeb3de6ab270432b2a2b839c9c56 +Author: Yishai Hadas +Date: Thu Aug 8 11:43:56 2019 +0300 + + net/mlx5: Add XRQ legacy commands opcodes + + Add XRQ legacy commands opcodes, will be used via the DEVX interface. 
+ + Signed-off-by: Yishai Hadas + Reviewed-by: Saeed Mahameed + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 ++++ + include/linux/mlx5/mlx5_ifc.h | 2 ++ + 2 files changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 4b7ca04ae25e..8242f96ab931 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, + case MLX5_CMD_OP_CREATE_UMEM: + case MLX5_CMD_OP_DESTROY_UMEM: + case MLX5_CMD_OP_ALLOC_MEMIC: ++ case MLX5_CMD_OP_MODIFY_XRQ: ++ case MLX5_CMD_OP_RELEASE_XRQ_ERROR: + *status = MLX5_DRIVER_STATUS_ABORTED; + *synd = MLX5_DRIVER_SYND; + return -EIO; +@@ -637,6 +639,8 @@ const char *mlx5_command_str(int command) + MLX5_COMMAND_STR_CASE(DESTROY_UCTX); + MLX5_COMMAND_STR_CASE(CREATE_UMEM); + MLX5_COMMAND_STR_CASE(DESTROY_UMEM); ++ MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR); ++ MLX5_COMMAND_STR_CASE(MODIFY_XRQ); + default: return "unknown command opcode"; + } + } +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 7e6149895d87..03cb1cf0e285 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -172,6 +172,8 @@ enum { + MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, + MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, + MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, ++ MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729, ++ MLX5_CMD_OP_MODIFY_XRQ = 0x72a, + MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740, + MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, + MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, +-- +2.13.6 + diff --git a/SOURCES/0021-netdrv-net-mlx5e-Rename-reporter-header-file.patch b/SOURCES/0021-netdrv-net-mlx5e-Rename-reporter-header-file.patch new file mode 100644 index 0000000..96afe1c --- /dev/null +++ b/SOURCES/0021-netdrv-net-mlx5e-Rename-reporter-header-file.patch @@ -0,0 +1,89 @@ +From ab6346c0e2832d07aeba3f097ac796d14d198930 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:45 -0400 +Subject: [PATCH 021/312] [netdrv] net/mlx5e: Rename reporter header file + +Message-id: <20200510145245.10054-23-ahleihel@redhat.com> +Patchwork-id: 306565 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 22/82] net/mlx5e: Rename reporter header file +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 + +commit 4edc17fdfdf15c2971d15cbfa4d6f2f5f537ee5e +Author: Aya Levin +Date: Mon Jul 1 14:53:34 2019 +0300 + + net/mlx5e: Rename reporter header file + + Rename reporter.h -> health.h so patches in the set can use it for + health related functionality. 
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/{reporter.h => health.h} | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + rename drivers/net/ethernet/mellanox/mlx5/core/en/{reporter.h => health.h} (84%) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +similarity index 84% +rename from drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h +rename to drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index ed7a3881d2c5..cee840e40a05 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -1,8 +1,8 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + /* Copyright (c) 2019 Mellanox Technologies. */ + +-#ifndef __MLX5E_EN_REPORTER_H +-#define __MLX5E_EN_REPORTER_H ++#ifndef __MLX5E_EN_HEALTH_H ++#define __MLX5E_EN_HEALTH_H + + #include "en.h" + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index 817c6ea7e349..9ff19d69619f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -1,7 +1,7 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + /* Copyright (c) 2019 Mellanox Technologies. */ + +-#include "reporter.h" ++#include "health.h" + #include "lib/eq.h" + + #define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 9ffcfa017d4f..118ad4717bfd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -56,7 +56,7 @@ + #include "en/xdp.h" + #include "lib/eq.h" + #include "en/monitor_stats.h" +-#include "en/reporter.h" ++#include "en/health.h" + #include "en/params.h" + #include "en/xsk/umem.h" + #include "en/xsk/setup.h" +-- +2.13.6 + diff --git a/SOURCES/0022-netdrv-net-mlx5e-Change-naming-convention-for-report.patch b/SOURCES/0022-netdrv-net-mlx5e-Change-naming-convention-for-report.patch new file mode 100644 index 0000000..debca38 --- /dev/null +++ b/SOURCES/0022-netdrv-net-mlx5e-Change-naming-convention-for-report.patch @@ -0,0 +1,149 @@ +From 287e3c4357bff248a4b5228fd39588cc7d43c860 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:46 -0400 +Subject: [PATCH 022/312] [netdrv] net/mlx5e: Change naming convention for + reporter's functions + +Message-id: <20200510145245.10054-24-ahleihel@redhat.com> +Patchwork-id: 306563 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 23/82] net/mlx5e: Change naming convention for reporter's functions +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_main.c + Context diff due to already backported commit + 3c14562663c6 ("net/mlx5e: Expose new function for TIS destroy loop") + ---> We now call mlx5e_destroy_tises instead of the for loop. 
+ +commit 06293ae4fa0a1b62bf3bb8add8f9bbe8815b0aba +Author: Aya Levin +Date: Mon Jul 1 15:51:51 2019 +0300 + + net/mlx5e: Change naming convention for reporter's functions + + Change from mlx5e_tx_reporter_* to mlx5e_reporter_tx_*. In the following + patches in the set rx reporter is added, the new naming convention is + more uniformed. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 8 ++++---- + drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 8 ++++---- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++---- + 3 files changed, 12 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index cee840e40a05..c7a5a149011e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -6,9 +6,9 @@ + + #include "en.h" + +-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv); +-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv); +-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq); +-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq); ++int mlx5e_reporter_tx_create(struct mlx5e_priv *priv); ++void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); ++void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); ++int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); + + #endif +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index 9ff19d69619f..62b95f62e4dc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -123,7 +123,7 @@ static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, + return devlink_health_report(tx_reporter, err_str, err_ctx); + } + +-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq) ++void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) + { + char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; + struct mlx5e_tx_err_ctx err_ctx = {0}; +@@ -156,7 +156,7 @@ static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq) + return 0; + } + +-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq) ++int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) + { + char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; + struct mlx5e_tx_err_ctx err_ctx; +@@ -285,7 +285,7 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { + + #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500 + +-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) ++int mlx5e_reporter_tx_create(struct mlx5e_priv *priv) + { + struct devlink_health_reporter *reporter; + struct mlx5_core_dev *mdev = priv->mdev; +@@ -305,7 +305,7 @@ int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) + return 0; + } + +-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv) ++void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) + { + if (!priv->tx_reporter) + return; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 118ad4717bfd..49f5dbab2b8e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -1373,7 +1373,7 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) + struct mlx5e_txqsq *sq = 
container_of(recover_work, struct mlx5e_txqsq, + recover_work); + +- mlx5e_tx_reporter_err_cqe(sq); ++ mlx5e_reporter_tx_err_cqe(sq); + } + + int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, +@@ -3210,7 +3210,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv) + static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) + { + +- mlx5e_tx_reporter_destroy(priv); ++ mlx5e_reporter_tx_destroy(priv); + mlx5e_destroy_tises(priv); + } + +@@ -4283,7 +4283,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) + if (!netif_xmit_stopped(dev_queue)) + continue; + +- if (mlx5e_tx_reporter_timeout(sq)) ++ if (mlx5e_reporter_tx_timeout(sq)) + report_failed = true; + } + +@@ -5080,7 +5080,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_initialize(priv); + #endif +- mlx5e_tx_reporter_create(priv); ++ mlx5e_reporter_tx_create(priv); + return 0; + } + +-- +2.13.6 + diff --git a/SOURCES/0023-netdrv-net-mlx5e-Generalize-tx-reporter-s-functional.patch b/SOURCES/0023-netdrv-net-mlx5e-Generalize-tx-reporter-s-functional.patch new file mode 100644 index 0000000..92a8d7f --- /dev/null +++ b/SOURCES/0023-netdrv-net-mlx5e-Generalize-tx-reporter-s-functional.patch @@ -0,0 +1,399 @@ +From 6258b703b584c06c8f63788431a978bd4db8bb97 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:47 -0400 +Subject: [PATCH 023/312] [netdrv] net/mlx5e: Generalize tx reporter's + functionality + +Message-id: <20200510145245.10054-25-ahleihel@redhat.com> +Patchwork-id: 306564 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 24/82] net/mlx5e: Generalize tx reporter's functionality +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c + Conext diff due to already mereged commit: + e7a981050a7f ("devlink: propagate extack down to health reporter ops") + ---> Function mlx5e_tx_reporter_recover takes also extact parameter now. + +commit c50de4af1d635fab3a5c8bd358f55623c01f7ee5 +Author: Aya Levin +Date: Mon Jul 1 15:08:13 2019 +0300 + + net/mlx5e: Generalize tx reporter's functionality + + Prepare for code sharing with rx reporter, which is added in the + following patches in the set. Introduce a generic error_ctx for + agnostic recovery despatch. 
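The generic error_ctx mentioned above is essentially a callback plus an opaque payload, so a single report/recover path can serve different queue types without knowing their layout. Below is a minimal self-contained C sketch of that dispatch pattern; the struct and function names are illustrative only, not the driver's.

#include <stdio.h>

/* Generic error context: a recover callback plus an opaque payload. */
struct err_ctx {
        int (*recover)(void *ctx);
        void *ctx;
};

struct fake_sq { unsigned int sqn; };
struct fake_rq { unsigned int rqn; };

static int recover_sq(void *ctx)
{
        struct fake_sq *sq = ctx;

        printf("recovering SQ 0x%x\n", sq->sqn);
        return 0;
}

static int recover_rq(void *ctx)
{
        struct fake_rq *rq = ctx;

        printf("recovering RQ 0x%x\n", rq->rqn);
        return 0;
}

/* One dispatch path handles any error type via its context. */
static int health_report(struct err_ctx *err_ctx)
{
        return err_ctx->recover(err_ctx->ctx);
}

int main(void)
{
        struct fake_sq sq = { .sqn = 0x2a };
        struct fake_rq rq = { .rqn = 0x2b };
        struct err_ctx sq_err = { .recover = recover_sq, .ctx = &sq };
        struct err_ctx rq_err = { .recover = recover_rq, .ctx = &rq };

        health_report(&sq_err);
        health_report(&rq_err);
        return 0;
}

The design choice is the usual one for shared infrastructure: the reporter core stays agnostic, and each queue type supplies its own recover routine through the context.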
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5 +- + .../net/ethernet/mellanox/mlx5/core/en/health.c | 82 ++++++++++++ + .../net/ethernet/mellanox/mlx5/core/en/health.h | 14 +++ + .../ethernet/mellanox/mlx5/core/en/reporter_tx.c | 140 ++++++--------------- + 4 files changed, 137 insertions(+), 104 deletions(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/health.c + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index 35079e1f1f6f..4369dfd04a34 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -23,8 +23,9 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ + # + mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ +- en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \ +- en/params.o en/xsk/umem.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o ++ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ ++ en/reporter_tx.o en/params.o en/xsk/umem.o en/xsk/setup.o \ ++ en/xsk/rx.o en/xsk/tx.o + + # + # Netdev extra +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +new file mode 100644 +index 000000000000..fc3112921bd3 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -0,0 +1,82 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (c) 2019 Mellanox Technologies. ++ ++#include "health.h" ++#include "lib/eq.h" ++ ++int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) ++{ ++ struct mlx5_core_dev *mdev = channel->mdev; ++ struct net_device *dev = channel->netdev; ++ struct mlx5e_modify_sq_param msp = {}; ++ int err; ++ ++ msp.curr_state = MLX5_SQC_STATE_ERR; ++ msp.next_state = MLX5_SQC_STATE_RST; ++ ++ err = mlx5e_modify_sq(mdev, sqn, &msp); ++ if (err) { ++ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn); ++ return err; ++ } ++ ++ memset(&msp, 0, sizeof(msp)); ++ msp.curr_state = MLX5_SQC_STATE_RST; ++ msp.next_state = MLX5_SQC_STATE_RDY; ++ ++ err = mlx5e_modify_sq(mdev, sqn, &msp); ++ if (err) { ++ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn); ++ return err; ++ } ++ ++ return 0; ++} ++ ++int mlx5e_health_recover_channels(struct mlx5e_priv *priv) ++{ ++ int err = 0; ++ ++ rtnl_lock(); ++ mutex_lock(&priv->state_lock); ++ ++ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) ++ goto out; ++ ++ err = mlx5e_safe_reopen_channels(priv); ++ ++out: ++ mutex_unlock(&priv->state_lock); ++ rtnl_unlock(); ++ ++ return err; ++} ++ ++int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel) ++{ ++ u32 eqe_count; ++ ++ netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", ++ eq->core.eqn, eq->core.cons_index, eq->core.irqn); ++ ++ eqe_count = mlx5_eq_poll_irq_disabled(eq); ++ if (!eqe_count) ++ return -EIO; ++ ++ netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n", ++ eqe_count, eq->core.eqn); ++ ++ channel->stats->eq_rearm++; ++ return 0; ++} ++ ++int mlx5e_health_report(struct mlx5e_priv *priv, ++ struct devlink_health_reporter *reporter, char *err_str, ++ struct mlx5e_err_ctx *err_ctx) ++{ ++ if (!reporter) { ++ netdev_err(priv->netdev, err_str); ++ return 
err_ctx->recover(&err_ctx->ctx); ++ } ++ return devlink_health_report(reporter, err_str, err_ctx); ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index c7a5a149011e..386bda6104aa 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -11,4 +11,18 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); + int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); + ++#define MLX5E_REPORTER_PER_Q_MAX_LEN 256 ++ ++struct mlx5e_err_ctx { ++ int (*recover)(void *ctx); ++ void *ctx; ++}; ++ ++int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn); ++int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel); ++int mlx5e_health_recover_channels(struct mlx5e_priv *priv); ++int mlx5e_health_report(struct mlx5e_priv *priv, ++ struct devlink_health_reporter *reporter, char *err_str, ++ struct mlx5e_err_ctx *err_ctx); ++ + #endif +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index 62b95f62e4dc..6f9f42ab3005 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -2,14 +2,6 @@ + /* Copyright (c) 2019 Mellanox Technologies. */ + + #include "health.h" +-#include "lib/eq.h" +- +-#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256 +- +-struct mlx5e_tx_err_ctx { +- int (*recover)(struct mlx5e_txqsq *sq); +- struct mlx5e_txqsq *sq; +-}; + + static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) + { +@@ -39,41 +31,20 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) + sq->pc = 0; + } + +-static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) ++static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) + { +- struct mlx5_core_dev *mdev = sq->channel->mdev; +- struct net_device *dev = sq->channel->netdev; +- struct mlx5e_modify_sq_param msp = {0}; ++ struct mlx5_core_dev *mdev; ++ struct net_device *dev; ++ struct mlx5e_txqsq *sq; ++ u8 state; + int err; + +- msp.curr_state = curr_state; +- msp.next_state = MLX5_SQC_STATE_RST; +- +- err = mlx5e_modify_sq(mdev, sq->sqn, &msp); +- if (err) { +- netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); +- return err; +- } +- +- memset(&msp, 0, sizeof(msp)); +- msp.curr_state = MLX5_SQC_STATE_RST; +- msp.next_state = MLX5_SQC_STATE_RDY; +- +- err = mlx5e_modify_sq(mdev, sq->sqn, &msp); +- if (err) { +- netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); +- return err; +- } +- +- return 0; +-} ++ sq = ctx; ++ mdev = sq->channel->mdev; ++ dev = sq->channel->netdev; + +-static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) +-{ +- struct mlx5_core_dev *mdev = sq->channel->mdev; +- struct net_device *dev = sq->channel->netdev; +- u8 state; +- int err; ++ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) ++ return 0; + + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); + if (err) { +@@ -96,7 +67,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) + * pending WQEs. SQ can safely reset the SQ. 
+ */ + +- err = mlx5e_sq_to_ready(sq, state); ++ err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn); + if (err) + goto out; + +@@ -111,102 +82,66 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) + return err; + } + +-static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, +- char *err_str, +- struct mlx5e_tx_err_ctx *err_ctx) +-{ +- if (!tx_reporter) { +- netdev_err(err_ctx->sq->channel->netdev, err_str); +- return err_ctx->recover(err_ctx->sq); +- } +- +- return devlink_health_report(tx_reporter, err_str, err_ctx); +-} +- + void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) + { +- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; +- struct mlx5e_tx_err_ctx err_ctx = {0}; ++ struct mlx5e_priv *priv = sq->channel->priv; ++ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; ++ struct mlx5e_err_ctx err_ctx = {0}; + +- err_ctx.sq = sq; +- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover; ++ err_ctx.ctx = sq; ++ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover; + sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn); + +- mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str, +- &err_ctx); ++ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); + } + +-static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq) ++static int mlx5e_tx_reporter_timeout_recover(void *ctx) + { +- struct mlx5_eq_comp *eq = sq->cq.mcq.eq; +- u32 eqe_count; +- +- netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", +- eq->core.eqn, eq->core.cons_index, eq->core.irqn); ++ struct mlx5_eq_comp *eq; ++ struct mlx5e_txqsq *sq; ++ int err; + +- eqe_count = mlx5_eq_poll_irq_disabled(eq); +- if (!eqe_count) { ++ sq = ctx; ++ eq = sq->cq.mcq.eq; ++ err = mlx5e_health_channel_eq_recover(eq, sq->channel); ++ if (err) + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); +- return -EIO; +- } + +- netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n", +- eqe_count, eq->core.eqn); +- sq->channel->stats->eq_rearm++; +- return 0; ++ return err; + } + + int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) + { +- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; +- struct mlx5e_tx_err_ctx err_ctx; ++ struct mlx5e_priv *priv = sq->channel->priv; ++ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; ++ struct mlx5e_err_ctx err_ctx; + +- err_ctx.sq = sq; +- err_ctx.recover = mlx5e_tx_reporter_timeout_recover; ++ err_ctx.ctx = sq; ++ err_ctx.recover = mlx5e_tx_reporter_timeout_recover; + sprintf(err_str, + "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, + jiffies_to_usecs(jiffies - sq->txq->trans_start)); + +- return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str, +- &err_ctx); ++ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); + } + + /* state lock cannot be grabbed within this function. + * It can cause a dead lock or a read-after-free. 
+ */ +-static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) +-{ +- return err_ctx->recover(err_ctx->sq); +-} +- +-static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) ++static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx) + { +- int err = 0; +- +- rtnl_lock(); +- mutex_lock(&priv->state_lock); +- +- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) +- goto out; +- +- err = mlx5e_safe_reopen_channels(priv); +- +-out: +- mutex_unlock(&priv->state_lock); +- rtnl_unlock(); +- +- return err; ++ return err_ctx->recover(err_ctx->ctx); + } + + static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, + void *context) + { + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); +- struct mlx5e_tx_err_ctx *err_ctx = context; ++ struct mlx5e_err_ctx *err_ctx = context; + + return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) : +- mlx5e_tx_reporter_recover_all(priv); ++ mlx5e_health_recover_channels(priv); + } + + static int +@@ -289,8 +224,9 @@ int mlx5e_reporter_tx_create(struct mlx5e_priv *priv) + { + struct devlink_health_reporter *reporter; + struct mlx5_core_dev *mdev = priv->mdev; +- struct devlink *devlink = priv_to_devlink(mdev); ++ struct devlink *devlink; + ++ devlink = priv_to_devlink(mdev); + reporter = + devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, + MLX5_REPORTER_TX_GRACEFUL_PERIOD, +-- +2.13.6 + diff --git a/SOURCES/0024-netdrv-net-mlx5e-Extend-tx-diagnose-function.patch b/SOURCES/0024-netdrv-net-mlx5e-Extend-tx-diagnose-function.patch new file mode 100644 index 0000000..43a3149 --- /dev/null +++ b/SOURCES/0024-netdrv-net-mlx5e-Extend-tx-diagnose-function.patch @@ -0,0 +1,89 @@ +From 0da733b2b08fa6c3c9036b1b45ed8fcbc50727ef Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:48 -0400 +Subject: [PATCH 024/312] [netdrv] net/mlx5e: Extend tx diagnose function + +Message-id: <20200510145245.10054-26-ahleihel@redhat.com> +Patchwork-id: 306566 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 25/82] net/mlx5e: Extend tx diagnose function +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 + +commit dd921fd24179e51fc8d8d7bd7978f369da5ba34a +Author: Aya Levin +Date: Mon Jun 24 21:41:21 2019 +0300 + + net/mlx5e: Extend tx diagnose function + + The following patches in the set enhance the diagnostics info of tx + reporter. Therefore, it is better to pass a pointer to the SQ for + further data extraction. 
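+
+    As a rough before/after sketch of the call site (taken from the hunk
+    below, not an additional upstream change):
+
+        /* before: caller queries the SQ state and passes raw values */
+        err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn, state,
+                                                      netif_xmit_stopped(sq->txq));
+        /* after: caller hands over the SQ; the helper extracts what it needs */
+        err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq);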
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index 6f9f42ab3005..b9429ff8d9c4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -146,15 +146,22 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, + + static int + mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, +- u32 sqn, u8 state, bool stopped) ++ struct mlx5e_txqsq *sq) + { ++ struct mlx5e_priv *priv = sq->channel->priv; ++ bool stopped = netif_xmit_stopped(sq->txq); ++ u8 state; + int err; + ++ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); ++ if (err) ++ return err; ++ + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + +- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn); ++ err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); + if (err) + return err; + +@@ -191,15 +198,8 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; + i++) { + struct mlx5e_txqsq *sq = priv->txq2sq[i]; +- u8 state; +- +- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); +- if (err) +- goto unlock; + +- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn, +- state, +- netif_xmit_stopped(sq->txq)); ++ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq); + if (err) + goto unlock; + } +-- +2.13.6 + diff --git a/SOURCES/0025-netdrv-net-mlx5e-Extend-tx-reporter-diagnostics-outp.patch b/SOURCES/0025-netdrv-net-mlx5e-Extend-tx-reporter-diagnostics-outp.patch new file mode 100644 index 0000000..b564975 --- /dev/null +++ b/SOURCES/0025-netdrv-net-mlx5e-Extend-tx-reporter-diagnostics-outp.patch @@ -0,0 +1,270 @@ +From 22b79810283de893e445fec4710fd5645cf90237 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:49 -0400 +Subject: [PATCH 025/312] [netdrv] net/mlx5e: Extend tx reporter diagnostics + output + +Message-id: <20200510145245.10054-27-ahleihel@redhat.com> +Patchwork-id: 306567 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 26/82] net/mlx5e: Extend tx reporter diagnostics output +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 + +commit 2d708887a4b1cb142c3179b3b1030dab047467b6 +Author: Aya Levin +Date: Sun Jun 30 11:34:15 2019 +0300 + + net/mlx5e: Extend tx reporter diagnostics output + + Enhance tx reporter's diagnostics output to include: information common + to all SQs: SQ size, SQ stride size. + In addition add channel ix, tc, txq ix, cc and pc. 
+ + $ devlink health diagnose pci/0000:00:0b.0 reporter tx + Common config: + SQ: + stride size: 64 size: 1024 + SQs: + channel ix: 0 tc: 0 txq ix: 0 sqn: 4307 HW state: 1 stopped: false cc: 0 pc: 0 + channel ix: 1 tc: 0 txq ix: 1 sqn: 4312 HW state: 1 stopped: false cc: 0 pc: 0 + channel ix: 2 tc: 0 txq ix: 2 sqn: 4317 HW state: 1 stopped: false cc: 0 pc: 0 + channel ix: 3 tc: 0 txq ix: 3 sqn: 4322 HW state: 1 stopped: false cc: 0 pc: 0 + + $ devlink health diagnose pci/0000:00:0b.0 reporter tx -jp + { + "Common config": { + "SQ": { + "stride size": 64, + "size": 1024 + } + }, + "SQs": [ { + "channel ix": 0, + "tc": 0, + "txq ix": 0, + "sqn": 4307, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0 + },{ + "channel ix": 1, + "tc": 0, + "txq ix": 1, + "sqn": 4312, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0 + },{ + "channel ix": 2, + "tc": 0, + "txq ix": 2, + "sqn": 4317, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0 + },{ + "channel ix": 3, + "tc": 0, + "txq ix": 3, + "sqn": 4322, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0 + } ] + } + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/health.c | 30 ++++++++++ + .../net/ethernet/mellanox/mlx5/core/en/health.h | 3 + + .../ethernet/mellanox/mlx5/core/en/reporter_tx.c | 69 +++++++++++++++++++--- + 3 files changed, 94 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +index fc3112921bd3..dab563f07157 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -4,6 +4,36 @@ + #include "health.h" + #include "lib/eq.h" + ++int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) ++{ ++ int err; ++ ++ err = devlink_fmsg_pair_nest_start(fmsg, name); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_obj_nest_start(fmsg); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg) ++{ ++ int err; ++ ++ err = devlink_fmsg_obj_nest_end(fmsg); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_pair_nest_end(fmsg); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ + int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) + { + struct mlx5_core_dev *mdev = channel->mdev; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index 386bda6104aa..112771ad516c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -11,6 +11,9 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); + int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); + ++int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); ++int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); ++ + #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 + + struct mlx5e_err_ctx { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index b9429ff8d9c4..a5d0fcbb85af 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -146,7 +146,7 @@ static int 
mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, + + static int + mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, +- struct mlx5e_txqsq *sq) ++ struct mlx5e_txqsq *sq, int tc) + { + struct mlx5e_priv *priv = sq->channel->priv; + bool stopped = netif_xmit_stopped(sq->txq); +@@ -161,6 +161,18 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + if (err) + return err; + ++ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix); ++ if (err) ++ return err; ++ + err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn); + if (err) + return err; +@@ -173,6 +185,14 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + if (err) + return err; + ++ err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc); ++ if (err) ++ return err; ++ + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; +@@ -184,24 +204,57 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) + { + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); +- int i, err = 0; ++ struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; ++ u32 sq_stride, sq_sz; ++ ++ int i, tc, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + ++ sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); ++ sq_stride = MLX5_SEND_WQE_BB; ++ ++ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config"); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ"); ++ if (err) ++ goto unlock; ++ ++ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); ++ if (err) ++ goto unlock; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_named_obj_nest_end(fmsg); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_named_obj_nest_end(fmsg); ++ if (err) ++ goto unlock; ++ + err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); + if (err) + goto unlock; + +- for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; +- i++) { +- struct mlx5e_txqsq *sq = priv->txq2sq[i]; ++ for (i = 0; i < priv->channels.num; i++) { ++ struct mlx5e_channel *c = priv->channels.c[i]; ++ ++ for (tc = 0; tc < priv->channels.params.num_tc; tc++) { ++ struct mlx5e_txqsq *sq = &c->sq[tc]; + +- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq); +- if (err) +- goto unlock; ++ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); ++ if (err) ++ goto unlock; ++ } + } + err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (err) +-- +2.13.6 + diff --git a/SOURCES/0026-netdrv-net-mlx5e-Add-cq-info-to-tx-reporter-diagnose.patch b/SOURCES/0026-netdrv-net-mlx5e-Add-cq-info-to-tx-reporter-diagnose.patch new file mode 100644 index 0000000..a0b04c2 --- /dev/null +++ b/SOURCES/0026-netdrv-net-mlx5e-Add-cq-info-to-tx-reporter-diagnose.patch @@ -0,0 +1,273 @@ +From 53141c2d2ece30134507bf0342288ed1340a8d83 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:50 -0400 +Subject: [PATCH 026/312] [netdrv] net/mlx5e: Add cq info to tx reporter + diagnose + +Message-id: <20200510145245.10054-28-ahleihel@redhat.com> +Patchwork-id: 306568 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 27/82] net/mlx5e: 
Add cq info to tx reporter diagnose +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 + +commit 2bf09e60ae5ef68c2282f97baf37b7dbd9cc1d48 +Author: Aya Levin +Date: Sun Jun 30 15:08:00 2019 +0300 + + net/mlx5e: Add cq info to tx reporter diagnose + + Add cq information to general diagnose output: CQ size and stride size. + Per SQ add information about the related CQ: cqn and CQ's HW status. + + $ devlink health diagnose pci/0000:00:0b.0 reporter tx + Common Config: + SQ: + stride size: 64 size: 1024 + CQ: + stride size: 64 size: 1024 + SQs: + channel ix: 0 tc: 0 txq ix: 0 sqn: 4307 HW state: 1 stopped: false cc: 0 pc: 0 + CQ: + cqn: 1030 HW status: 0 + channel ix: 1 tc: 0 txq ix: 1 sqn: 4312 HW state: 1 stopped: false cc: 0 pc: 0 + CQ: + cqn: 1034 HW status: 0 + channel ix: 2 tc: 0 txq ix: 2 sqn: 4317 HW state: 1 stopped: false cc: 0 pc: 0 + CQ: + cqn: 1038 HW status: 0 + channel ix: 3 tc: 0 txq ix: 3 sqn: 4322 HW state: 1 stopped: false cc: 0 pc: 0 + CQ: + cqn: 1042 HW status: 0 + + $ devlink health diagnose pci/0000:00:0b.0 reporter tx -jp + { + "Common Config": { + "SQ": { + "stride size": 64, + "size": 1024 + }, + "CQ": { + "stride size": 64, + "size": 1024 + } + }, + "SQs": [ { + "channel ix": 0, + "tc": 0, + "txq ix": 0, + "sqn": 4307, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0, + "CQ": { + "cqn": 1030, + "HW status": 0 + } + },{ + "channel ix": 1, + "tc": 0, + "txq ix": 1, + "sqn": 4312, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0, + "CQ": { + "cqn": 1034, + "HW status": 0 + } + },{ + "channel ix": 2, + "tc": 0, + "txq ix": 2, + "sqn": 4317, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0, + "CQ": { + "cqn": 1038, + "HW status": 0 + } + },{ + "channel ix": 3, + "tc": 0, + "txq ix": 3, + "sqn": 4322, + "HW state": 1, + "stopped": false, + "cc": 0, + "pc": 0, + "CQ": { + "cqn": 1042, + "HW status": 0 + } ] + } + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/health.c | 62 ++++++++++++++++++++++ + .../net/ethernet/mellanox/mlx5/core/en/health.h | 2 + + .../ethernet/mellanox/mlx5/core/en/reporter_tx.c | 8 +++ + drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5 ++ + drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1 + + 5 files changed, 78 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +index dab563f07157..ffd9a7a165a2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -34,6 +34,68 @@ int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg) + return 0; + } + ++int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) ++{ ++ struct mlx5e_priv *priv = cq->channel->priv; ++ u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; ++ u8 hw_status; ++ void *cqc; ++ int err; ++ ++ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out)); ++ if (err) ++ return err; ++ ++ cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); ++ hw_status = MLX5_GET(cqc, cqc, status); ++ ++ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ"); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "cqn", 
cq->mcq.cqn); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status); ++ if (err) ++ return err; ++ ++ err = mlx5e_reporter_named_obj_nest_end(fmsg); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) ++{ ++ u8 cq_log_stride; ++ u32 cq_sz; ++ int err; ++ ++ cq_sz = mlx5_cqwq_get_size(&cq->wq); ++ cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); ++ ++ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ"); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride)); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz); ++ if (err) ++ return err; ++ ++ err = mlx5e_reporter_named_obj_nest_end(fmsg); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ + int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) + { + struct mlx5_core_dev *mdev = channel->mdev; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index 112771ad516c..6725d417aaf5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -11,6 +11,8 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); + int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); + ++int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); ++int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); + int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); + int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +index a5d0fcbb85af..bfed558637c2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +@@ -193,6 +193,10 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + if (err) + return err; + ++ err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg); ++ if (err) ++ return err; ++ + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; +@@ -233,6 +237,10 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + if (err) + goto unlock; + ++ err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg); ++ if (err) ++ goto unlock; ++ + err = mlx5e_reporter_named_obj_nest_end(fmsg); + if (err) + goto unlock; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +index 953cc8efba69..dd2315ce4441 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +@@ -44,6 +44,11 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) + return wq->fbc.sz_m1 + 1; + } + ++u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq) ++{ ++ return wq->fbc.log_stride; ++} ++ + u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) + { + return (u32)wq->fbc.sz_m1 + 1; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +index f1ec58c9e9e3..55791f71a778 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +@@ -89,6 +89,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct 
mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl); + u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); ++u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq); + + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, +-- +2.13.6 + diff --git a/SOURCES/0027-netdrv-net-mlx5e-Add-helper-functions-for-reporter-s.patch b/SOURCES/0027-netdrv-net-mlx5e-Add-helper-functions-for-reporter-s.patch new file mode 100644 index 0000000..8e9b522 --- /dev/null +++ b/SOURCES/0027-netdrv-net-mlx5e-Add-helper-functions-for-reporter-s.patch @@ -0,0 +1,142 @@ +From 713b69f0ad280204ad68ebe2cd6e185e213182f0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:51 -0400 +Subject: [PATCH 027/312] [netdrv] net/mlx5e: Add helper functions for + reporter's basics + +Message-id: <20200510145245.10054-29-ahleihel@redhat.com> +Patchwork-id: 306569 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 28/82] net/mlx5e: Add helper functions for reporter's basics +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_main.c + Context diff due to already backported commit + 3c14562663c6 ("net/mlx5e: Expose new function for TIS destroy loop") + ---> In function mlx5e_cleanup_nic_tx, we now call mlx5e_destroy_tises + instead of the for loop. + Also, in function mlx5e_nic_init we no longer call mlx5e_build_tc2txq_maps. + +commit 11af6a6d09e9a90e05f4a21564232b30c6c25d69 +Author: Aya Levin +Date: Thu Jul 11 17:17:36 2019 +0300 + + net/mlx5e: Add helper functions for reporter's basics + + Introduce helper functions for create and destroy reporters and update + channels. In the following patch, rx reporter is added and it will use + these helpers too. 
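+
+    A short usage sketch (assembled from the en_main.c hunks below, not an
+    additional upstream change): the netdev init/cleanup and channel-open
+    paths now go through the helpers instead of per-reporter calls:
+
+        /* mlx5e_nic_init() */
+        mlx5e_health_create_reporters(priv);
+
+        /* mlx5e_nic_cleanup() */
+        mlx5e_health_destroy_reporters(priv);
+
+        /* mlx5e_open_channels(), once the new channels are up */
+        mlx5e_health_channels_update(priv);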
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 17 +++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 4 ++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 9 +++------ + 3 files changed, 24 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +index ffd9a7a165a2..c11d0162eaf8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -96,6 +96,23 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * + return 0; + } + ++int mlx5e_health_create_reporters(struct mlx5e_priv *priv) ++{ ++ return mlx5e_reporter_tx_create(priv); ++} ++ ++void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv) ++{ ++ mlx5e_reporter_tx_destroy(priv); ++} ++ ++void mlx5e_health_channels_update(struct mlx5e_priv *priv) ++{ ++ if (priv->tx_reporter) ++ devlink_health_reporter_state_update(priv->tx_reporter, ++ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); ++} ++ + int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) + { + struct mlx5_core_dev *mdev = channel->mdev; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index 6725d417aaf5..b2c0ccc79b22 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -29,5 +29,9 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv); + int mlx5e_health_report(struct mlx5e_priv *priv, + struct devlink_health_reporter *reporter, char *err_str, + struct mlx5e_err_ctx *err_ctx); ++int mlx5e_health_create_reporters(struct mlx5e_priv *priv); ++void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv); ++void mlx5e_health_channels_update(struct mlx5e_priv *priv); ++ + + #endif +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 49f5dbab2b8e..908b88891325 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2323,10 +2323,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, + goto err_close_channels; + } + +- if (priv->tx_reporter) +- devlink_health_reporter_state_update(priv->tx_reporter, +- DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); +- ++ mlx5e_health_channels_update(priv); + kvfree(cparam); + return 0; + +@@ -3210,7 +3207,6 @@ int mlx5e_create_tises(struct mlx5e_priv *priv) + static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) + { + +- mlx5e_reporter_tx_destroy(priv); + mlx5e_destroy_tises(priv); + } + +@@ -4972,12 +4968,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, + if (err) + mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); + mlx5e_build_nic_netdev(netdev); ++ mlx5e_health_create_reporters(priv); + + return 0; + } + + static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) + { ++ mlx5e_health_destroy_reporters(priv); + mlx5e_tls_cleanup(priv); + mlx5e_ipsec_cleanup(priv); + mlx5e_netdev_cleanup(priv->netdev, priv); +@@ -5080,7 +5078,6 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_initialize(priv); + #endif +- mlx5e_reporter_tx_create(priv); + return 0; + } + +-- +2.13.6 + diff --git 
a/SOURCES/0028-netdrv-net-mlx5e-Add-support-to-rx-reporter-diagnose.patch b/SOURCES/0028-netdrv-net-mlx5e-Add-support-to-rx-reporter-diagnose.patch new file mode 100644 index 0000000..8e06a62 --- /dev/null +++ b/SOURCES/0028-netdrv-net-mlx5e-Add-support-to-rx-reporter-diagnose.patch @@ -0,0 +1,481 @@ +From f89402f33560dd8e1f4cfb6a5d2b849e9fff7f47 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:52 -0400 +Subject: [PATCH 028/312] [netdrv] net/mlx5e: Add support to rx reporter + diagnose + +Message-id: <20200510145245.10054-30-ahleihel@redhat.com> +Patchwork-id: 306570 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 29/82] net/mlx5e: Add support to rx reporter diagnose +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c + Adapt mlx5e_rx_reporter_diagnose parameters to current API due to already + backported commit: + e7a981050a7f ("devlink: propagate extack down to health reporter ops") + ---> .diagnose callback now expects to get extact as well. + +commit 9032e7192eac8e657b52cf1c89fe730308b72c2a +Author: Aya Levin +Date: Tue Jun 25 16:26:46 2019 +0300 + + net/mlx5e: Add support to rx reporter diagnose + + Add rx reporter, which supports diagnose call-back. Diagnostics output + include: information common to all RQs: RQ type, RQ size, RQ stride + size, CQ size and CQ stride size. In addition advertise information per + RQ and its related icosq and attached CQ. + + $ devlink health diagnose pci/0000:00:0b.0 reporter rx + Common config: + RQ: + type: 2 stride size: 2048 size: 8 + CQ: + stride size: 64 size: 1024 + RQs: + channel ix: 0 rqn: 4308 HW state: 1 SW state: 3 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 + CQ: + cqn: 1032 HW status: 0 + channel ix: 1 rqn: 4313 HW state: 1 SW state: 3 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 + CQ: + cqn: 1036 HW status: 0 + channel ix: 2 rqn: 4318 HW state: 1 SW state: 3 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 + CQ: + cqn: 1040 HW status: 0 + channel ix: 3 rqn: 4323 HW state: 1 SW state: 3 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 + CQ: + cqn: 1044 HW status: 0 + + $ devlink health diagnose pci/0000:00:0b.0 reporter rx -jp + { + "Common config": { + "RQ": { + "type": 2, + "stride size": 2048, + "size": 8 + }, + "CQ": { + "stride size": 64, + "size": 1024 + } + }, + "RQs": [ { + "channel ix": 0, + "rqn": 4308, + "HW state": 1, + "SW state": 3, + "posted WQEs": 7, + "cc": 7, + "ICOSQ HW state": 1, + "CQ": { + "cqn": 1032, + "HW status": 0 + } + },{ + "channel ix": 1, + "rqn": 4313, + "HW state": 1, + "SW state": 3, + "posted WQEs": 7, + "cc": 7, + "ICOSQ HW state": 1, + "CQ": { + "cqn": 1036, + "HW status": 0 + } + },{ + "channel ix": 2, + "rqn": 4318, + "HW state": 1, + "SW state": 3, + "posted WQEs": 7, + "cc": 7, + "ICOSQ HW state": 1, + "CQ": { + "cqn": 1040, + "HW status": 0 + } + },{ + "channel ix": 3, + "rqn": 4323, + "HW state": 1, + "SW state": 3, + "posted WQEs": 7, + "cc": 7, + "ICOSQ HW state": 1, + "CQ": { + "cqn": 1044, + "HW status": 0 + } + } ] + } + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4 +- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 21 +++ + 
.../net/ethernet/mellanox/mlx5/core/en/health.c | 16 +- + .../net/ethernet/mellanox/mlx5/core/en/health.h | 3 + + .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 197 +++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 20 --- + 6 files changed, 238 insertions(+), 23 deletions(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index 4369dfd04a34..bd2074d5eb87 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -24,8 +24,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ + mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ + en_selftest.o en/port.o en/monitor_stats.o en/health.o \ +- en/reporter_tx.o en/params.o en/xsk/umem.o en/xsk/setup.o \ +- en/xsk/rx.o en/xsk/tx.o ++ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \ ++ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o + + # + # Netdev extra +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 84575c0bcca6..3ba2dec04137 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -855,6 +855,7 @@ struct mlx5e_priv { + struct mlx5e_tls *tls; + #endif + struct devlink_health_reporter *tx_reporter; ++ struct devlink_health_reporter *rx_reporter; + struct mlx5e_xsk xsk; + }; + +@@ -899,6 +900,26 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); + int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); + void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); + ++static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) ++{ ++ switch (rq->wq_type) { ++ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: ++ return mlx5_wq_ll_get_size(&rq->mpwqe.wq); ++ default: ++ return mlx5_wq_cyc_get_size(&rq->wqe.wq); ++ } ++} ++ ++static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) ++{ ++ switch (rq->wq_type) { ++ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: ++ return rq->mpwqe.wq.cur_sz; ++ default: ++ return rq->wqe.wq.cur_sz; ++ } ++} ++ + bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); + bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +index c11d0162eaf8..1d6b58860da6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -98,11 +98,22 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * + + int mlx5e_health_create_reporters(struct mlx5e_priv *priv) + { +- return mlx5e_reporter_tx_create(priv); ++ int err; ++ ++ err = mlx5e_reporter_tx_create(priv); ++ if (err) ++ return err; ++ ++ err = mlx5e_reporter_rx_create(priv); ++ if (err) ++ return err; ++ ++ return 0; + } + + void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv) + { ++ mlx5e_reporter_rx_destroy(priv); + mlx5e_reporter_tx_destroy(priv); + } + +@@ -111,6 +122,9 @@ void mlx5e_health_channels_update(struct mlx5e_priv *priv) + if (priv->tx_reporter) + devlink_health_reporter_state_update(priv->tx_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); ++ if (priv->rx_reporter) ++ devlink_health_reporter_state_update(priv->rx_reporter, 
++ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); + } + + int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index b2c0ccc79b22..a751c5316baf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -16,6 +16,9 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * + int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); + int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); + ++int mlx5e_reporter_rx_create(struct mlx5e_priv *priv); ++void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); ++ + #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 + + struct mlx5e_err_ctx { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +new file mode 100644 +index 000000000000..7cd767f0b8c7 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +@@ -0,0 +1,197 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (c) 2019 Mellanox Technologies. ++ ++#include "health.h" ++#include "params.h" ++ ++static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) ++{ ++ int outlen = MLX5_ST_SZ_BYTES(query_rq_out); ++ void *out; ++ void *rqc; ++ int err; ++ ++ out = kvzalloc(outlen, GFP_KERNEL); ++ if (!out) ++ return -ENOMEM; ++ ++ err = mlx5_core_query_rq(dev, rqn, out); ++ if (err) ++ goto out; ++ ++ rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context); ++ *state = MLX5_GET(rqc, rqc, state); ++ ++out: ++ kvfree(out); ++ return err; ++} ++ ++static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, ++ struct devlink_fmsg *fmsg) ++{ ++ struct mlx5e_priv *priv = rq->channel->priv; ++ struct mlx5e_params *params; ++ struct mlx5e_icosq *icosq; ++ u8 icosq_hw_state; ++ int wqes_sz; ++ u8 hw_state; ++ u16 wq_head; ++ int err; ++ ++ params = &priv->channels.params; ++ icosq = &rq->channel->icosq; ++ err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state); ++ if (err) ++ return err; ++ ++ err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state); ++ if (err) ++ return err; ++ ++ wqes_sz = mlx5e_rqwq_get_cur_sz(rq); ++ wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? 
++ rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq); ++ ++ err = devlink_fmsg_obj_nest_start(fmsg); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state); ++ if (err) ++ return err; ++ ++ err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg); ++ if (err) ++ return err; ++ ++ err = devlink_fmsg_obj_nest_end(fmsg); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, ++ struct devlink_fmsg *fmsg, ++ struct netlink_ext_ack *extack) ++{ ++ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); ++ struct mlx5e_params *params = &priv->channels.params; ++ struct mlx5e_rq *generic_rq; ++ u32 rq_stride, rq_sz; ++ int i, err = 0; ++ ++ mutex_lock(&priv->state_lock); ++ ++ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) ++ goto unlock; ++ ++ generic_rq = &priv->channels.c[0]->rq; ++ rq_sz = mlx5e_rqwq_get_size(generic_rq); ++ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); ++ ++ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config"); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ"); ++ if (err) ++ goto unlock; ++ ++ err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type); ++ if (err) ++ goto unlock; ++ ++ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride); ++ if (err) ++ goto unlock; ++ ++ err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_named_obj_nest_end(fmsg); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg); ++ if (err) ++ goto unlock; ++ ++ err = mlx5e_reporter_named_obj_nest_end(fmsg); ++ if (err) ++ goto unlock; ++ ++ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); ++ if (err) ++ goto unlock; ++ ++ for (i = 0; i < priv->channels.num; i++) { ++ struct mlx5e_rq *rq = &priv->channels.c[i]->rq; ++ ++ err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg); ++ if (err) ++ goto unlock; ++ } ++ err = devlink_fmsg_arr_pair_nest_end(fmsg); ++ if (err) ++ goto unlock; ++unlock: ++ mutex_unlock(&priv->state_lock); ++ return err; ++} ++ ++static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { ++ .name = "rx", ++ .diagnose = mlx5e_rx_reporter_diagnose, ++}; ++ ++int mlx5e_reporter_rx_create(struct mlx5e_priv *priv) ++{ ++ struct devlink *devlink = priv_to_devlink(priv->mdev); ++ struct devlink_health_reporter *reporter; ++ ++ reporter = devlink_health_reporter_create(devlink, ++ &mlx5_rx_reporter_ops, ++ 0, false, priv); ++ if (IS_ERR(reporter)) { ++ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", ++ PTR_ERR(reporter)); ++ return PTR_ERR(reporter); ++ } ++ priv->rx_reporter = reporter; ++ return 0; ++} ++ ++void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) ++{ ++ if (!priv->rx_reporter) ++ return; ++ ++ devlink_health_reporter_destroy(priv->rx_reporter); ++} +diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 908b88891325..d78f60bc86ff 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -238,26 +238,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, + ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); + } + +-static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) +-{ +- switch (rq->wq_type) { +- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: +- return mlx5_wq_ll_get_size(&rq->mpwqe.wq); +- default: +- return mlx5_wq_cyc_get_size(&rq->wqe.wq); +- } +-} +- +-static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) +-{ +- switch (rq->wq_type) { +- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: +- return rq->mpwqe.wq.cur_sz; +- default: +- return rq->wqe.wq.cur_sz; +- } +-} +- + static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, + struct mlx5e_channel *c) + { +-- +2.13.6 + diff --git a/SOURCES/0029-netdrv-net-mlx5e-Split-open-close-ICOSQ-into-stages.patch b/SOURCES/0029-netdrv-net-mlx5e-Split-open-close-ICOSQ-into-stages.patch new file mode 100644 index 0000000..409ff8e --- /dev/null +++ b/SOURCES/0029-netdrv-net-mlx5e-Split-open-close-ICOSQ-into-stages.patch @@ -0,0 +1,159 @@ +From 3f66afcb58cbade919b064a2eee38d35bd9c64ad Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:53 -0400 +Subject: [PATCH 029/312] [netdrv] net/mlx5e: Split open/close ICOSQ into + stages + +Message-id: <20200510145245.10054-31-ahleihel@redhat.com> +Patchwork-id: 306572 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 30/82] net/mlx5e: Split open/close ICOSQ into stages +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_main.c + drivers/net/ethernet/mellanox/mlx5/core/en.h + Take a couple of hunks from this commit to fix incremental build: + be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ") + ---> Expose function mlx5e_(de)activate_icosq to be used in other files. + +commit 9d18b5144a0a850e722e7c3d7b700eb1fba7b7e2 +Author: Aya Levin +Date: Tue Jul 2 15:47:29 2019 +0300 + + net/mlx5e: Split open/close ICOSQ into stages + + Align ICOSQ open/close behaviour with RQ and SQ. Split open flow into + open and activate where open handles creation and activate enables the + queue. Do a symmetric thing in close flow: split into close and + deactivate. 
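+
+    The resulting channel activate/deactivate order, sketched from the
+    en_main.c hunks below (not an additional upstream change):
+
+        /* mlx5e_activate_channel() */
+        mlx5e_activate_icosq(&c->icosq);   /* sets MLX5E_SQ_STATE_ENABLED */
+        mlx5e_activate_rq(&c->rq);
+
+        /* mlx5e_deactivate_channel() */
+        mlx5e_deactivate_rq(&c->rq);
+        mlx5e_deactivate_icosq(&c->icosq); /* clears the bit, napi_synchronize() */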
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 ++ + .../net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2 ++ + drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 7 +++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19 +++++++++++++++---- + 4 files changed, 26 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 3ba2dec04137..21926cb209f9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1037,6 +1037,8 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); + void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); ++void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); ++void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); + + int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + struct mlx5e_modify_sq_param *p); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +index 79060ee60c98..c28cbae42331 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +@@ -156,6 +156,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) + + void mlx5e_activate_xsk(struct mlx5e_channel *c) + { ++ mlx5e_activate_icosq(&c->xskicosq); + set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); + /* TX queue is created active. */ + +@@ -168,6 +169,7 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c) + { + mlx5e_deactivate_rq(&c->xskrq); + /* TX queue is disabled on close. */ ++ mlx5e_deactivate_icosq(&c->xskicosq); + } + + static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +index 19ae0e28fead..03abb8cb96be 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +@@ -26,6 +26,13 @@ int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid) + return -ENXIO; + + if (!napi_if_scheduled_mark_missed(&c->napi)) { ++ /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not ++ * active and not polled by NAPI. Return 0, because the upcoming ++ * activate will trigger the IRQ for us. 
++ */ ++ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) ++ return 0; ++ + spin_lock(&c->xskicosq_lock); + mlx5e_trigger_irq(&c->xskicosq); + spin_unlock(&c->xskicosq_lock); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index d78f60bc86ff..7dde1be49f35 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -1369,7 +1369,6 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, + csp.cqn = sq->cq.mcq.cqn; + csp.wq_ctrl = &sq->wq_ctrl; + csp.min_inline_mode = params->tx_min_inline_mode; +- set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + if (err) + goto err_free_icosq; +@@ -1382,12 +1381,22 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, + return err; + } + +-void mlx5e_close_icosq(struct mlx5e_icosq *sq) ++void mlx5e_activate_icosq(struct mlx5e_icosq *icosq) + { +- struct mlx5e_channel *c = sq->channel; ++ set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); ++} + +- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); ++void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) ++{ ++ struct mlx5e_channel *c = icosq->channel; ++ ++ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); + napi_synchronize(&c->napi); ++} ++ ++void mlx5e_close_icosq(struct mlx5e_icosq *sq) ++{ ++ struct mlx5e_channel *c = sq->channel; + + mlx5e_destroy_sq(c->mdev, sq->sqn); + mlx5e_free_icosq(sq); +@@ -1971,6 +1980,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_activate_txqsq(&c->sq[tc]); ++ mlx5e_activate_icosq(&c->icosq); + mlx5e_activate_rq(&c->rq); + netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix); + +@@ -1986,6 +1996,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) + mlx5e_deactivate_xsk(c); + + mlx5e_deactivate_rq(&c->rq); ++ mlx5e_deactivate_icosq(&c->icosq); + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_deactivate_txqsq(&c->sq[tc]); + } +-- +2.13.6 + diff --git a/SOURCES/0030-netdrv-net-mlx5e-Report-and-recover-from-CQE-error-o.patch b/SOURCES/0030-netdrv-net-mlx5e-Report-and-recover-from-CQE-error-o.patch new file mode 100644 index 0000000..25d909a --- /dev/null +++ b/SOURCES/0030-netdrv-net-mlx5e-Report-and-recover-from-CQE-error-o.patch @@ -0,0 +1,360 @@ +From beae62dd1772b395964f8e73f82c202f1ad346d9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:54 -0400 +Subject: [PATCH 030/312] [netdrv] net/mlx5e: Report and recover from CQE error + on ICOSQ + +Message-id: <20200510145245.10054-32-ahleihel@redhat.com> +Patchwork-id: 306571 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 31/82] net/mlx5e: Report and recover from CQE error on ICOSQ +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_main.c + - drivers/net/ethernet/mellanox/mlx5/core/en.h + Dropped hunks that were previously applied for fixing incremental build. 
+ + - drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c + Adapt mlx5e_rx_reporter_recover parameters to current API due to already + backported commit: + e7a981050a7f ("devlink: propagate extack down to health reporter ops") + ---> .recover callback now expects to get extact as well. + +commit be5323c8379f488f1de53206edeaf80fc20d7686 +Author: Aya Levin +Date: Tue Jun 25 17:44:28 2019 +0300 + + net/mlx5e: Report and recover from CQE error on ICOSQ + + Add support for report and recovery from error on completion on ICOSQ. + Deactivate RQ and flush, then deactivate ICOSQ. Set the queue back to + ready state (firmware) and reset the ICOSQ and the RQ (software + resources). Finally, activate the ICOSQ and the RQ. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 ++ + .../net/ethernet/mellanox/mlx5/core/en/health.h | 1 + + .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 110 ++++++++++++++++++++- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 18 +++- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 + + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 + + drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 + + 7 files changed, 137 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 21926cb209f9..f0ba350579ae 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -559,6 +559,8 @@ struct mlx5e_icosq { + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5e_channel *channel; ++ ++ struct work_struct recover_work; + } ____cacheline_aligned_in_smp; + + struct mlx5e_wqe_frag_info { +@@ -1037,6 +1039,10 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); + void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); ++int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); ++void mlx5e_activate_rq(struct mlx5e_rq *rq); ++void mlx5e_deactivate_rq(struct mlx5e_rq *rq); ++void mlx5e_free_rx_descs(struct mlx5e_rq *rq); + void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); + void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index a751c5316baf..8acd9dc520cf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -18,6 +18,7 @@ int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); + + int mlx5e_reporter_rx_create(struct mlx5e_priv *priv); + void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); ++void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); + + #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +index 7cd767f0b8c7..661de567ca6c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +@@ -27,6 +27,110 @@ static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) + return err; + } + ++static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq) 
++{ ++ unsigned long exp_time = jiffies + msecs_to_jiffies(2000); ++ ++ while (time_before(jiffies, exp_time)) { ++ if (icosq->cc == icosq->pc) ++ return 0; ++ ++ msleep(20); ++ } ++ ++ netdev_err(icosq->channel->netdev, ++ "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n", ++ icosq->sqn, icosq->cc, icosq->pc); ++ ++ return -ETIMEDOUT; ++} ++ ++static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq) ++{ ++ WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n", ++ icosq->sqn, icosq->cc, icosq->pc); ++ icosq->cc = 0; ++ icosq->pc = 0; ++} ++ ++static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) ++{ ++ struct mlx5_core_dev *mdev; ++ struct mlx5e_icosq *icosq; ++ struct net_device *dev; ++ struct mlx5e_rq *rq; ++ u8 state; ++ int err; ++ ++ icosq = ctx; ++ rq = &icosq->channel->rq; ++ mdev = icosq->channel->mdev; ++ dev = icosq->channel->netdev; ++ err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state); ++ if (err) { ++ netdev_err(dev, "Failed to query ICOSQ 0x%x state. err = %d\n", ++ icosq->sqn, err); ++ goto out; ++ } ++ ++ if (state != MLX5_SQC_STATE_ERR) ++ goto out; ++ ++ mlx5e_deactivate_rq(rq); ++ err = mlx5e_wait_for_icosq_flush(icosq); ++ if (err) ++ goto out; ++ ++ mlx5e_deactivate_icosq(icosq); ++ ++ /* At this point, both the rq and the icosq are disabled */ ++ ++ err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn); ++ if (err) ++ goto out; ++ ++ mlx5e_reset_icosq_cc_pc(icosq); ++ mlx5e_free_rx_descs(rq); ++ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); ++ mlx5e_activate_icosq(icosq); ++ mlx5e_activate_rq(rq); ++ ++ rq->stats->recover++; ++ return 0; ++out: ++ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); ++ return err; ++} ++ ++void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq) ++{ ++ struct mlx5e_priv *priv = icosq->channel->priv; ++ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; ++ struct mlx5e_err_ctx err_ctx = {}; ++ ++ err_ctx.ctx = icosq; ++ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover; ++ sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn); ++ ++ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); ++} ++ ++static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx) ++{ ++ return err_ctx->recover(err_ctx->ctx); ++} ++ ++static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter, ++ void *context, ++ struct netlink_ext_ack *extack) ++{ ++ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); ++ struct mlx5e_err_ctx *err_ctx = context; ++ ++ return err_ctx ? 
mlx5e_rx_reporter_recover_from_ctx(err_ctx) : ++ mlx5e_health_recover_channels(priv); ++} ++ + static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, + struct devlink_fmsg *fmsg) + { +@@ -168,9 +272,12 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, + + static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { + .name = "rx", ++ .recover = mlx5e_rx_reporter_recover, + .diagnose = mlx5e_rx_reporter_diagnose, + }; + ++#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500 ++ + int mlx5e_reporter_rx_create(struct mlx5e_priv *priv) + { + struct devlink *devlink = priv_to_devlink(priv->mdev); +@@ -178,7 +285,8 @@ int mlx5e_reporter_rx_create(struct mlx5e_priv *priv) + + reporter = devlink_health_reporter_create(devlink, + &mlx5_rx_reporter_ops, +- 0, false, priv); ++ MLX5E_REPORTER_RX_GRACEFUL_PERIOD, ++ true, priv); + if (IS_ERR(reporter)) { + netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", + PTR_ERR(reporter)); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 7dde1be49f35..430fb04ea96f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -691,8 +691,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, + return err; + } + +-static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, +- int next_state) ++int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) + { + struct mlx5_core_dev *mdev = rq->mdev; + +@@ -803,7 +802,7 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) + return -ETIMEDOUT; + } + +-static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) ++void mlx5e_free_rx_descs(struct mlx5e_rq *rq) + { + __be16 wqe_ix_be; + u16 wqe_ix; +@@ -882,7 +881,7 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, + return err; + } + +-static void mlx5e_activate_rq(struct mlx5e_rq *rq) ++void mlx5e_activate_rq(struct mlx5e_rq *rq) + { + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); + mlx5e_trigger_irq(&rq->channel->icosq); +@@ -897,6 +896,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) + void mlx5e_close_rq(struct mlx5e_rq *rq) + { + cancel_work_sync(&rq->dim.work); ++ cancel_work_sync(&rq->channel->icosq.recover_work); + mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); + mlx5e_free_rq(rq); +@@ -1013,6 +1013,14 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) + return 0; + } + ++static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work) ++{ ++ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, ++ recover_work); ++ ++ mlx5e_reporter_icosq_cqe_err(sq); ++} ++ + static int mlx5e_alloc_icosq(struct mlx5e_channel *c, + struct mlx5e_sq_param *param, + struct mlx5e_icosq *sq) +@@ -1035,6 +1043,8 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, + if (err) + goto err_sq_wq_destroy; + ++ INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work); ++ + return 0; + + err_sq_wq_destroy: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index a22b3a3db253..ce4d357188df 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -616,6 +616,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); ++ if 
(!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) ++ queue_work(cq->channel->priv->wq, &sq->recover_work); + break; + } + do { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index 3d993e2e7bea..79b3ec005f43 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -161,6 +161,7 @@ static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, +@@ -272,6 +273,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) + s->rx_cache_waive += rq_stats->cache_waive; + s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; ++ s->rx_recover += rq_stats->recover; + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; +@@ -1484,6 +1486,7 @@ static const struct counter_desc rq_stats_desc[] = { + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, ++ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, + }; + + static const struct counter_desc sq_stats_desc[] = { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +index a4a43613d026..ab1c3366ff7d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +@@ -167,6 +167,7 @@ struct mlx5e_sw_stats { + u64 rx_cache_waive; + u64 rx_congst_umr; + u64 rx_arfs_err; ++ u64 rx_recover; + u64 ch_events; + u64 ch_poll; + u64 ch_arm; +@@ -302,6 +303,7 @@ struct mlx5e_rq_stats { + u64 cache_waive; + u64 congst_umr; + u64 arfs_err; ++ u64 recover; + }; + + struct mlx5e_sq_stats { +-- +2.13.6 + diff --git a/SOURCES/0031-netdrv-net-mlx5e-Report-and-recover-from-rx-timeout.patch b/SOURCES/0031-netdrv-net-mlx5e-Report-and-recover-from-rx-timeout.patch new file mode 100644 index 0000000..20344d3 --- /dev/null +++ b/SOURCES/0031-netdrv-net-mlx5e-Report-and-recover-from-rx-timeout.patch @@ -0,0 +1,114 @@ +From ac9174fc02907c3b322b1cba4fe37b73ae29e71b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:55 -0400 +Subject: [PATCH 031/312] [netdrv] net/mlx5e: Report and recover from rx + timeout + +Message-id: <20200510145245.10054-33-ahleihel@redhat.com> +Patchwork-id: 306573 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 32/82] net/mlx5e: Report and recover from rx timeout +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 + +commit 32c57fb26863b48982e33aa95f3b5b23f24b1feb +Author: Aya Levin +Date: Tue Jun 25 21:42:27 2019 +0300 + + net/mlx5e: Report and recover from rx timeout + + Add support for report and recovery from rx timeout. On driver open we + post NOP work request on the rx channels to trigger napi in order to + fillup the rx rings. 
In case napi wasn't scheduled due to a lost + interrupt, perform EQ recovery. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/health.h | 1 + + .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 32 ++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 + + 3 files changed, 34 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index 8acd9dc520cf..b4a2d9be17d6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -19,6 +19,7 @@ int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); + int mlx5e_reporter_rx_create(struct mlx5e_priv *priv); + void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); ++void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); + + #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +index 661de567ca6c..4e933db759b2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +@@ -115,6 +115,38 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq) + mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); + } + ++static int mlx5e_rx_reporter_timeout_recover(void *ctx) ++{ ++ struct mlx5e_icosq *icosq; ++ struct mlx5_eq_comp *eq; ++ struct mlx5e_rq *rq; ++ int err; ++ ++ rq = ctx; ++ icosq = &rq->channel->icosq; ++ eq = rq->cq.mcq.eq; ++ err = mlx5e_health_channel_eq_recover(eq, rq->channel); ++ if (err) ++ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); ++ ++ return err; ++} ++ ++void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) ++{ ++ struct mlx5e_icosq *icosq = &rq->channel->icosq; ++ struct mlx5e_priv *priv = rq->channel->priv; ++ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; ++ struct mlx5e_err_ctx err_ctx = {}; ++ ++ err_ctx.ctx = rq; ++ err_ctx.recover = mlx5e_rx_reporter_timeout_recover; ++ sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n", ++ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn); ++ ++ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); ++} ++ + static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx) + { + return err_ctx->recover(err_ctx->ctx); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 430fb04ea96f..c3eba55e8a21 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -799,6 +799,7 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) + netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", + c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); + ++ mlx5e_reporter_rx_timeout(rq); + return -ETIMEDOUT; + } + +-- +2.13.6 + diff --git a/SOURCES/0032-netdrv-net-mlx5e-RX-Handle-CQE-with-error-at-the-ear.patch b/SOURCES/0032-netdrv-net-mlx5e-RX-Handle-CQE-with-error-at-the-ear.patch new file mode 100644 index 0000000..219cb06 --- /dev/null +++ b/SOURCES/0032-netdrv-net-mlx5e-RX-Handle-CQE-with-error-at-the-ear.patch @@ -0,0 +1,181 @@ 
+From f0e7d22454ff73e1c1ecb37f5ff11a8a5eedbf74 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:56 -0400 +Subject: [PATCH 032/312] [netdrv] net/mlx5e: RX, Handle CQE with error at the + earliest stage + +Message-id: <20200510145245.10054-34-ahleihel@redhat.com> +Patchwork-id: 306574 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 33/82] net/mlx5e: RX, Handle CQE with error at the earliest stage +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 0a35ab3e138296cfe192628520e4d5f3ff23e730 +Author: Saeed Mahameed +Date: Fri Jun 14 15:21:15 2019 -0700 + + net/mlx5e: RX, Handle CQE with error at the earliest stage + + Just to be aligned with the MPWQE handlers, handle RX WQE with error + for legacy RQs in the top RX handlers, just before calling skb_from_cqe(). + + CQE error handling will now be called at the same stage regardless of + the RQ type or netdev mode NIC, Representor, IPoIB, etc .. + + This will be useful for down stream patch to improve error CQE + handling. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/health.h | 2 + + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 49 ++++++++++++---------- + 2 files changed, 30 insertions(+), 21 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index b4a2d9be17d6..52e9ca37cf46 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -6,6 +6,8 @@ + + #include "en.h" + ++#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) ++ + int mlx5e_reporter_tx_create(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index ce4d357188df..1c3da221ee69 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -49,6 +49,7 @@ + #include "lib/clock.h" + #include "en/xdp.h" + #include "en/xsk/rx.h" ++#include "en/health.h" + + static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) + { +@@ -1070,11 +1071,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + prefetchw(va); /* xdp_frame data area */ + prefetch(data); + +- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { +- rq->stats->wqe_err++; +- return NULL; +- } +- + rcu_read_lock(); + consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false); + rcu_read_unlock(); +@@ -1102,11 +1098,6 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + +- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { +- rq->stats->wqe_err++; +- return NULL; +- } +- + /* XDP is not supported in this configuration, as incoming packets + * might spread among multiple pages. 
+ */ +@@ -1152,6 +1143,11 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ++ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ rq->stats->wqe_err++; ++ goto free_wqe; ++ } ++ + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, + mlx5e_skb_from_cqe_linear, + mlx5e_skb_from_cqe_nonlinear, +@@ -1193,6 +1189,11 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ++ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ rq->stats->wqe_err++; ++ goto free_wqe; ++ } ++ + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); + if (!skb) { + /* probably for XDP */ +@@ -1327,7 +1328,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + + wi->consumed_strides += cstrides; + +- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { ++ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + rq->stats->wqe_err++; + goto mpwrq_cqe_out; + } +@@ -1506,6 +1507,11 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ++ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ rq->stats->wqe_err++; ++ goto wq_free_wqe; ++ } ++ + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, + mlx5e_skb_from_cqe_linear, + mlx5e_skb_from_cqe_nonlinear, +@@ -1541,26 +1547,27 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ++ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ rq->stats->wqe_err++; ++ goto wq_free_wqe; ++ } ++ + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, + mlx5e_skb_from_cqe_linear, + mlx5e_skb_from_cqe_nonlinear, + rq, cqe, wi, cqe_bcnt); +- if (unlikely(!skb)) { +- /* a DROP, save the page-reuse checks */ +- mlx5e_free_rx_wqe(rq, wi, true); +- goto wq_cyc_pop; +- } ++ if (unlikely(!skb)) /* a DROP, save the page-reuse checks */ ++ goto wq_free_wqe; ++ + skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); +- if (unlikely(!skb)) { +- mlx5e_free_rx_wqe(rq, wi, true); +- goto wq_cyc_pop; +- } ++ if (unlikely(!skb)) ++ goto wq_free_wqe; + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + napi_gro_receive(rq->cq.napi, skb); + ++wq_free_wqe: + mlx5e_free_rx_wqe(rq, wi, true); +-wq_cyc_pop: + mlx5_wq_cyc_pop(wq); + } + +-- +2.13.6 + diff --git a/SOURCES/0033-netdrv-net-mlx5e-Report-and-recover-from-CQE-with-er.patch b/SOURCES/0033-netdrv-net-mlx5e-Report-and-recover-from-CQE-with-er.patch new file mode 100644 index 0000000..78ff9fe --- /dev/null +++ b/SOURCES/0033-netdrv-net-mlx5e-Report-and-recover-from-CQE-with-er.patch @@ -0,0 +1,247 @@ +From e945df9ee0cc44e01807d66995d4aa0e458a52aa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:57 -0400 +Subject: [PATCH 033/312] [netdrv] net/mlx5e: Report and recover from CQE with + error on RQ + +Message-id: <20200510145245.10054-35-ahleihel@redhat.com> +Patchwork-id: 306575 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 34/82] net/mlx5e: Report and recover from CQE with error on RQ +Bugzilla: 1790198 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Bugzilla: http://bugzilla.redhat.com/1790198 +Upstream: v5.4-rc1 + +commit 8276ea1353a4968a212f04ddf16659223e5408d9 +Author: Aya Levin +Date: Wed Jun 26 23:21:40 2019 +0300 + + net/mlx5e: Report and recover from CQE with error on RQ + + 
Add support for report and recovery from error on completion on RQ by + setting the queue back to ready state. Handle only errors with a + syndrome indicating the RQ might enter error state and could be + recovered. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 + + .../net/ethernet/mellanox/mlx5/core/en/health.h | 9 +++ + .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 69 ++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 9 +++ + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 ++++ + 5 files changed, 101 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index f0ba350579ae..ada39a3f83a9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -308,6 +308,7 @@ struct mlx5e_dcbx_dp { + + enum { + MLX5E_RQ_STATE_ENABLED, ++ MLX5E_RQ_STATE_RECOVERING, + MLX5E_RQ_STATE_AM, + MLX5E_RQ_STATE_NO_CSUM_COMPLETE, + MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ +@@ -680,6 +681,8 @@ struct mlx5e_rq { + struct zero_copy_allocator zca; + struct xdp_umem *umem; + ++ struct work_struct recover_work; ++ + /* control */ + struct mlx5_wq_ctrl wq_ctrl; + __be32 mkey_be; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index 52e9ca37cf46..d3693fa547ac 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -8,6 +8,14 @@ + + #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) + ++static inline bool cqe_syndrome_needs_recover(u8 syndrome) ++{ ++ return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR || ++ syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || ++ syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || ++ syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR; ++} ++ + int mlx5e_reporter_tx_create(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); +@@ -21,6 +29,7 @@ int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); + int mlx5e_reporter_rx_create(struct mlx5e_priv *priv); + void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); + void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); ++void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); + void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); + + #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +index 4e933db759b2..6c72b592315b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +@@ -115,6 +115,75 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq) + mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); + } + ++static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) ++{ ++ struct net_device *dev = rq->netdev; ++ int err; ++ ++ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST); ++ if (err) { ++ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn); ++ return err; ++ } ++ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); ++ if (err) { ++ netdev_err(dev, "Failed to 
move rq 0x%x to ready\n", rq->rqn); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx) ++{ ++ struct mlx5_core_dev *mdev; ++ struct net_device *dev; ++ struct mlx5e_rq *rq; ++ u8 state; ++ int err; ++ ++ rq = ctx; ++ mdev = rq->mdev; ++ dev = rq->netdev; ++ err = mlx5e_query_rq_state(mdev, rq->rqn, &state); ++ if (err) { ++ netdev_err(dev, "Failed to query RQ 0x%x state. err = %d\n", ++ rq->rqn, err); ++ goto out; ++ } ++ ++ if (state != MLX5_RQC_STATE_ERR) ++ goto out; ++ ++ mlx5e_deactivate_rq(rq); ++ mlx5e_free_rx_descs(rq); ++ ++ err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR); ++ if (err) ++ goto out; ++ ++ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state); ++ mlx5e_activate_rq(rq); ++ rq->stats->recover++; ++ return 0; ++out: ++ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state); ++ return err; ++} ++ ++void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq) ++{ ++ struct mlx5e_priv *priv = rq->channel->priv; ++ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; ++ struct mlx5e_err_ctx err_ctx = {}; ++ ++ err_ctx.ctx = rq; ++ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover; ++ sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn); ++ ++ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); ++} ++ + static int mlx5e_rx_reporter_timeout_recover(void *ctx) + { + struct mlx5e_icosq *icosq; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index c3eba55e8a21..13c1151bf60c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -353,6 +353,13 @@ static void mlx5e_free_di_list(struct mlx5e_rq *rq) + kvfree(rq->wqe.di); + } + ++static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work) ++{ ++ struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work); ++ ++ mlx5e_reporter_rq_cqe_err(rq); ++} ++ + static int mlx5e_alloc_rq(struct mlx5e_channel *c, + struct mlx5e_params *params, + struct mlx5e_xsk_param *xsk, +@@ -389,6 +396,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, + rq->stats = &c->priv->channel_stats[c->ix].xskrq; + else + rq->stats = &c->priv->channel_stats[c->ix].rq; ++ INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work); + + rq->xdp_prog = params->xdp_prog ? 
bpf_prog_inc(params->xdp_prog) : NULL; + if (IS_ERR(rq->xdp_prog)) { +@@ -898,6 +906,7 @@ void mlx5e_close_rq(struct mlx5e_rq *rq) + { + cancel_work_sync(&rq->dim.work); + cancel_work_sync(&rq->channel->icosq.recover_work); ++ cancel_work_sync(&rq->recover_work); + mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); + mlx5e_free_rq(rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 1c3da221ee69..64d6ecbece80 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1131,6 +1131,15 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + return skb; + } + ++static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) ++{ ++ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; ++ ++ if (cqe_syndrome_needs_recover(err_cqe->syndrome) && ++ !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) ++ queue_work(rq->channel->priv->wq, &rq->recover_work); ++} ++ + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; +@@ -1144,6 +1153,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ trigger_report(rq, cqe); + rq->stats->wqe_err++; + goto free_wqe; + } +@@ -1329,6 +1339,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + wi->consumed_strides += cstrides; + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ trigger_report(rq, cqe); + rq->stats->wqe_err++; + goto mpwrq_cqe_out; + } +-- +2.13.6 + diff --git a/SOURCES/0034-netdrv-net-mlx5-Improve-functions-documentation.patch b/SOURCES/0034-netdrv-net-mlx5-Improve-functions-documentation.patch new file mode 100644 index 0000000..2f45e71 --- /dev/null +++ b/SOURCES/0034-netdrv-net-mlx5-Improve-functions-documentation.patch @@ -0,0 +1,87 @@ +From 34060a4ab8c1af0bac3e6a229edce9e92ddeeb43 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:58 -0400 +Subject: [PATCH 034/312] [netdrv] net/mlx5: Improve functions documentation + +Message-id: <20200510145245.10054-36-ahleihel@redhat.com> +Patchwork-id: 306576 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 35/82] net/mlx5: Improve functions documentation +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 866ff8f22380a49d665ed72521704844bba6de08 +Author: Saeed Mahameed +Date: Thu Aug 15 19:46:09 2019 +0000 + + net/mlx5: Improve functions documentation + + Fix documentation of mlx5_eq_enable/disable to cleanup compiler warnings. 
+ + drivers/net/ethernet/mellanox/mlx5/core//eq.c:334: + warning: Function parameter or member 'dev' not described in 'mlx5_eq_enable' + warning: Function parameter or member 'eq' not described in 'mlx5_eq_enable' + warning: Function parameter or member 'nb' not described in 'mlx5_eq_enable' + + drivers/net/ethernet/mellanox/mlx5/core//eq.c:355: + warning: Function parameter or member 'dev' not described in 'mlx5_eq_disable' + warning: Function parameter or member 'eq' not described in 'mlx5_eq_disable' + warning: Function parameter or member 'nb' not described in 'mlx5_eq_disable' + + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 22 +++++++++++++--------- + 1 file changed, 13 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +index 09d4c64b6e73..580c71cb9dfa 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +@@ -324,10 +324,13 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + + /** + * mlx5_eq_enable - Enable EQ for receiving EQEs +- * @dev - Device which owns the eq +- * @eq - EQ to enable +- * @nb - notifier call block +- * mlx5_eq_enable - must be called after EQ is created in device. ++ * @dev : Device which owns the eq ++ * @eq : EQ to enable ++ * @nb : Notifier call block ++ * ++ * Must be called after EQ is created in device. ++ * ++ * @return: 0 if no error + */ + int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct notifier_block *nb) +@@ -344,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + EXPORT_SYMBOL(mlx5_eq_enable); + + /** +- * mlx5_eq_disable - Enable EQ for receiving EQEs +- * @dev - Device which owns the eq +- * @eq - EQ to disable +- * @nb - notifier call block +- * mlx5_eq_disable - must be called before EQ is destroyed. ++ * mlx5_eq_disable - Disable EQ for receiving EQEs ++ * @dev : Device which owns the eq ++ * @eq : EQ to disable ++ * @nb : Notifier call block ++ * ++ * Must be called before EQ is destroyed. + */ + void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct notifier_block *nb) +-- +2.13.6 + diff --git a/SOURCES/0035-include-net-mlx5-Expose-IP-in-IP-capability-bit.patch b/SOURCES/0035-include-net-mlx5-Expose-IP-in-IP-capability-bit.patch new file mode 100644 index 0000000..54fc07b --- /dev/null +++ b/SOURCES/0035-include-net-mlx5-Expose-IP-in-IP-capability-bit.patch @@ -0,0 +1,56 @@ +From c3fc6a1251852a166487548deb89993b88d2ca87 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:51:59 -0400 +Subject: [PATCH 035/312] [include] net/mlx5: Expose IP-in-IP capability bit + +Message-id: <20200510145245.10054-37-ahleihel@redhat.com> +Patchwork-id: 306578 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 36/82] net/mlx5: Expose IP-in-IP capability bit +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit caa1854735449d7afac6781679621fb9142fe810 +Author: Aya Levin +Date: Thu Aug 15 19:46:14 2019 +0000 + + net/mlx5: Expose IP-in-IP capability bit + + Expose Fw indication that it supports Stateless Offloads for IP over IP + tunneled packets. 
The following offloads are supported for the inner + packets: RSS, RX & TX Checksum Offloads, LSO and Flow Steering. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 03cb1cf0e285..77c354384ce5 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -860,7 +860,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { + u8 swp_csum[0x1]; + u8 swp_lso[0x1]; + u8 cqe_checksum_full[0x1]; +- u8 reserved_at_24[0xc]; ++ u8 reserved_at_24[0x5]; ++ u8 tunnel_stateless_ip_over_ip[0x1]; ++ u8 reserved_at_2a[0x6]; + u8 max_vxlan_udp_ports[0x8]; + u8 reserved_at_38[0x6]; + u8 max_geneve_opt_len[0x1]; +-- +2.13.6 + diff --git a/SOURCES/0036-netdrv-net-mlx5-Add-per-namespace-flow-table-default.patch b/SOURCES/0036-netdrv-net-mlx5-Add-per-namespace-flow-table-default.patch new file mode 100644 index 0000000..54c5ae2 --- /dev/null +++ b/SOURCES/0036-netdrv-net-mlx5-Add-per-namespace-flow-table-default.patch @@ -0,0 +1,249 @@ +From a051b0b47d1666c94db81514fc8a5798ba552851 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:00 -0400 +Subject: [PATCH 036/312] [netdrv] net/mlx5: Add per-namespace flow table + default miss action support + +Message-id: <20200510145245.10054-38-ahleihel@redhat.com> +Patchwork-id: 306577 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 37/82] net/mlx5: Add per-namespace flow table default miss action support +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/fs_core.c + Context diff due to already backported commit: + 20f7b37ffc7d ("net/mlx5e: Introduce root ft concept for representors netdevs") + ---> We have FS_CHAINING_CAPS instead of empty element. + +commit f66ad830b11406cdff84e7d8656a0a9e34b0b606 +Author: Mark Zhang +Date: Mon Aug 19 14:36:24 2019 +0300 + + net/mlx5: Add per-namespace flow table default miss action support + + Currently all the namespaces under the same steering domain share the same + default table miss action, however in some situations (e.g., RDMA RX) + different actions are required. This patch adds a per-namespace default + table miss action instead of using the miss action of the steering domain. 
+ + Signed-off-by: Mark Zhang + Reviewed-by: Mark Bloch + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 4 +- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 73 +++++++++++++---------- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 8 +++ + 3 files changed, 53 insertions(+), 32 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +index a848272a60a1..3c816e81f8d9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +@@ -226,7 +226,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, + } else { + MLX5_SET(create_flow_table_in, in, + flow_table_context.table_miss_action, +- ns->def_miss_action); ++ ft->def_miss_action); + } + break; + +@@ -306,7 +306,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, + } else { + MLX5_SET(modify_flow_table_in, in, + flow_table_context.table_miss_action, +- ns->def_miss_action); ++ ft->def_miss_action); + } + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 26d0333080e4..5ebd74d078f2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -60,7 +60,8 @@ + ADD_PRIO(num_prios_val, 0, num_levels_val, {},\ + __VA_ARGS__)\ + +-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ ++#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \ ++ .def_miss_action = def_miss_act,\ + .children = (struct init_tree_node[]) {__VA_ARGS__},\ + .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ + } +@@ -131,33 +132,41 @@ static struct init_tree_node { + int num_leaf_prios; + int prio; + int num_levels; ++ enum mlx5_flow_table_miss_action def_miss_action; + } root_fs = { + .type = FS_TYPE_NAMESPACE, + .ar_size = 7, +- .children = (struct init_tree_node[]) { +- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, +- FS_CHAINING_CAPS, +- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, +- BY_PASS_PRIO_NUM_LEVELS))), +- ADD_PRIO(0, LAG_MIN_LEVEL, 0, +- FS_CHAINING_CAPS, +- ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, +- LAG_PRIO_NUM_LEVELS))), +- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS, +- ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), +- ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, +- FS_CHAINING_CAPS, +- ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, +- ETHTOOL_PRIO_NUM_LEVELS))), +- ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, +- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), +- ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, +- KERNEL_NIC_PRIO_NUM_LEVELS))), +- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, +- FS_CHAINING_CAPS, +- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), +- ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, +- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), ++ .children = (struct init_tree_node[]){ ++ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, ++ BY_PASS_PRIO_NUM_LEVELS))), ++ ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, ++ LAG_PRIO_NUM_LEVELS))), ++ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, ++ 
OFFLOADS_MAX_FT))), ++ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, ++ ETHTOOL_PRIO_NUM_LEVELS))), ++ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, ++ KERNEL_NIC_TC_NUM_LEVELS), ++ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, ++ KERNEL_NIC_PRIO_NUM_LEVELS))), ++ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, ++ LEFTOVERS_NUM_LEVELS))), ++ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ++ ANCHOR_NUM_LEVELS))), + } + }; + +@@ -167,7 +176,8 @@ static struct init_tree_node egress_root_fs = { + .children = (struct init_tree_node[]) { + ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, + FS_CHAINING_CAPS_EGRESS, +- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + } + }; +@@ -1014,6 +1024,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa + tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); + log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; + next_ft = find_next_chained_ft(fs_prio); ++ ft->def_miss_action = ns->def_miss_action; + err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft); + if (err) + goto free_ft; +@@ -2159,7 +2170,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace + return ns; + } + +-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) ++static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio, ++ int def_miss_act) + { + struct mlx5_flow_namespace *ns; + +@@ -2168,6 +2180,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) + return ERR_PTR(-ENOMEM); + + fs_init_namespace(ns); ++ ns->def_miss_action = def_miss_act; + tree_init_node(&ns->node, NULL, del_sw_ns); + tree_add_node(&ns->node, &prio->node); + list_add_tail(&ns->node.list, &prio->node.children); +@@ -2234,7 +2247,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering, + base = &fs_prio->node; + } else if (init_node->type == FS_TYPE_NAMESPACE) { + fs_get_obj(fs_prio, fs_parent_node); +- fs_ns = fs_create_namespace(fs_prio); ++ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action); + if (IS_ERR(fs_ns)) + return PTR_ERR(fs_ns); + base = &fs_ns->node; +@@ -2504,7 +2517,7 @@ static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) + if (!steering->rdma_rx_root_ns) + return -ENOMEM; + +- steering->rdma_rx_root_ns->def_miss_action = ++ steering->rdma_rx_root_ns->ns.def_miss_action = + MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN; + + /* Create single prio */ +@@ -2547,7 +2560,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + } + + for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { +- ns = fs_create_namespace(maj_prio); ++ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); + if (IS_ERR(ns)) { + err = PTR_ERR(ns); + goto out_err; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +index 51e1bdb49ff8..c6221ccbdddf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +@@ -171,6 +171,9 @@ struct mlx5_flow_table { + struct 
list_head fwd_rules; + u32 flags; + struct rhltable fgs_hash; ++#ifndef __GENKSYMS__ ++ enum mlx5_flow_table_miss_action def_miss_action; ++#endif + }; + + struct mlx5_ft_underlay_qp { +@@ -218,6 +221,9 @@ struct fs_prio { + struct mlx5_flow_namespace { + /* parent == NULL => root ns */ + struct fs_node node; ++#ifndef __GENKSYMS__ ++ enum mlx5_flow_table_miss_action def_miss_action; ++#endif + }; + + struct mlx5_flow_group_mask { +@@ -249,7 +255,9 @@ struct mlx5_flow_root_namespace { + struct mutex chain_lock; + struct list_head underlay_qpns; + const struct mlx5_flow_cmds *cmds; ++#ifdef __GENKSYMS__ + enum mlx5_flow_table_miss_action def_miss_action; ++#endif + }; + + int mlx5_init_fc_stats(struct mlx5_core_dev *dev); +-- +2.13.6 + diff --git a/SOURCES/0037-netdrv-net-mlx5-Create-bypass-and-loopback-flow-stee.patch b/SOURCES/0037-netdrv-net-mlx5-Create-bypass-and-loopback-flow-stee.patch new file mode 100644 index 0000000..29029a4 --- /dev/null +++ b/SOURCES/0037-netdrv-net-mlx5-Create-bypass-and-loopback-flow-stee.patch @@ -0,0 +1,161 @@ +From d5e6f312b0c92828a91b795274a9fece4b45f953 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:01 -0400 +Subject: [PATCH 037/312] [netdrv] net/mlx5: Create bypass and loopback flow + steering namespaces for RDMA RX + +Message-id: <20200510145245.10054-39-ahleihel@redhat.com> +Patchwork-id: 306579 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 38/82] net/mlx5: Create bypass and loopback flow steering namespaces for RDMA RX +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit e6806e9a63a759e445383915bb9d2ec85a90aebf +Author: Mark Zhang +Date: Mon Aug 19 14:36:25 2019 +0300 + + net/mlx5: Create bypass and loopback flow steering namespaces for RDMA RX + + Use different namespaces for bypass and switchdev loopback because they + have different priorities and default table miss action requirement: + 1. bypass: with multiple priorities support, and + MLX5_FLOW_TABLE_MISS_ACTION_DEF as the default table miss action; + 2. switchdev loopback: with single priority support, and + MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN as the default table miss + action. 
+ + Signed-off-by: Mark Zhang + Reviewed-by: Mark Bloch + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 49 ++++++++++++++++++----- + drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2 +- + include/linux/mlx5/fs.h | 3 ++ + 3 files changed, 43 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 5ebd74d078f2..495396f42153 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -182,6 +182,26 @@ static struct init_tree_node egress_root_fs = { + } + }; + ++#define RDMA_RX_BYPASS_PRIO 0 ++#define RDMA_RX_KERNEL_PRIO 1 ++static struct init_tree_node rdma_rx_root_fs = { ++ .type = FS_TYPE_NAMESPACE, ++ .ar_size = 2, ++ .children = (struct init_tree_node[]) { ++ [RDMA_RX_BYPASS_PRIO] = ++ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, ++ FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, ++ BY_PASS_PRIO_NUM_LEVELS))), ++ [RDMA_RX_KERNEL_PRIO] = ++ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, ++ FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, ++ ADD_MULTIPLE_PRIO(1, 1))), ++ } ++}; ++ + enum fs_i_lock_class { + FS_LOCK_GRANDPARENT, + FS_LOCK_PARENT, +@@ -2071,16 +2091,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + if (steering->sniffer_tx_root_ns) + return &steering->sniffer_tx_root_ns->ns; + return NULL; +- case MLX5_FLOW_NAMESPACE_RDMA_RX: +- if (steering->rdma_rx_root_ns) +- return &steering->rdma_rx_root_ns->ns; +- return NULL; + default: + break; + } + + if (type == MLX5_FLOW_NAMESPACE_EGRESS) { + root_ns = steering->egress_root_ns; ++ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) { ++ root_ns = steering->rdma_rx_root_ns; ++ prio = RDMA_RX_BYPASS_PRIO; ++ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) { ++ root_ns = steering->rdma_rx_root_ns; ++ prio = RDMA_RX_KERNEL_PRIO; + } else { /* Must be NIC RX */ + root_ns = steering->root_ns; + prio = type; +@@ -2511,18 +2533,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering) + + static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) + { +- struct fs_prio *prio; ++ int err; + + steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX); + if (!steering->rdma_rx_root_ns) + return -ENOMEM; + +- steering->rdma_rx_root_ns->ns.def_miss_action = +- MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN; ++ err = init_root_tree(steering, &rdma_rx_root_fs, ++ &steering->rdma_rx_root_ns->ns.node); ++ if (err) ++ goto out_err; + +- /* Create single prio */ +- prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1); +- return PTR_ERR_OR_ZERO(prio); ++ set_prio_attrs(steering->rdma_rx_root_ns); ++ ++ return 0; ++ ++out_err: ++ cleanup_root_ns(steering->rdma_rx_root_ns); ++ steering->rdma_rx_root_ns = NULL; ++ return err; + } + static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +index c43f7dc43cea..0fc7de4aa572 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +@@ -48,7 +48,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev) + return -ENOMEM; + } + +- ns = mlx5_get_flow_namespace(dev, 
MLX5_FLOW_NAMESPACE_RDMA_RX); ++ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL); + if (!ns) { + mlx5_core_err(dev, "Failed to get RDMA RX namespace"); + err = -EOPNOTSUPP; +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index a008e9b63b78..948cba3389ff 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -75,6 +75,9 @@ enum mlx5_flow_namespace_type { + MLX5_FLOW_NAMESPACE_SNIFFER_TX, + MLX5_FLOW_NAMESPACE_EGRESS, + MLX5_FLOW_NAMESPACE_RDMA_RX, ++#ifndef __GENKSYMS__ ++ MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, ++#endif + }; + + enum { +-- +2.13.6 + diff --git a/SOURCES/0038-netdrv-net-mlx5e-Add-tc-flower-tracepoints.patch b/SOURCES/0038-netdrv-net-mlx5e-Add-tc-flower-tracepoints.patch new file mode 100644 index 0000000..02d0f2f --- /dev/null +++ b/SOURCES/0038-netdrv-net-mlx5e-Add-tc-flower-tracepoints.patch @@ -0,0 +1,262 @@ +From c4bef68d1ee7d83b186a264f290c8fdbf47abdae Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:04 -0400 +Subject: [PATCH 038/312] [netdrv] net/mlx5e: Add tc flower tracepoints + +Message-id: <20200510145245.10054-42-ahleihel@redhat.com> +Patchwork-id: 306582 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 41/82] net/mlx5e: Add tc flower tracepoints +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 +Conflicts: + - Documentation/networking/device_drivers/mellanox/mlx5.rst + Drop changes to doc file that doesn't exist in RHEL-8 tree. + +commit 7a978759b4e0e7a2ad3f10cbf9077915a85ec956 +Author: Dmytro Linkin +Date: Thu Jun 27 10:55:02 2019 +0000 + + net/mlx5e: Add tc flower tracepoints + + Implemented following tracepoints: + 1. Configure flower (mlx5e_configure_flower) + 2. Delete flower (mlx5e_delete_flower) + 3. Stats flower (mlx5e_stats_flower) + + Usage example: + ># cd /sys/kernel/debug/tracing + ># echo mlx5:mlx5e_configure_flower >> set_event + ># cat trace + ... 
+ tc-6535 [019] ...1 2672.404466: mlx5e_configure_flower: cookie=0000000067874a55 actions= REDIRECT + + Added corresponding documentation in + Documentation/networking/device-driver/mellanox/mlx5.rst + + Signed-off-by: Dmytro Linkin + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- + .../mellanox/mlx5/core/diag/en_tc_tracepoint.c | 58 +++++++++++++++ + .../mellanox/mlx5/core/diag/en_tc_tracepoint.h | 83 ++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++ + 4 files changed, 146 insertions(+), 1 deletion(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index bd2074d5eb87..3ac94d97cc24 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o + mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o + mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \ + lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ +- en/tc_tun_geneve.o ++ en/tc_tun_geneve.o diag/en_tc_tracepoint.o + + # + # Core extra +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c +new file mode 100644 +index 000000000000..c5dc6c50fa87 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c +@@ -0,0 +1,58 @@ ++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB ++/* Copyright (c) 2019 Mellanox Technologies. 
*/ ++ ++#define CREATE_TRACE_POINTS ++#include "en_tc_tracepoint.h" ++ ++void put_ids_to_array(int *ids, ++ const struct flow_action_entry *entries, ++ unsigned int num) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < num; i++) ++ ids[i] = entries[i].id; ++} ++ ++#define NAME_SIZE 16 ++ ++static const char FLOWACT2STR[NUM_FLOW_ACTIONS][NAME_SIZE] = { ++ [FLOW_ACTION_ACCEPT] = "ACCEPT", ++ [FLOW_ACTION_DROP] = "DROP", ++ [FLOW_ACTION_TRAP] = "TRAP", ++ [FLOW_ACTION_GOTO] = "GOTO", ++ [FLOW_ACTION_REDIRECT] = "REDIRECT", ++ [FLOW_ACTION_MIRRED] = "MIRRED", ++ [FLOW_ACTION_VLAN_PUSH] = "VLAN_PUSH", ++ [FLOW_ACTION_VLAN_POP] = "VLAN_POP", ++ [FLOW_ACTION_VLAN_MANGLE] = "VLAN_MANGLE", ++ [FLOW_ACTION_TUNNEL_ENCAP] = "TUNNEL_ENCAP", ++ [FLOW_ACTION_TUNNEL_DECAP] = "TUNNEL_DECAP", ++ [FLOW_ACTION_MANGLE] = "MANGLE", ++ [FLOW_ACTION_ADD] = "ADD", ++ [FLOW_ACTION_CSUM] = "CSUM", ++ [FLOW_ACTION_MARK] = "MARK", ++ [FLOW_ACTION_WAKE] = "WAKE", ++ [FLOW_ACTION_QUEUE] = "QUEUE", ++ [FLOW_ACTION_SAMPLE] = "SAMPLE", ++ [FLOW_ACTION_POLICE] = "POLICE", ++ [FLOW_ACTION_CT] = "CT", ++}; ++ ++const char *parse_action(struct trace_seq *p, ++ int *ids, ++ unsigned int num) ++{ ++ const char *ret = trace_seq_buffer_ptr(p); ++ unsigned int i; ++ ++ for (i = 0; i < num; i++) { ++ if (ids[i] < NUM_FLOW_ACTIONS) ++ trace_seq_printf(p, "%s ", FLOWACT2STR[ids[i]]); ++ else ++ trace_seq_printf(p, "UNKNOWN "); ++ } ++ ++ trace_seq_putc(p, 0); ++ return ret; ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h +new file mode 100644 +index 000000000000..a362100fe6d3 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h +@@ -0,0 +1,83 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2019 Mellanox Technologies. */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM mlx5 ++ ++#if !defined(_MLX5_TC_TP_) || defined(TRACE_HEADER_MULTI_READ) ++#define _MLX5_TC_TP_ ++ ++#include ++#include ++#include ++ ++#define __parse_action(ids, num) parse_action(p, ids, num) ++ ++void put_ids_to_array(int *ids, ++ const struct flow_action_entry *entries, ++ unsigned int num); ++ ++const char *parse_action(struct trace_seq *p, ++ int *ids, ++ unsigned int num); ++ ++DECLARE_EVENT_CLASS(mlx5e_flower_template, ++ TP_PROTO(const struct flow_cls_offload *f), ++ TP_ARGS(f), ++ TP_STRUCT__entry(__field(void *, cookie) ++ __field(unsigned int, num) ++ __dynamic_array(int, ids, f->rule ? ++ f->rule->action.num_entries : 0) ++ ), ++ TP_fast_assign(__entry->cookie = (void *)f->cookie; ++ __entry->num = (f->rule ? ++ f->rule->action.num_entries : 0); ++ if (__entry->num) ++ put_ids_to_array(__get_dynamic_array(ids), ++ f->rule->action.entries, ++ f->rule->action.num_entries); ++ ), ++ TP_printk("cookie=%p actions= %s\n", ++ __entry->cookie, __entry->num ? 
++ __parse_action(__get_dynamic_array(ids), ++ __entry->num) : "NULL" ++ ) ++); ++ ++DEFINE_EVENT(mlx5e_flower_template, mlx5e_configure_flower, ++ TP_PROTO(const struct flow_cls_offload *f), ++ TP_ARGS(f) ++ ); ++ ++DEFINE_EVENT(mlx5e_flower_template, mlx5e_delete_flower, ++ TP_PROTO(const struct flow_cls_offload *f), ++ TP_ARGS(f) ++ ); ++ ++TRACE_EVENT(mlx5e_stats_flower, ++ TP_PROTO(const struct flow_cls_offload *f), ++ TP_ARGS(f), ++ TP_STRUCT__entry(__field(void *, cookie) ++ __field(u64, bytes) ++ __field(u64, packets) ++ __field(u64, lastused) ++ ), ++ TP_fast_assign(__entry->cookie = (void *)f->cookie; ++ __entry->bytes = f->stats.bytes; ++ __entry->packets = f->stats.pkts; ++ __entry->lastused = f->stats.lastused; ++ ), ++ TP_printk("cookie=%p bytes=%llu packets=%llu lastused=%llu\n", ++ __entry->cookie, __entry->bytes, ++ __entry->packets, __entry->lastused ++ ) ++); ++ ++#endif /* _MLX5_TC_TP_ */ ++ ++/* This part must be outside protection */ ++#undef TRACE_INCLUDE_PATH ++#define TRACE_INCLUDE_PATH ./diag ++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_FILE en_tc_tracepoint ++#include +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index d7d2151d1ef3..8d0cf434d16c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -56,6 +56,7 @@ + #include "en/tc_tun.h" + #include "lib/devcom.h" + #include "lib/geneve.h" ++#include "diag/en_tc_tracepoint.h" + + struct mlx5_nic_flow_attr { + u32 action; +@@ -3810,6 +3811,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, + goto out; + } + ++ trace_mlx5e_configure_flower(f); + err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); + if (err) + goto out; +@@ -3859,6 +3861,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, + rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); + rcu_read_unlock(); + ++ trace_mlx5e_delete_flower(f); + mlx5e_flow_put(priv, flow); + + return 0; +@@ -3928,6 +3931,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + out: + flow_stats_update(&f->stats, bytes, packets, lastuse); ++ trace_mlx5e_stats_flower(f); + errout: + mlx5e_flow_put(priv, flow); + return err; +-- +2.13.6 + diff --git a/SOURCES/0039-netdrv-net-mlx5e-Add-trace-point-for-neigh-used-valu.patch b/SOURCES/0039-netdrv-net-mlx5e-Add-trace-point-for-neigh-used-valu.patch new file mode 100644 index 0000000..25cc786 --- /dev/null +++ b/SOURCES/0039-netdrv-net-mlx5e-Add-trace-point-for-neigh-used-valu.patch @@ -0,0 +1,118 @@ +From e2600e33bb83fcfb5ee3505f069d5c469e1633ef Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:05 -0400 +Subject: [PATCH 039/312] [netdrv] net/mlx5e: Add trace point for neigh used + value update + +Message-id: <20200510145245.10054-43-ahleihel@redhat.com> +Patchwork-id: 306583 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 42/82] net/mlx5e: Add trace point for neigh used value update +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 +Conflicts: + - Documentation/networking/device_drivers/mellanox/mlx5.rst + Drop changes to doc file that doesn't exist. 
+ +commit c786fe596bede275f887f212eebee74490043b84 +Author: Vlad Buslov +Date: Tue Jun 25 22:33:15 2019 +0300 + + net/mlx5e: Add trace point for neigh used value update + + Allow tracing result of neigh used value update task that is executed + periodically on workqueue. + + Usage example: + ># cd /sys/kernel/debug/tracing + ># echo mlx5:mlx5e_tc_update_neigh_used_value >> set_event + ># cat trace + ... + kworker/u48:4-8806 [009] ...1 55117.882428: mlx5e_tc_update_neigh_used_value: + netdev: ens1f0 IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_used=1 + + Added corresponding documentation in + Documentation/networking/device-driver/mellanox/mlx5.rst + + Signed-off-by: Vlad Buslov + Reviewed-by: Dmytro Linkin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../mellanox/mlx5/core/diag/en_tc_tracepoint.h | 31 ++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 ++ + 2 files changed, 33 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h +index a362100fe6d3..d4e6cfaaade3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include "en_rep.h" + + #define __parse_action(ids, num) parse_action(p, ids, num) + +@@ -73,6 +74,36 @@ TRACE_EVENT(mlx5e_stats_flower, + ) + ); + ++TRACE_EVENT(mlx5e_tc_update_neigh_used_value, ++ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used), ++ TP_ARGS(nhe, neigh_used), ++ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name) ++ __array(u8, v4, 4) ++ __array(u8, v6, 16) ++ __field(bool, neigh_used) ++ ), ++ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh; ++ struct in6_addr *pin6; ++ __be32 *p32; ++ ++ __assign_str(devname, mn->dev->name); ++ __entry->neigh_used = neigh_used; ++ ++ p32 = (__be32 *)__entry->v4; ++ pin6 = (struct in6_addr *)__entry->v6; ++ if (mn->family == AF_INET) { ++ *p32 = mn->dst_ip.v4; ++ ipv6_addr_set_v4mapped(*p32, pin6); ++ } else if (mn->family == AF_INET6) { ++ *pin6 = mn->dst_ip.v6; ++ } ++ ), ++ TP_printk("netdev: %s IPv4: %pI4 IPv6: %pI6c neigh_used=%d\n", ++ __get_str(devname), __entry->v4, __entry->v6, ++ __entry->neigh_used ++ ) ++); ++ + #endif /* _MLX5_TC_TP_ */ + + /* This part must be outside protection */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 8d0cf434d16c..31d71e1f0545 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1536,6 +1536,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) + } + } + ++ trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used); ++ + if (neigh_used) { + nhe->reported_lastuse = jiffies; + +-- +2.13.6 + diff --git a/SOURCES/0040-netdrv-net-mlx5e-Add-trace-point-for-neigh-update.patch b/SOURCES/0040-netdrv-net-mlx5e-Add-trace-point-for-neigh-update.patch new file mode 100644 index 0000000..b05da2d --- /dev/null +++ b/SOURCES/0040-netdrv-net-mlx5e-Add-trace-point-for-neigh-update.patch @@ -0,0 +1,138 @@ +From 94744255e69bab4bcd94627d5255f75bc71f09e0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:06 -0400 +Subject: [PATCH 040/312] [netdrv] net/mlx5e: Add trace point for neigh update + +Message-id: <20200510145245.10054-44-ahleihel@redhat.com> 
+Patchwork-id: 306584 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 43/82] net/mlx5e: Add trace point for neigh update +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 +Conflicts: + - Documentation/networking/device_drivers/mellanox/mlx5.rst + Drop changes to doc file that doesn't exist. + +commit 5970882a2510e8bffaef518a82ea207798187a93 +Author: Vlad Buslov +Date: Tue Jun 25 22:40:20 2019 +0300 + + net/mlx5e: Add trace point for neigh update + + Allow tracing neigh state during neigh update task that is executed on + workqueue and is scheduled by neigh state change event. + + Usage example: + ># cd /sys/kernel/debug/tracing + ># echo mlx5:mlx5e_rep_neigh_update >> set_event + ># cat trace + ... + kworker/u48:7-2221 [009] ...1 1475.387435: mlx5e_rep_neigh_update: + netdev: ens1f0 MAC: 24:8a:07:9a:17:9a IPv4: 1.1.1.10 IPv6: ::ffff:1.1.1.10 neigh_connected=1 + + Added corresponding documentation in + Documentation/networking/device-driver/mellanox/mlx5.rst + + Signed-off-by: Vlad Buslov + Reviewed-by: Dmytro Linkin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../mellanox/mlx5/core/diag/en_rep_tracepoint.h | 54 ++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 ++ + 2 files changed, 58 insertions(+) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h +new file mode 100644 +index 000000000000..1177860a2ee4 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h +@@ -0,0 +1,54 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2019 Mellanox Technologies. 
*/ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM mlx5 ++ ++#if !defined(_MLX5_EN_REP_TP_) || defined(TRACE_HEADER_MULTI_READ) ++#define _MLX5_EN_REP_TP_ ++ ++#include ++#include ++#include "en_rep.h" ++ ++TRACE_EVENT(mlx5e_rep_neigh_update, ++ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha, ++ bool neigh_connected), ++ TP_ARGS(nhe, ha, neigh_connected), ++ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name) ++ __array(u8, ha, ETH_ALEN) ++ __array(u8, v4, 4) ++ __array(u8, v6, 16) ++ __field(bool, neigh_connected) ++ ), ++ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh; ++ struct in6_addr *pin6; ++ __be32 *p32; ++ ++ __assign_str(devname, mn->dev->name); ++ __entry->neigh_connected = neigh_connected; ++ memcpy(__entry->ha, ha, ETH_ALEN); ++ ++ p32 = (__be32 *)__entry->v4; ++ pin6 = (struct in6_addr *)__entry->v6; ++ if (mn->family == AF_INET) { ++ *p32 = mn->dst_ip.v4; ++ ipv6_addr_set_v4mapped(*p32, pin6); ++ } else if (mn->family == AF_INET6) { ++ *pin6 = mn->dst_ip.v6; ++ } ++ ), ++ TP_printk("netdev: %s MAC: %pM IPv4: %pI4 IPv6: %pI6c neigh_connected=%d\n", ++ __get_str(devname), __entry->ha, ++ __entry->v4, __entry->v6, __entry->neigh_connected ++ ) ++); ++ ++#endif /* _MLX5_EN_REP_TP_ */ ++ ++/* This part must be outside protection */ ++#undef TRACE_INCLUDE_PATH ++#define TRACE_INCLUDE_PATH ./diag ++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_FILE en_rep_tracepoint ++#include +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 66c8c2ace4b9..037983a8f149 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -46,6 +46,8 @@ + #include "en/tc_tun.h" + #include "fs_core.h" + #include "lib/port_tun.h" ++#define CREATE_TRACE_POINTS ++#include "diag/en_rep_tracepoint.h" + + #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ + max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) +@@ -633,6 +635,8 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) + + neigh_connected = (nud_state & NUD_VALID) && !dead; + ++ trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected); ++ + list_for_each_entry(e, &nhe->encap_list, encap_list) { + if (!mlx5e_encap_take(e)) + continue; +-- +2.13.6 + diff --git a/SOURCES/0041-netdrv-net-mlx5-Add-wrappers-for-HyperV-PCIe-operati.patch b/SOURCES/0041-netdrv-net-mlx5-Add-wrappers-for-HyperV-PCIe-operati.patch new file mode 100644 index 0000000..635faa2 --- /dev/null +++ b/SOURCES/0041-netdrv-net-mlx5-Add-wrappers-for-HyperV-PCIe-operati.patch @@ -0,0 +1,159 @@ +From c77866236e272b0520fba28dd04977c85c672167 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:07 -0400 +Subject: [PATCH 041/312] [netdrv] net/mlx5: Add wrappers for HyperV PCIe + operations + +Message-id: <20200510145245.10054-45-ahleihel@redhat.com> +Patchwork-id: 306586 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 44/82] net/mlx5: Add wrappers for HyperV PCIe operations +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 913d14e866573350de3adede3c90cefb81944b0c +Author: Eran Ben Elisha +Date: Thu Aug 22 05:05:47 2019 +0000 + + net/mlx5: Add wrappers for HyperV PCIe operations + + Add wrapper functions for HyperV PCIe read / write / + block_invalidate_register operations. 
This will be used as an + infrastructure in the downstream patch for software communication. + + This will be enabled by default if CONFIG_PCI_HYPERV_INTERFACE is set. + + Signed-off-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + Signed-off-by: Haiyang Zhang + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1 + + drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 64 ++++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h | 22 ++++++++ + 3 files changed, 87 insertions(+) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index 3ac94d97cc24..d14a13557c0c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -45,6 +45,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo + mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o + mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o + mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o ++mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o + + # + # Ipoib netdev +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c +new file mode 100644 +index 000000000000..cf08d02703fb +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c +@@ -0,0 +1,64 @@ ++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB ++// Copyright (c) 2018 Mellanox Technologies ++ ++#include ++#include "mlx5_core.h" ++#include "lib/hv.h" ++ ++static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len, ++ int offset, bool read) ++{ ++ int rc = -EOPNOTSUPP; ++ int bytes_returned; ++ int block_id; ++ ++ if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len % HV_CONFIG_BLOCK_SIZE_MAX) ++ return -EINVAL; ++ ++ block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX; ++ ++ rc = read ? ++ hyperv_read_cfg_blk(dev->pdev, buf, ++ HV_CONFIG_BLOCK_SIZE_MAX, block_id, ++ &bytes_returned) : ++ hyperv_write_cfg_blk(dev->pdev, buf, ++ HV_CONFIG_BLOCK_SIZE_MAX, block_id); ++ ++ /* Make sure len bytes were read successfully */ ++ if (read) ++ rc |= !(len == bytes_returned); ++ ++ if (rc) { ++ mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n", ++ read ? 
"read" : "write", rc, len, ++ offset); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len, ++ int offset) ++{ ++ return mlx5_hv_config_common(dev, buf, len, offset, true); ++} ++ ++int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len, ++ int offset) ++{ ++ return mlx5_hv_config_common(dev, buf, len, offset, false); ++} ++ ++int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context, ++ void (*block_invalidate)(void *context, ++ u64 block_mask)) ++{ ++ return hyperv_reg_block_invalidate(dev->pdev, context, ++ block_invalidate); ++} ++ ++void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev) ++{ ++ hyperv_reg_block_invalidate(dev->pdev, NULL, NULL); ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h +new file mode 100644 +index 000000000000..f9a45573f459 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h +@@ -0,0 +1,22 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2019 Mellanox Technologies. */ ++ ++#ifndef __LIB_HV_H__ ++#define __LIB_HV_H__ ++ ++#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE) ++ ++#include ++#include ++ ++int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len, ++ int offset); ++int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len, ++ int offset); ++int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context, ++ void (*block_invalidate)(void *context, ++ u64 block_mask)); ++void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev); ++#endif ++ ++#endif /* __LIB_HV_H__ */ +-- +2.13.6 + diff --git a/SOURCES/0042-netdrv-net-mlx5-Fix-return-code-in-case-of-hyperv-wr.patch b/SOURCES/0042-netdrv-net-mlx5-Fix-return-code-in-case-of-hyperv-wr.patch new file mode 100644 index 0000000..65d0629 --- /dev/null +++ b/SOURCES/0042-netdrv-net-mlx5-Fix-return-code-in-case-of-hyperv-wr.patch @@ -0,0 +1,70 @@ +From e64a826c128582b7af72680bd51b27f44803c829 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:11 -0400 +Subject: [PATCH 042/312] [netdrv] net/mlx5: Fix return code in case of hyperv + wrong size read + +Message-id: <20200510145245.10054-49-ahleihel@redhat.com> +Patchwork-id: 306590 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 48/82] net/mlx5: Fix return code in case of hyperv wrong size read +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 87cade2997c9210cfeb625957e44b865a89d0c13 +Author: Eran Ben Elisha +Date: Fri Aug 23 15:34:47 2019 +0300 + + net/mlx5: Fix return code in case of hyperv wrong size read + + Return code value could be non deterministic in case of wrong size read. + With this patch, if such error occurs, set rc to be -EIO. + + In addition, mlx5_hv_config_common() supports reading of + HV_CONFIG_BLOCK_SIZE_MAX bytes only, fix to early return error with + bad input. + + Fixes: 913d14e86657 ("net/mlx5: Add wrappers for HyperV PCIe operations") + Reported-by: Leon Romanovsky + Signed-off-by: Eran Ben Elisha + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c +index cf08d02703fb..583dc7e2aca8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c +@@ -12,7 +12,7 @@ static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len, + int bytes_returned; + int block_id; + +- if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len % HV_CONFIG_BLOCK_SIZE_MAX) ++ if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len != HV_CONFIG_BLOCK_SIZE_MAX) + return -EINVAL; + + block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX; +@@ -25,8 +25,8 @@ static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len, + HV_CONFIG_BLOCK_SIZE_MAX, block_id); + + /* Make sure len bytes were read successfully */ +- if (read) +- rc |= !(len == bytes_returned); ++ if (read && !rc && len != bytes_returned) ++ rc = -EIO; + + if (rc) { + mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n", +-- +2.13.6 + diff --git a/SOURCES/0043-netdrv-net-mlx5-Set-ODP-capabilities-for-DC-transpor.patch b/SOURCES/0043-netdrv-net-mlx5-Set-ODP-capabilities-for-DC-transpor.patch new file mode 100644 index 0000000..3548fff --- /dev/null +++ b/SOURCES/0043-netdrv-net-mlx5-Set-ODP-capabilities-for-DC-transpor.patch @@ -0,0 +1,73 @@ +From d8bf00ef12e6537c0c1c10982ce05a681526a0f5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:13 -0400 +Subject: [PATCH 043/312] [netdrv] net/mlx5: Set ODP capabilities for DC + transport to max + +Message-id: <20200510145245.10054-51-ahleihel@redhat.com> +Patchwork-id: 306592 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 50/82] net/mlx5: Set ODP capabilities for DC transport to max +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 00679b631eddaa0aa0ceba719fcb1f60c65da5a3 +Author: Michael Guralnik +Date: Mon Aug 19 15:08:13 2019 +0300 + + net/mlx5: Set ODP capabilities for DC transport to max + + In mlx5_core initialization, query max ODP capabilities for DC transport + from FW and set as current capabilities. 
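
The hunk below turns on each DC ODP capability with ODP_CAP_SET_MAX(); the macro itself lives elsewhere in main.c and is not part of this diff. A rough sketch of what such a helper plausibly does (the exact body, and the fact that do_set / set_hca_cap are locals of the enclosing function, are assumptions rather than something shown in this patch):

    /* Mirror the firmware-reported maximum of one ODP capability field into
     * the "current" capability block that will be written back via
     * SET_HCA_CAP, and remember that at least one field changed. */
    #define ODP_CAP_SET_MAX(dev, field)                                  \
            do {                                                         \
                    u32 _res = MLX5_CAP_ODP_MAX(dev, field);             \
                    if (_res) {                                          \
                            do_set = true;                               \
                            MLX5_SET(odp_cap, set_hca_cap, field, _res); \
                    }                                                    \
            } while (0)
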
+ + Signed-off-by: Michael Guralnik + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 ++++++ + include/linux/mlx5/mlx5_ifc.h | 4 +++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 568d973725b6..490bd80c586a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -495,6 +495,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev) + ODP_CAP_SET_MAX(dev, xrc_odp_caps.write); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.read); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic); ++ ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive); ++ ODP_CAP_SET_MAX(dev, dc_odp_caps.send); ++ ODP_CAP_SET_MAX(dev, dc_odp_caps.receive); ++ ODP_CAP_SET_MAX(dev, dc_odp_caps.write); ++ ODP_CAP_SET_MAX(dev, dc_odp_caps.read); ++ ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic); + + if (do_set) + err = set_caps(dev, set_ctx, set_sz, +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 77c354384ce5..caa0bcd9dd0f 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1019,7 +1019,9 @@ struct mlx5_ifc_odp_cap_bits { + + struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; + +- u8 reserved_at_100[0x700]; ++ struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; ++ ++ u8 reserved_at_120[0x6E0]; + }; + + struct mlx5_ifc_calc_op { +-- +2.13.6 + diff --git a/SOURCES/0044-netdrv-net-mlx5e-Change-function-s-position-to-a-mor.patch b/SOURCES/0044-netdrv-net-mlx5e-Change-function-s-position-to-a-mor.patch new file mode 100644 index 0000000..d9830d3 --- /dev/null +++ b/SOURCES/0044-netdrv-net-mlx5e-Change-function-s-position-to-a-mor.patch @@ -0,0 +1,91 @@ +From d4eb0855820857638058bada0a1189f24b06010b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:16 -0400 +Subject: [PATCH 044/312] [netdrv] net/mlx5e: Change function's position to a + more fitting file + +Message-id: <20200510145245.10054-54-ahleihel@redhat.com> +Patchwork-id: 306594 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 53/82] net/mlx5e: Change function's position to a more fitting file +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit a49e1f31ae155d64355d0cd0e0afa5b2bc8544cd +Author: Aya Levin +Date: Thu Aug 8 16:16:28 2019 +0300 + + net/mlx5e: Change function's position to a more fitting file + + Move function which indicates whether tunnel inner flow table is + supported from en.h to en_fs.c. It fits better right after tunnel + protocol rules definitions. 
+ + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 ------ + drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 2 ++ + drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 6 ++++++ + 3 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index ada39a3f83a9..35cf78134737 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1054,12 +1054,6 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); + void mlx5e_tx_disable_queue(struct netdev_queue *txq); + +-static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) +-{ +- return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && +- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); +-} +- + static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) + { + return MLX5_CAP_ETH(mdev, swp) && +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +index ca2161b42c7f..5acd982ff228 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +@@ -98,6 +98,8 @@ enum mlx5e_tunnel_types { + MLX5E_NUM_TUNNEL_TT, + }; + ++bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); ++ + /* L3/L4 traffic type classifier */ + struct mlx5e_ttc_table { + struct mlx5e_flow_table ft; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index 76cc10e44080..a8340e4fb0b9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -749,6 +749,12 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { + }, + }; + ++bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) ++{ ++ return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && ++ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); ++} ++ + static u8 mlx5e_etype_to_ipv(u16 ethertype) + { + if (ethertype == ETH_P_IP) +-- +2.13.6 + diff --git a/SOURCES/0045-netdrv-net-mlx5e-Support-RSS-for-IP-in-IP-and-IPv6-t.patch b/SOURCES/0045-netdrv-net-mlx5e-Support-RSS-for-IP-in-IP-and-IPv6-t.patch new file mode 100644 index 0000000..4edc020 --- /dev/null +++ b/SOURCES/0045-netdrv-net-mlx5e-Support-RSS-for-IP-in-IP-and-IPv6-t.patch @@ -0,0 +1,135 @@ +From 7afc70d063523d563f11360b6e8174d809efd3fc Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:17 -0400 +Subject: [PATCH 045/312] [netdrv] net/mlx5e: Support RSS for IP-in-IP and IPv6 + tunneled packets + +Message-id: <20200510145245.10054-55-ahleihel@redhat.com> +Patchwork-id: 306595 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 54/82] net/mlx5e: Support RSS for IP-in-IP and IPv6 tunneled packets +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit a795d8db2a6d3c6f80e7002dd6357e6736dad1b6 +Author: Aya Levin +Date: Mon Apr 29 17:45:52 2019 +0300 + + net/mlx5e: Support RSS for IP-in-IP and IPv6 tunneled packets + + Add support for inner header RSS on IP-in-IP and IPv6 tunneled packets. 
+ + Add rules to the steering table regarding outer IP header, with + IPv4/6->IP-in-IP. Tunneled packets with protocol numbers: 0x4 (IP-in-IP) + and 0x29 (IPv6) are RSS-ed on the inner IP header. + Separate FW dependencies between flow table inner IP capabilities and + GRE offload support. Allowing this feature even if GRE offload is not + supported. Tested with multi stream TCP traffic tunneled with IPnIP. + Verified that: + Without this patch, only a single RX ring was processing the traffic. + With this patch, multiple RX rings were processing the traffic. + Verified with and without GRE offload support. + + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 4 +++ + drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 46 ++++++++++++++++++++++++- + 2 files changed, 49 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +index 5acd982ff228..5aae3a7a5497 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +@@ -95,6 +95,10 @@ struct mlx5e_tirc_config { + enum mlx5e_tunnel_types { + MLX5E_TT_IPV4_GRE, + MLX5E_TT_IPV6_GRE, ++ MLX5E_TT_IPV4_IPIP, ++ MLX5E_TT_IPV6_IPIP, ++ MLX5E_TT_IPV4_IPV6, ++ MLX5E_TT_IPV6_IPV6, + MLX5E_NUM_TUNNEL_TT, + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index a8340e4fb0b9..b99b17957543 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -747,11 +747,52 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_GRE, + }, ++ [MLX5E_TT_IPV4_IPIP] = { ++ .etype = ETH_P_IP, ++ .proto = IPPROTO_IPIP, ++ }, ++ [MLX5E_TT_IPV6_IPIP] = { ++ .etype = ETH_P_IPV6, ++ .proto = IPPROTO_IPIP, ++ }, ++ [MLX5E_TT_IPV4_IPV6] = { ++ .etype = ETH_P_IP, ++ .proto = IPPROTO_IPV6, ++ }, ++ [MLX5E_TT_IPV6_IPV6] = { ++ .etype = ETH_P_IPV6, ++ .proto = IPPROTO_IPV6, ++ }, ++ + }; + ++static bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) ++{ ++ switch (proto_type) { ++ case IPPROTO_GRE: ++ return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); ++ case IPPROTO_IPIP: ++ case IPPROTO_IPV6: ++ return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); ++ default: ++ return false; ++ } ++} ++ ++static bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) ++{ ++ int tt; ++ ++ for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { ++ if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) ++ return true; ++ } ++ return false; ++} ++ + bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) + { +- return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && ++ return (mlx5e_any_tunnel_proto_supported(mdev) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); + } + +@@ -844,6 +885,9 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = params->inner_ttc->ft.t; + for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { ++ if (!mlx5e_tunnel_proto_supported(priv->mdev, ++ ttc_tunnel_rules[tt].proto)) ++ continue; + rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, + ttc_tunnel_rules[tt].etype, + ttc_tunnel_rules[tt].proto); +-- +2.13.6 + diff --git 
a/SOURCES/0046-netdrv-net-mlx5e-Improve-stateless-offload-capabilit.patch b/SOURCES/0046-netdrv-net-mlx5e-Improve-stateless-offload-capabilit.patch new file mode 100644 index 0000000..0849547 --- /dev/null +++ b/SOURCES/0046-netdrv-net-mlx5e-Improve-stateless-offload-capabilit.patch @@ -0,0 +1,100 @@ +From c83f03ec3f1e21f96aadd5a4a0eb912541c08bb5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:18 -0400 +Subject: [PATCH 046/312] [netdrv] net/mlx5e: Improve stateless offload + capability check + +Message-id: <20200510145245.10054-56-ahleihel@redhat.com> +Patchwork-id: 306596 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 55/82] net/mlx5e: Improve stateless offload capability check +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit e3a53bc536fc279de2ace13b8d6d54b071afb722 +Author: Marina Varshaver +Date: Tue Aug 20 03:36:29 2019 +0300 + + net/mlx5e: Improve stateless offload capability check + + Use generic function for checking tunnel stateless offload capability + instead of separate macros. + + Signed-off-by: Marina Varshaver + Reviewed-by: Aya Levin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 3 +++ + drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- + 3 files changed, 7 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +index 5aae3a7a5497..68d593074f6c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +@@ -238,5 +238,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); + int mlx5e_create_flow_steering(struct mlx5e_priv *priv); + void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); + ++bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type); ++bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev); ++ + #endif /* __MLX5E_FLOW_STEER_H__ */ + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index b99b17957543..15b7f0f1427c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -766,7 +766,7 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { + + }; + +-static bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) ++bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) + { + switch (proto_type) { + case IPPROTO_GRE: +@@ -779,7 +779,7 @@ static bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_ty + } + } + +-static bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) ++bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) + { + int tt; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 13c1151bf60c..afe24002987d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4851,7 +4851,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + + if (mlx5_vxlan_allowed(mdev->vxlan) || 
mlx5_geneve_tx_allowed(mdev) || +- MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { ++ mlx5e_any_tunnel_proto_supported(mdev)) { + netdev->hw_enc_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_TSO6; +@@ -4868,7 +4868,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) + NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + +- if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { ++ if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { + netdev->hw_features |= NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_GRE | +-- +2.13.6 + diff --git a/SOURCES/0047-netdrv-net-mlx5e-Support-TSO-and-TX-checksum-offload.patch b/SOURCES/0047-netdrv-net-mlx5e-Support-TSO-and-TX-checksum-offload.patch new file mode 100644 index 0000000..0f72ac6 --- /dev/null +++ b/SOURCES/0047-netdrv-net-mlx5e-Support-TSO-and-TX-checksum-offload.patch @@ -0,0 +1,72 @@ +From 4570a8510cd01423160448fa0d0362c1b605d07f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:19 -0400 +Subject: [PATCH 047/312] [netdrv] net/mlx5e: Support TSO and TX checksum + offloads for IP-in-IP tunnels + +Message-id: <20200510145245.10054-57-ahleihel@redhat.com> +Patchwork-id: 306597 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 56/82] net/mlx5e: Support TSO and TX checksum offloads for IP-in-IP tunnels +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 25948b87dda284664edeb3b3dab689df0a7dc889 +Author: Marina Varshaver +Date: Tue Aug 20 04:59:11 2019 +0300 + + net/mlx5e: Support TSO and TX checksum offloads for IP-in-IP + tunnels + + Add TX offloads support for IP-in-IP tunneled packets by reporting + the needed netdev features. 
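
The netdev features in question are NETIF_F_GSO_IPXIP4/6, as the hunk below shows; on the TX path they correspond to skbs whose gso_type carries SKB_GSO_IPXIP4/6. A minimal illustrative helper (the function name is made up here, it is not driver code):

    #include <linux/skbuff.h>

    /* An skb the stack prepared for GSO over an IP-in-IP tunnel carries one
     * of these gso_type bits; advertising NETIF_F_GSO_IPXIP4/6 tells the
     * stack it may hand such skbs to the NIC unsegmented. */
    static bool example_skb_is_ipxip_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6);
    }
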
+ + Signed-off-by: Marina Varshaver + Signed-off-by: Avihu Hagag + Reviewed-by: Aya Levin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index afe24002987d..7d9a526c6017 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4225,6 +4225,8 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, + + switch (proto) { + case IPPROTO_GRE: ++ case IPPROTO_IPIP: ++ case IPPROTO_IPV6: + return features; + case IPPROTO_UDP: + udph = udp_hdr(skb); +@@ -4877,6 +4879,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) + NETIF_F_GSO_GRE_CSUM; + } + ++ if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { ++ netdev->hw_features |= NETIF_F_GSO_IPXIP4 | ++ NETIF_F_GSO_IPXIP6; ++ netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | ++ NETIF_F_GSO_IPXIP6; ++ netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 | ++ NETIF_F_GSO_IPXIP6; ++ } ++ + netdev->hw_features |= NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; +-- +2.13.6 + diff --git a/SOURCES/0048-netdrv-net-mlx5e-Remove-unlikely-from-WARN-condition.patch b/SOURCES/0048-netdrv-net-mlx5e-Remove-unlikely-from-WARN-condition.patch new file mode 100644 index 0000000..db73bd2 --- /dev/null +++ b/SOURCES/0048-netdrv-net-mlx5e-Remove-unlikely-from-WARN-condition.patch @@ -0,0 +1,60 @@ +From 3315feb7c1bc069a18103195cb16ba3d37f78adf Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:20 -0400 +Subject: [PATCH 048/312] [netdrv] net/mlx5e: Remove unlikely() from WARN*() + condition + +Message-id: <20200510145245.10054-58-ahleihel@redhat.com> +Patchwork-id: 306598 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 57/82] net/mlx5e: Remove unlikely() from WARN*() condition +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 7cf92ccb85554c9550bc0a8e892f68f92985024c +Author: Denis Efremov +Date: Thu Aug 29 19:50:17 2019 +0300 + + net/mlx5e: Remove unlikely() from WARN*() condition + + "unlikely(WARN_ON_ONCE(x))" is excessive. WARN_ON_ONCE() already uses + unlikely() internally. + + Signed-off-by: Denis Efremov + Cc: Boris Pismenny + Cc: Saeed Mahameed + Cc: Leon Romanovsky + Cc: Joe Perches + Cc: Andrew Morton + Cc: netdev@vger.kernel.org + Acked-by: Saeed Mahameed + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 7833ddef0427..e5222d17df35 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -408,7 +408,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, + goto out; + + tls_ctx = tls_get_ctx(skb->sk); +- if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev))) ++ if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) + goto err_out; + + priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); +-- +2.13.6 + diff --git a/SOURCES/0049-netdrv-net-mlx5-Kconfig-Fix-MLX5_CORE-dependency-wit.patch b/SOURCES/0049-netdrv-net-mlx5-Kconfig-Fix-MLX5_CORE-dependency-wit.patch new file mode 100644 index 0000000..4fd58de --- /dev/null +++ b/SOURCES/0049-netdrv-net-mlx5-Kconfig-Fix-MLX5_CORE-dependency-wit.patch @@ -0,0 +1,58 @@ +From 91eda209ba094c859befbe379805eac57bddd123 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:22 -0400 +Subject: [PATCH 049/312] [netdrv] net/mlx5: Kconfig: Fix MLX5_CORE dependency + with PCI_HYPERV_INTERFACE + +Message-id: <20200510145245.10054-60-ahleihel@redhat.com> +Patchwork-id: 306600 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 59/82] net/mlx5: Kconfig: Fix MLX5_CORE dependency with PCI_HYPERV_INTERFACE +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 4057a7652b74af25ba1197689fc144cdb766f423 +Author: Mao Wenan +Date: Tue Aug 27 11:12:51 2019 +0800 + + net/mlx5: Kconfig: Fix MLX5_CORE dependency with PCI_HYPERV_INTERFACE + + When MLX5_CORE=y and PCI_HYPERV_INTERFACE=m, below errors are found: + drivers/net/ethernet/mellanox/mlx5/core/en_main.o: In function `mlx5e_nic_enable': + en_main.c:(.text+0xb649): undefined reference to `mlx5e_hv_vhca_stats_create' + drivers/net/ethernet/mellanox/mlx5/core/en_main.o: In function `mlx5e_nic_disable': + en_main.c:(.text+0xb8c4): undefined reference to `mlx5e_hv_vhca_stats_destroy' + + Fix this by making MLX5_CORE imply PCI_HYPERV_INTERFACE. 
+ + Fixes: cef35af34d6d ("net/mlx5e: Add mlx5e HV VHCA stats agent") + Signed-off-by: Mao Wenan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +index 92a561176705..ae7c28ba9f5a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +@@ -9,6 +9,7 @@ config MLX5_CORE + imply PTP_1588_CLOCK + imply VXLAN + imply MLXFW ++ imply PCI_HYPERV_INTERFACE + default n + ---help--- + Core driver for low level functionality of the ConnectX-4 and +-- +2.13.6 + diff --git a/SOURCES/0050-netdrv-net-mlx5e-Use-ipv6_stub-to-avoid-dependency-w.patch b/SOURCES/0050-netdrv-net-mlx5e-Use-ipv6_stub-to-avoid-dependency-w.patch new file mode 100644 index 0000000..5d9cdd9 --- /dev/null +++ b/SOURCES/0050-netdrv-net-mlx5e-Use-ipv6_stub-to-avoid-dependency-w.patch @@ -0,0 +1,126 @@ +From 157c8134fb32202e02e283e8c9be3fcaee9d2f66 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:23 -0400 +Subject: [PATCH 050/312] [netdrv] net/mlx5e: Use ipv6_stub to avoid dependency + with ipv6 being a module + +Message-id: <20200510145245.10054-61-ahleihel@redhat.com> +Patchwork-id: 306601 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 60/82] net/mlx5e: Use ipv6_stub to avoid dependency with ipv6 being a module +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 5cc3a8c66dd5ab18bacef5dd54ccdbae5182e003 +Author: Saeed Mahameed +Date: Tue Aug 27 14:06:23 2019 -0700 + + net/mlx5e: Use ipv6_stub to avoid dependency with ipv6 being a module + + mlx5 is dependent on IPv6 tristate since we use ipv6's nd_tbl directly, + alternatively we can use ipv6_stub->nd_tbl and remove the dependency. 
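
The mechanism behind this: ipv6_stub is a table of hooks that the ipv6 module fills in when it loads, so code that may be built in can reach IPv6 internals such as the ND neighbour table without a link-time dependency on ipv6. A minimal sketch of the resulting check (illustrative helper, not driver code):

    #include <net/addrconf.h>       /* ipv6_stub */
    #include <net/neighbour.h>

    /* True when the neighbour belongs to the IPv6 ND table, without naming
     * nd_tbl directly (which would otherwise force IPV6=y for built-in users). */
    static bool example_neigh_is_ipv6_nd(const struct neighbour *n)
    {
            return IS_ENABLED(CONFIG_IPV6) && n->tbl == ipv6_stub->nd_tbl;
    }
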
+ + Reported-by: Walter Harms + Reviewed-by: Mark Bloch + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1 - + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 23 +++++++++++++---------- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- + 3 files changed, 14 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +index ae7c28ba9f5a..361c783ec9b5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +@@ -32,7 +32,6 @@ config MLX5_FPGA + config MLX5_CORE_EN + bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" + depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE +- depends on IPV6=y || IPV6=n || MLX5_CORE=m + select PAGE_POOL + select DIMLIB + default n +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 037983a8f149..2681bd39eab2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + #include "eswitch.h" + #include "en.h" +@@ -475,16 +476,18 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) + mlx5e_sqs2vport_stop(esw, rep); + } + ++static unsigned long mlx5e_rep_ipv6_interval(void) ++{ ++ if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl) ++ return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME); ++ ++ return ~0UL; ++} ++ + static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) + { +-#if IS_ENABLED(CONFIG_IPV6) +- unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms, +- DELAY_PROBE_TIME); +-#else +- unsigned long ipv6_interval = ~0UL; +-#endif +- unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, +- DELAY_PROBE_TIME); ++ unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); ++ unsigned long ipv6_interval = mlx5e_rep_ipv6_interval(); + struct net_device *netdev = rpriv->netdev; + struct mlx5e_priv *priv = netdev_priv(netdev); + +@@ -893,7 +896,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, + case NETEVENT_NEIGH_UPDATE: + n = ptr; + #if IS_ENABLED(CONFIG_IPV6) +- if (n->tbl != &nd_tbl && n->tbl != &arp_tbl) ++ if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl) + #else + if (n->tbl != &arp_tbl) + #endif +@@ -920,7 +923,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, + * done per device delay prob time parameter. 
+ */ + #if IS_ENABLED(CONFIG_IPV6) +- if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl)) ++ if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl)) + #else + if (!p->dev || p->tbl != &arp_tbl) + #endif +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 31d71e1f0545..9a49ae5ac4ce 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1494,7 +1494,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) + tbl = &arp_tbl; + #if IS_ENABLED(CONFIG_IPV6) + else if (m_neigh->family == AF_INET6) +- tbl = &nd_tbl; ++ tbl = ipv6_stub->nd_tbl; + #endif + else + return; +-- +2.13.6 + diff --git a/SOURCES/0051-netdrv-net-mlx5-Use-PTR_ERR_OR_ZERO-rather-than-its-.patch b/SOURCES/0051-netdrv-net-mlx5-Use-PTR_ERR_OR_ZERO-rather-than-its-.patch new file mode 100644 index 0000000..22a6880 --- /dev/null +++ b/SOURCES/0051-netdrv-net-mlx5-Use-PTR_ERR_OR_ZERO-rather-than-its-.patch @@ -0,0 +1,57 @@ +From b2d6822ecd353c4d82679d6eee081130b40eac66 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:24 -0400 +Subject: [PATCH 051/312] [netdrv] net/mlx5: Use PTR_ERR_OR_ZERO rather than + its implementation + +Message-id: <20200510145245.10054-62-ahleihel@redhat.com> +Patchwork-id: 306602 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 61/82] net/mlx5: Use PTR_ERR_OR_ZERO rather than its implementation +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit a2b7189be6b5dd697c333beb91f988dfc3ca87fb +Author: zhong jiang +Date: Tue Sep 3 14:56:10 2019 +0800 + + net/mlx5: Use PTR_ERR_OR_ZERO rather than its implementation + + PTR_ERR_OR_ZERO contains if(IS_ERR(...)) + PTR_ERR. It is better + to use it directly. hence just replace it. 
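
For reference, the two forms below are equivalent; PTR_ERR_OR_ZERO() from <linux/err.h> folds the IS_ERR() test and the PTR_ERR() extraction into a single call:

    #include <linux/err.h>

    static int example_open_coded(void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }

    static int example_short_form(void *ptr)
    {
            return PTR_ERR_OR_ZERO(ptr);    /* identical behaviour */
    }
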
+ + Signed-off-by: zhong jiang + Acked-by: Saeed Mahameed + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 9a49ae5ac4ce..ac372993c9d8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -988,10 +988,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, + &flow_act, dest, dest_ix); + mutex_unlock(&priv->fs.tc.t_lock); + +- if (IS_ERR(flow->rule[0])) +- return PTR_ERR(flow->rule[0]); +- +- return 0; ++ return PTR_ERR_OR_ZERO(flow->rule[0]); + } + + static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, +-- +2.13.6 + diff --git a/SOURCES/0052-netdrv-net-mlx5e-kTLS-Remove-unused-function-paramet.patch b/SOURCES/0052-netdrv-net-mlx5e-kTLS-Remove-unused-function-paramet.patch new file mode 100644 index 0000000..da3e76d --- /dev/null +++ b/SOURCES/0052-netdrv-net-mlx5e-kTLS-Remove-unused-function-paramet.patch @@ -0,0 +1,65 @@ +From 877b42f26b6e9ec1f6377f186b0312d34bcd6aac Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:25 -0400 +Subject: [PATCH 052/312] [netdrv] net/mlx5e: kTLS, Remove unused function + parameter + +Message-id: <20200510145245.10054-63-ahleihel@redhat.com> +Patchwork-id: 306603 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 62/82] net/mlx5e: kTLS, Remove unused function parameter +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit fa9e01c89539ec1f4efde0adc1a69a527f5ecb1e +Author: Tariq Toukan +Date: Mon Sep 2 12:04:35 2019 +0300 + + net/mlx5e: kTLS, Remove unused function parameter + + SKB parameter is no longer used in tx_post_resync_dump(), + remove it. 
+ + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index e5222d17df35..d195366461c9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -256,8 +256,7 @@ struct mlx5e_dump_wqe { + }; + + static int +-tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, +- skb_frag_t *frag, u32 tisn, bool first) ++tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first) + { + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_data_seg *dseg; +@@ -371,8 +370,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + tx_post_resync_params(sq, priv_tx, info.rcd_sn); + + for (i = 0; i < info.nr_frags; i++) +- if (tx_post_resync_dump(sq, skb, info.frags[i], +- priv_tx->tisn, !i)) ++ if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i)) + goto err_out; + + /* If no dump WQE was sent, we need to have a fence NOP WQE before the +-- +2.13.6 + diff --git a/SOURCES/0053-netdrv-net-mlx5-DR-Remove-useless-set-memory-to-zero.patch b/SOURCES/0053-netdrv-net-mlx5-DR-Remove-useless-set-memory-to-zero.patch new file mode 100644 index 0000000..6959214 --- /dev/null +++ b/SOURCES/0053-netdrv-net-mlx5-DR-Remove-useless-set-memory-to-zero.patch @@ -0,0 +1,52 @@ +From ec079f9d2196ec46943d99aa88a0af28e02724aa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:26 -0400 +Subject: [PATCH 053/312] [netdrv] net/mlx5: DR, Remove useless set memory to + zero use memset() + +Message-id: <20200510145245.10054-64-ahleihel@redhat.com> +Patchwork-id: 306604 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 63/82] net/mlx5: DR, Remove useless set memory to zero use memset() +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit f6a8cddfb50a5d530400f10c435f420b15962800 +Author: Wei Yongjun +Date: Thu Sep 5 09:53:26 2019 +0000 + + net/mlx5: DR, Remove useless set memory to zero use memset() + + The memory return by kzalloc() has already be set to zero, so + remove useless memset(0). 
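
For reference, kzalloc() is kmalloc() plus __GFP_ZERO, so the returned buffer is already zero-filled and a follow-up memset(.., 0, ..) is redundant:

    #include <linux/slab.h>

    static void *example_alloc(size_t size)
    {
            void *buf = kzalloc(size, GFP_KERNEL);  /* comes back zeroed */

            if (!buf)
                    return NULL;
            /* memset(buf, 0, size);  -- redundant, __GFP_ZERO already did it */
            return buf;
    }
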
+ + Signed-off-by: Wei Yongjun + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index e6c6bf4a9578..c7f10d4f8f8d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -902,7 +902,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) + goto clean_qp; + } + +- memset(dmn->send_ring->buf, 0, size); + dmn->send_ring->buf_size = size; + + dmn->send_ring->mr = dr_reg_mr(dmn->mdev, +-- +2.13.6 + diff --git a/SOURCES/0054-netdrv-net-mlx5-DR-Remove-redundant-dev_name-print-f.patch b/SOURCES/0054-netdrv-net-mlx5-DR-Remove-redundant-dev_name-print-f.patch new file mode 100644 index 0000000..3e306a3 --- /dev/null +++ b/SOURCES/0054-netdrv-net-mlx5-DR-Remove-redundant-dev_name-print-f.patch @@ -0,0 +1,89 @@ +From 162279e737c8768b4fc24255dd3786b7012d0945 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:27 -0400 +Subject: [PATCH 054/312] [netdrv] net/mlx5: DR, Remove redundant dev_name + print from err log + +Message-id: <20200510145245.10054-65-ahleihel@redhat.com> +Patchwork-id: 306605 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 64/82] net/mlx5: DR, Remove redundant dev_name print from err log +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 + +commit 63d67f3059291e24bd7a2fa3f5eb7395442e8f90 +Author: Saeed Mahameed +Date: Thu Sep 5 12:34:36 2019 -0700 + + net/mlx5: DR, Remove redundant dev_name print from err log + + mlx5_core_err already prints the name of the device. 
+ + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 15 +++++---------- + 1 file changed, 5 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +index 791c3674aed1..a9da961d4d2f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +@@ -72,24 +72,21 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn) + + dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE); + if (!dmn->ste_icm_pool) { +- mlx5dr_err(dmn, "Couldn't get icm memory for %s\n", +- dev_name(dmn->mdev->device)); ++ mlx5dr_err(dmn, "Couldn't get icm memory\n"); + ret = -ENOMEM; + goto clean_uar; + } + + dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION); + if (!dmn->action_icm_pool) { +- mlx5dr_err(dmn, "Couldn't get action icm memory for %s\n", +- dev_name(dmn->mdev->device)); ++ mlx5dr_err(dmn, "Couldn't get action icm memory\n"); + ret = -ENOMEM; + goto free_ste_icm_pool; + } + + ret = mlx5dr_send_ring_alloc(dmn); + if (ret) { +- mlx5dr_err(dmn, "Couldn't create send-ring for %s\n", +- dev_name(dmn->mdev->device)); ++ mlx5dr_err(dmn, "Couldn't create send-ring\n"); + goto free_action_icm_pool; + } + +@@ -312,16 +309,14 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) + dmn->info.caps.log_icm_size); + + if (!dmn->info.supp_sw_steering) { +- mlx5dr_err(dmn, "SW steering not supported for %s\n", +- dev_name(mdev->device)); ++ mlx5dr_err(dmn, "SW steering is not supported\n"); + goto uninit_caps; + } + + /* Allocate resources */ + ret = dr_domain_init_resources(dmn); + if (ret) { +- mlx5dr_err(dmn, "Failed init domain resources for %s\n", +- dev_name(mdev->device)); ++ mlx5dr_err(dmn, "Failed init domain resources\n"); + goto uninit_caps; + } + +-- +2.13.6 + diff --git a/SOURCES/0055-netdrv-drivers-net-Fix-Kconfig-indentation.patch b/SOURCES/0055-netdrv-drivers-net-Fix-Kconfig-indentation.patch new file mode 100644 index 0000000..803edb0 --- /dev/null +++ b/SOURCES/0055-netdrv-drivers-net-Fix-Kconfig-indentation.patch @@ -0,0 +1,108 @@ +From 04f6b2f616074ee8524c017915640770c17e365a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:28 -0400 +Subject: [PATCH 055/312] [netdrv] drivers: net: Fix Kconfig indentation + +Message-id: <20200510145245.10054-66-ahleihel@redhat.com> +Patchwork-id: 306606 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 65/82] drivers: net: Fix Kconfig indentation +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc1 +Conflicts: + - Take mlx5 changes only. + +commit 02bc5eb990597796d8e8383d1b98e540af963bf1 +Author: Krzysztof Kozlowski +Date: Mon Sep 23 17:52:43 2019 +0200 + + drivers: net: Fix Kconfig indentation + + Adjust indentation from spaces to tab (+optional two spaces) as in + coding style with command like: + $ sed -e 's/^ /\t/' -i */Kconfig + + Signed-off-by: Krzysztof Kozlowski + Acked-by: Kalle Valo + Reviewed-by: Leon Romanovsky + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 36 ++++++++++++------------- + 1 file changed, 18 insertions(+), 18 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +index 361c783ec9b5..6919161c8f9b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +@@ -19,15 +19,15 @@ config MLX5_ACCEL + bool + + config MLX5_FPGA +- bool "Mellanox Technologies Innova support" +- depends on MLX5_CORE ++ bool "Mellanox Technologies Innova support" ++ depends on MLX5_CORE + select MLX5_ACCEL +- ---help--- +- Build support for the Innova family of network cards by Mellanox +- Technologies. Innova network cards are comprised of a ConnectX chip +- and an FPGA chip on one board. If you select this option, the +- mlx5_core driver will include the Innova FPGA core and allow building +- sandbox-specific client drivers. ++ ---help--- ++ Build support for the Innova family of network cards by Mellanox ++ Technologies. Innova network cards are comprised of a ConnectX chip ++ and an FPGA chip on one board. If you select this option, the ++ mlx5_core driver will include the Innova FPGA core and allow building ++ sandbox-specific client drivers. + + config MLX5_CORE_EN + bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" +@@ -57,14 +57,14 @@ config MLX5_EN_RXNFC + API. + + config MLX5_MPFS +- bool "Mellanox Technologies MLX5 MPFS support" +- depends on MLX5_CORE_EN ++ bool "Mellanox Technologies MLX5 MPFS support" ++ depends on MLX5_CORE_EN + default y +- ---help--- ++ ---help--- + Mellanox Technologies Ethernet Multi-Physical Function Switch (MPFS) +- support in ConnectX NIC. MPFs is required for when multi-PF configuration +- is enabled to allow passing user configured unicast MAC addresses to the +- requesting PF. ++ support in ConnectX NIC. MPFs is required for when multi-PF configuration ++ is enabled to allow passing user configured unicast MAC addresses to the ++ requesting PF. + + config MLX5_ESWITCH + bool "Mellanox Technologies MLX5 SRIOV E-Switch support" +@@ -72,10 +72,10 @@ config MLX5_ESWITCH + default y + ---help--- + Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. +- E-Switch provides internal SRIOV packet steering and switching for the +- enabled VFs and PF in two available modes: +- Legacy SRIOV mode (L2 mac vlan steering based). +- Switchdev mode (eswitch offloads). ++ E-Switch provides internal SRIOV packet steering and switching for the ++ enabled VFs and PF in two available modes: ++ Legacy SRIOV mode (L2 mac vlan steering based). ++ Switchdev mode (eswitch offloads). 
+ + config MLX5_CORE_EN_DCB + bool "Data Center Bridging (DCB) Support" +-- +2.13.6 + diff --git a/SOURCES/0056-netdrv-net-mlx5e-kTLS-Release-reference-on-DUMPed-fr.patch b/SOURCES/0056-netdrv-net-mlx5e-kTLS-Release-reference-on-DUMPed-fr.patch new file mode 100644 index 0000000..83bfd05 --- /dev/null +++ b/SOURCES/0056-netdrv-net-mlx5e-kTLS-Release-reference-on-DUMPed-fr.patch @@ -0,0 +1,156 @@ +From 0472c2b0a8bf58396dc7434fd8d96ce8f765f845 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:31 -0400 +Subject: [PATCH 056/312] [netdrv] net/mlx5e: kTLS, Release reference on DUMPed + fragments in shutdown flow + +Message-id: <20200510145245.10054-69-ahleihel@redhat.com> +Patchwork-id: 306611 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 68/82] net/mlx5e: kTLS, Release reference on DUMPed fragments in shutdown flow +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 2c559361389b452ca23494080d0c65ab812706c1 +Author: Tariq Toukan +Date: Wed Sep 18 13:45:38 2019 +0300 + + net/mlx5e: kTLS, Release reference on DUMPed fragments in shutdown flow + + A call to kTLS completion handler was missing in the TXQSQ release + flow. Add it. + + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 7 +++++- + .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 11 +++++++-- + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 28 ++++++++++++---------- + 3 files changed, 30 insertions(+), 16 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +index b7298f9ee3d3..c4c128908b6e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +@@ -86,7 +86,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, + struct mlx5e_tx_wqe **wqe, u16 *pi); + void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe_info *wi, +- struct mlx5e_sq_dma *dma); ++ u32 *dma_fifo_cc); + + #else + +@@ -94,6 +94,11 @@ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) + { + } + ++static inline void ++mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, ++ struct mlx5e_tx_wqe_info *wi, ++ u32 *dma_fifo_cc) {} ++ + #endif + + #endif /* __MLX5E_TLS_H__ */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index d195366461c9..90c6ce530a18 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -303,9 +303,16 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir + + void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe_info *wi, +- struct mlx5e_sq_dma *dma) ++ u32 *dma_fifo_cc) + { +- struct mlx5e_sq_stats *stats = sq->stats; ++ struct mlx5e_sq_stats *stats; ++ struct mlx5e_sq_dma *dma; ++ ++ if (!wi->resync_dump_frag) ++ return; ++ ++ dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++); ++ stats = sq->stats; + + mlx5e_tx_dma_unmap(sq->pdev, dma); + 
__skb_frag_unref(wi->resync_dump_frag); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 9cc22b62d73d..001752ace7f0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -483,14 +483,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + skb = wi->skb; + + if (unlikely(!skb)) { +-#ifdef CONFIG_MLX5_EN_TLS +- if (wi->resync_dump_frag) { +- struct mlx5e_sq_dma *dma = +- mlx5e_dma_get(sq, dma_fifo_cc++); +- +- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma); +- } +-#endif ++ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc); + sqcc += wi->num_wqebbs; + continue; + } +@@ -546,29 +539,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) + { + struct mlx5e_tx_wqe_info *wi; + struct sk_buff *skb; ++ u32 dma_fifo_cc; ++ u16 sqcc; + u16 ci; + int i; + +- while (sq->cc != sq->pc) { +- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); ++ sqcc = sq->cc; ++ dma_fifo_cc = sq->dma_fifo_cc; ++ ++ while (sqcc != sq->pc) { ++ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.wqe_info[ci]; + skb = wi->skb; + + if (!skb) { +- sq->cc += wi->num_wqebbs; ++ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc); ++ sqcc += wi->num_wqebbs; + continue; + } + + for (i = 0; i < wi->num_dma; i++) { + struct mlx5e_sq_dma *dma = +- mlx5e_dma_get(sq, sq->dma_fifo_cc++); ++ mlx5e_dma_get(sq, dma_fifo_cc++); + + mlx5e_tx_dma_unmap(sq->pdev, dma); + } + + dev_kfree_skb_any(skb); +- sq->cc += wi->num_wqebbs; ++ sqcc += wi->num_wqebbs; + } ++ ++ sq->dma_fifo_cc = dma_fifo_cc; ++ sq->cc = sqcc; + } + + #ifdef CONFIG_MLX5_CORE_IPOIB +-- +2.13.6 + diff --git a/SOURCES/0057-netdrv-net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch b/SOURCES/0057-netdrv-net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch new file mode 100644 index 0000000..93264a7 --- /dev/null +++ b/SOURCES/0057-netdrv-net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch @@ -0,0 +1,133 @@ +From 4c84687ee7bae8c9bd1722d4159ed004a09d817d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:32 -0400 +Subject: [PATCH 057/312] [netdrv] net/mlx5e: kTLS, Size of a Dump WQE is fixed + +Message-id: <20200510145245.10054-70-ahleihel@redhat.com> +Patchwork-id: 306608 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 69/82] net/mlx5e: kTLS, Size of a Dump WQE is fixed +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 9b1fef2f23c1141c9936debe633ff16e44c6137b +Author: Tariq Toukan +Date: Sun Sep 1 13:53:26 2019 +0300 + + net/mlx5e: kTLS, Size of a Dump WQE is fixed + + No Eth segment, so no dynamic inline headers. + The size of a Dump WQE is fixed, use constants and remove + unnecessary checks. 
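
Why the size is fixed: the Dump WQE declared below consists of just a ctrl segment and a data segment. Assuming the usual 16-byte layout of each segment (worth re-checking against the mlx5 headers; the sizes are not spelled out in this patch), the whole WQE is 32 bytes and always fits in a single 64-byte WQE basic block, so the WQEBB count can be a compile-time constant rather than a per-send computation:

    #include <linux/kernel.h>       /* DIV_ROUND_UP */
    #include <linux/mlx5/qp.h>      /* mlx5_wqe_ctrl_seg, mlx5_wqe_data_seg */

    /* sketch of the arithmetic only, not driver code */
    struct example_dump_wqe {
            struct mlx5_wqe_ctrl_seg ctrl;  /* 16 bytes on current layouts */
            struct mlx5_wqe_data_seg data;  /* 16 bytes on current layouts */
    };

    #define EXAMPLE_WQE_BB          64      /* MLX5_SEND_WQE_BB */
    #define EXAMPLE_DUMP_WQEBBS \
            DIV_ROUND_UP(sizeof(struct example_dump_wqe), EXAMPLE_WQE_BB)  /* == 1 */
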
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 9 ++++++++- + .../net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 17 +++-------------- + 3 files changed, 12 insertions(+), 16 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index 182d5c5664eb..25f9dda578ac 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -23,7 +23,7 @@ + #define MLX5E_SQ_TLS_ROOM \ + (MLX5_SEND_WQE_MAX_WQEBBS + \ + MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \ +- MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS) ++ MAX_SKB_FRAGS * MLX5E_KTLS_DUMP_WQEBBS) + #endif + + #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +index c4c128908b6e..eb692feba4a6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +@@ -21,7 +21,14 @@ + MLX5_ST_SZ_BYTES(tls_progress_params)) + #define MLX5E_KTLS_PROGRESS_WQEBBS \ + (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) +-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2 ++ ++struct mlx5e_dump_wqe { ++ struct mlx5_wqe_ctrl_seg ctrl; ++ struct mlx5_wqe_data_seg data; ++}; ++ ++#define MLX5E_KTLS_DUMP_WQEBBS \ ++ (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB)) + + enum { + MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 90c6ce530a18..ac54767b7d86 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -250,11 +250,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, + mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); + } + +-struct mlx5e_dump_wqe { +- struct mlx5_wqe_ctrl_seg ctrl; +- struct mlx5_wqe_data_seg data; +-}; +- + static int + tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first) + { +@@ -262,7 +257,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_dump_wqe *wqe; + dma_addr_t dma_addr = 0; +- u8 num_wqebbs; + u16 ds_cnt; + int fsz; + u16 pi; +@@ -270,7 +264,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir + wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); + + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; +- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + + cseg = &wqe->ctrl; + dseg = &wqe->data; +@@ -291,12 +284,8 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir + dseg->byte_count = cpu_to_be32(fsz); + mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); + +- tx_fill_wi(sq, pi, num_wqebbs, frag, fsz); +- sq->pc += num_wqebbs; +- +- WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, +- "unexpected DUMP num_wqebbs, %d > %d", +- num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS); ++ tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, frag, fsz); ++ sq->pc 
+= MLX5E_KTLS_DUMP_WQEBBS; + + return 0; + } +@@ -368,7 +357,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + stats->tls_ooo++; + + num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + +- (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1); ++ (info.nr_frags ? info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS : 1); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + if (unlikely(contig_wqebbs_room < num_wqebbs)) +-- +2.13.6 + diff --git a/SOURCES/0058-netdrv-net-mlx5e-kTLS-Save-only-the-frag-page-to-rel.patch b/SOURCES/0058-netdrv-net-mlx5e-kTLS-Save-only-the-frag-page-to-rel.patch new file mode 100644 index 0000000..9e364aa --- /dev/null +++ b/SOURCES/0058-netdrv-net-mlx5e-kTLS-Save-only-the-frag-page-to-rel.patch @@ -0,0 +1,148 @@ +From 48b3c320e5d5e9ca3cef28dbcef96f5a8dca4e7b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:33 -0400 +Subject: [PATCH 058/312] [netdrv] net/mlx5e: kTLS, Save only the frag page to + release at completion + +Message-id: <20200510145245.10054-71-ahleihel@redhat.com> +Patchwork-id: 306609 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 70/82] net/mlx5e: kTLS, Save only the frag page to release at completion +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit f45da3716fb2fb09e301a1b6edf200ff343dc06e +Author: Tariq Toukan +Date: Wed Sep 18 13:50:32 2019 +0300 + + net/mlx5e: kTLS, Save only the frag page to release at completion + + In TX resync flow where DUMP WQEs are posted, keep a pointer to + the fragment page to unref it upon completion, instead of saving + the whole fragment. + + In addition, move it the end of the arguments list in tx_fill_wi(). 
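    A stand-alone sketch of the get/put pairing this change relies on; the struct names echo the driver's but the types here are toy ones:

        #include <assert.h>
        #include <stddef.h>

        struct page { int refcount; };                      /* toy page with a refcount */
        struct frag { struct page *page; size_t size; };    /* models skb_frag_t */

        struct wqe_info {
            struct page *resync_dump_frag_page;             /* only the page is kept */
        };

        static void get_page(struct page *p) { p->refcount++; }
        static void put_page(struct page *p) { assert(p->refcount > 0); p->refcount--; }

        static void post_dump(struct wqe_info *wi, struct frag *f)
        {
            get_page(f->page);                      /* reference taken when posting */
            wi->resync_dump_frag_page = f->page;    /* the fragment itself is not saved */
        }

        static void complete_dump(struct wqe_info *wi)
        {
            if (wi->resync_dump_frag_page)
                put_page(wi->resync_dump_frag_page);        /* dropped on completion */
        }

        int main(void)
        {
            struct page pg = { .refcount = 1 };
            struct frag f = { .page = &pg, .size = 1400 };
            struct wqe_info wi = { 0 };

            post_dump(&wi, &f);
            complete_dump(&wi);
            assert(pg.refcount == 1);                       /* get/put balanced */
            return 0;
        }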
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- + .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 27 +++++++++++----------- + 2 files changed, 14 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 35cf78134737..25bf9f026641 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -344,7 +344,7 @@ struct mlx5e_tx_wqe_info { + u8 num_wqebbs; + u8 num_dma; + #ifdef CONFIG_MLX5_EN_TLS +- skb_frag_t *resync_dump_frag; ++ struct page *resync_dump_frag_page; + #endif + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index ac54767b7d86..6dfb22d705b2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -108,16 +108,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, + } + + static void tx_fill_wi(struct mlx5e_txqsq *sq, +- u16 pi, u8 num_wqebbs, +- skb_frag_t *resync_dump_frag, +- u32 num_bytes) ++ u16 pi, u8 num_wqebbs, u32 num_bytes, ++ struct page *page) + { + struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; + +- wi->skb = NULL; +- wi->num_wqebbs = num_wqebbs; +- wi->resync_dump_frag = resync_dump_frag; +- wi->num_bytes = num_bytes; ++ memset(wi, 0, sizeof(*wi)); ++ wi->num_wqebbs = num_wqebbs; ++ wi->num_bytes = num_bytes; ++ wi->resync_dump_frag_page = page; + } + + void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) +@@ -145,7 +144,7 @@ post_static_params(struct mlx5e_txqsq *sq, + + umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); + build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); +- tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0); ++ tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL); + sq->pc += MLX5E_KTLS_STATIC_WQEBBS; + } + +@@ -159,7 +158,7 @@ post_progress_params(struct mlx5e_txqsq *sq, + + wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); + build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); +- tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0); ++ tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL); + sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; + } + +@@ -211,7 +210,7 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, + while (remaining > 0) { + skb_frag_t *frag = &record->frags[i]; + +- __skb_frag_ref(frag); ++ get_page(skb_frag_page(frag)); + remaining -= skb_frag_size(frag); + info->frags[i++] = frag; + } +@@ -284,7 +283,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir + dseg->byte_count = cpu_to_be32(fsz); + mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); + +- tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, frag, fsz); ++ tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag)); + sq->pc += MLX5E_KTLS_DUMP_WQEBBS; + + return 0; +@@ -297,14 +296,14 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_sq_stats *stats; + struct mlx5e_sq_dma *dma; + +- if (!wi->resync_dump_frag) ++ if (!wi->resync_dump_frag_page) + return; + + dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++); + stats = 
sq->stats; + + mlx5e_tx_dma_unmap(sq->pdev, dma); +- __skb_frag_unref(wi->resync_dump_frag); ++ put_page(wi->resync_dump_frag_page); + stats->tls_dump_packets++; + stats->tls_dump_bytes += wi->num_bytes; + } +@@ -314,7 +313,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq) + struct mlx5_wq_cyc *wq = &sq->wq; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + +- tx_fill_wi(sq, pi, 1, NULL, 0); ++ tx_fill_wi(sq, pi, 1, 0, NULL); + + mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); + } +-- +2.13.6 + diff --git a/SOURCES/0059-netdrv-net-mlx5e-kTLS-Save-by-value-copy-of-the-reco.patch b/SOURCES/0059-netdrv-net-mlx5e-kTLS-Save-by-value-copy-of-the-reco.patch new file mode 100644 index 0000000..2e4c683 --- /dev/null +++ b/SOURCES/0059-netdrv-net-mlx5e-kTLS-Save-by-value-copy-of-the-reco.patch @@ -0,0 +1,79 @@ +From caac2c9de56837381f547ae1c0d9d180f1a1546c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:34 -0400 +Subject: [PATCH 059/312] [netdrv] net/mlx5e: kTLS, Save by-value copy of the + record frags + +Message-id: <20200510145245.10054-72-ahleihel@redhat.com> +Patchwork-id: 306613 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 71/82] net/mlx5e: kTLS, Save by-value copy of the record frags +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 310d9b9d37220b590909e90e724fc5f346a98775 +Author: Tariq Toukan +Date: Wed Sep 18 13:57:40 2019 +0300 + + net/mlx5e: kTLS, Save by-value copy of the record frags + + Access the record fragments only under the TLS ctx lock. + In the resync flow, save a copy of them to be used when + preparing and posting the required DUMP WQEs. 
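    A minimal model of the copy-under-lock idea, with a pthread mutex standing in for the TLS context spinlock and invented frag/ctx types:

        #include <pthread.h>
        #include <string.h>

        #define MAX_FRAGS 17                    /* illustrative, like MAX_SKB_FRAGS */

        struct frag { void *page; unsigned int off, size; };

        struct tx_ctx {                         /* shared, lock-protected record state */
            pthread_mutex_t lock;
            struct frag frags[MAX_FRAGS];
            int nr_frags;
        };

        struct sync_info {                      /* private copy used after unlock */
            struct frag frags[MAX_FRAGS];       /* copied by value, not by pointer */
            int nr_frags;
        };

        static void sync_info_get(struct tx_ctx *ctx, struct sync_info *info)
        {
            pthread_mutex_lock(&ctx->lock);
            info->nr_frags = ctx->nr_frags;
            /* Snapshot the fragments while the lock is held; the DUMP WQEs are
             * built later from this copy, without touching ctx->frags again. */
            memcpy(info->frags, ctx->frags, sizeof(ctx->frags));
            pthread_mutex_unlock(&ctx->lock);
        }

        int main(void)
        {
            struct tx_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_frags = 1 };
            struct sync_info info;

            ctx.frags[0] = (struct frag){ .page = 0, .off = 0, .size = 1000 };
            sync_info_get(&ctx, &info);
            return info.nr_frags == 1 ? 0 : 1;
        }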
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 6dfb22d705b2..334808b1863b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -179,7 +179,7 @@ struct tx_sync_info { + u64 rcd_sn; + s32 sync_len; + int nr_frags; +- skb_frag_t *frags[MAX_SKB_FRAGS]; ++ skb_frag_t frags[MAX_SKB_FRAGS]; + }; + + static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, +@@ -212,11 +212,11 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, + + get_page(skb_frag_page(frag)); + remaining -= skb_frag_size(frag); +- info->frags[i++] = frag; ++ info->frags[i++] = *frag; + } + /* reduce the part which will be sent with the original SKB */ + if (remaining < 0) +- skb_frag_size_add(info->frags[i - 1], remaining); ++ skb_frag_size_add(&info->frags[i - 1], remaining); + info->nr_frags = i; + out: + spin_unlock_irqrestore(&tx_ctx->lock, flags); +@@ -365,7 +365,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + tx_post_resync_params(sq, priv_tx, info.rcd_sn); + + for (i = 0; i < info.nr_frags; i++) +- if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i)) ++ if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i)) + goto err_out; + + /* If no dump WQE was sent, we need to have a fence NOP WQE before the +-- +2.13.6 + diff --git a/SOURCES/0060-netdrv-net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-res.patch b/SOURCES/0060-netdrv-net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-res.patch new file mode 100644 index 0000000..9a7eb35 --- /dev/null +++ b/SOURCES/0060-netdrv-net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-res.patch @@ -0,0 +1,78 @@ +From dcc63af43b8f506960083abc7aa249415234c31b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:35 -0400 +Subject: [PATCH 060/312] [netdrv] net/mlx5e: kTLS, Fix page refcnt leak in TX + resync error flow + +Message-id: <20200510145245.10054-73-ahleihel@redhat.com> +Patchwork-id: 306612 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 72/82] net/mlx5e: kTLS, Fix page refcnt leak in TX resync error flow +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit b61b24bd135a7775a2839863bd1d58a462a5f1e5 +Author: Tariq Toukan +Date: Wed Sep 18 13:57:40 2019 +0300 + + net/mlx5e: kTLS, Fix page refcnt leak in TX resync error flow + + All references for frag pages that are obtained in tx_sync_info_get() + should be released. + Release usually occurs in the corresponding CQE of the WQE. + In error flows, not all fragments have a WQE posted for them, hence + no matching CQE will be generated. + For these pages, release the reference in the error flow. 
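    The accounting can be sketched stand-alone: every reference taken up front is dropped either by a completion or, for fragments that never got a DUMP WQE, in the error path. Counts and types below are illustrative:

        #include <assert.h>

        struct page { int refcount; };

        /* Pretend the first 'posted' fragments got a DUMP WQE (their reference is
         * released by the completion handler) and the rest hit an error. */
        static void resync_error_path(struct page **pages, int nr_frags, int posted)
        {
            int i;

            /* Completions drop the references of the fragments that were posted. */
            for (i = 0; i < posted; i++)
                pages[i]->refcount--;

            /* Error flow: drop the references of the fragments that never got a WQE. */
            for (i = posted; i < nr_frags; i++)
                pages[i]->refcount--;
        }

        int main(void)
        {
            struct page a = { 2 }, b = { 2 }, c = { 2 };    /* 1 base ref + 1 taken for resync */
            struct page *pages[] = { &a, &b, &c };

            resync_error_path(pages, 3, 1);                 /* only the first frag was posted */
            assert(a.refcount == 1 && b.refcount == 1 && c.refcount == 1);
            return 0;
        }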
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 334808b1863b..5f1d18fb644e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -329,7 +329,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + struct tx_sync_info info = {}; + u16 contig_wqebbs_room, pi; + u8 num_wqebbs; +- int i; ++ int i = 0; + + if (!tx_sync_info_get(priv_tx, seq, &info)) { + /* We might get here if a retransmission reaches the driver +@@ -364,7 +364,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + + tx_post_resync_params(sq, priv_tx, info.rcd_sn); + +- for (i = 0; i < info.nr_frags; i++) ++ for (; i < info.nr_frags; i++) + if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i)) + goto err_out; + +@@ -377,6 +377,9 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + return skb; + + err_out: ++ for (; i < info.nr_frags; i++) ++ put_page(skb_frag_page(&info.frags[i])); ++ + dev_kfree_skb_any(skb); + return NULL; + } +-- +2.13.6 + diff --git a/SOURCES/0061-netdrv-net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch b/SOURCES/0061-netdrv-net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch new file mode 100644 index 0000000..9df2b2e --- /dev/null +++ b/SOURCES/0061-netdrv-net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch @@ -0,0 +1,99 @@ +From 16c3d368f72223cdfc308be9d40852d1d3cea81b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:36 -0400 +Subject: [PATCH 061/312] [netdrv] net/mlx5e: kTLS, Fix missing SQ edge fill + +Message-id: <20200510145245.10054-74-ahleihel@redhat.com> +Patchwork-id: 306614 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 73/82] net/mlx5e: kTLS, Fix missing SQ edge fill +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 700ec497424069fa4d8f3715759c4aaec016e840 +Author: Tariq Toukan +Date: Mon Oct 7 13:59:11 2019 +0300 + + net/mlx5e: kTLS, Fix missing SQ edge fill + + Before posting the context params WQEs, make sure there is enough + contiguous room for them, and fill frag edge if needed. + + When posting only a nop, no need for room check, as it needs a single + WQEBB, meaning no contiguity issue. 
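    A toy model of the edge-fill check on a ring of WQE basic blocks; the ring size and WQEBB counts are arbitrary:

        #include <stdio.h>

        #define RING_WQEBBS 16          /* power-of-two ring size, in basic blocks */

        static unsigned int pc;         /* producer counter, free-running */

        static unsigned int contig_room(unsigned int pi)
        {
            return RING_WQEBBS - pi;    /* basic blocks left before the ring wraps */
        }

        /* Post NOPs up to the end of the ring so a multi-WQEBB WQE never wraps. */
        static void fill_frag_edge(unsigned int room)
        {
            pc += room;
        }

        static void post_param_wqes(unsigned int static_wqebbs, unsigned int progress_wqebbs)
        {
            unsigned int pi = pc & (RING_WQEBBS - 1);
            unsigned int room = contig_room(pi);

            if (room < static_wqebbs + progress_wqebbs)
                fill_frag_edge(room);               /* only when both WQEs would not fit */

            pc += static_wqebbs + progress_wqebbs;  /* "post" the two WQEs contiguously */
        }

        int main(void)
        {
            pc = 14;                    /* producer is 2 basic blocks before the wrap */
            post_param_wqes(2, 2);      /* 4 WQEBBs needed, only 2 contiguous: edge is filled */
            printf("pc after post: %u (edge filled with 2 NOP WQEBBs)\n", pc);
            return 0;
        }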
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 28 +++++++++++++++------- + 1 file changed, 20 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 5f1d18fb644e..59e3f48470d9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -168,6 +168,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, + bool skip_static_post, bool fence_first_post) + { + bool progress_fence = skip_static_post || !fence_first_post; ++ struct mlx5_wq_cyc *wq = &sq->wq; ++ u16 contig_wqebbs_room, pi; ++ ++ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); ++ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); ++ if (unlikely(contig_wqebbs_room < ++ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)) ++ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); + + if (!skip_static_post) + post_static_params(sq, priv_tx, fence_first_post); +@@ -355,10 +363,20 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + + stats->tls_ooo++; + +- num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + +- (info.nr_frags ? info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS : 1); ++ tx_post_resync_params(sq, priv_tx, info.rcd_sn); ++ ++ /* If no dump WQE was sent, we need to have a fence NOP WQE before the ++ * actual data xmit. ++ */ ++ if (!info.nr_frags) { ++ tx_post_fence_nop(sq); ++ return skb; ++ } ++ ++ num_wqebbs = info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS; + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); ++ + if (unlikely(contig_wqebbs_room < num_wqebbs)) + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); + +@@ -368,12 +386,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i)) + goto err_out; + +- /* If no dump WQE was sent, we need to have a fence NOP WQE before the +- * actual data xmit. +- */ +- if (!info.nr_frags) +- tx_post_fence_nop(sq); +- + return skb; + + err_out: +-- +2.13.6 + diff --git a/SOURCES/0062-netdrv-net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch b/SOURCES/0062-netdrv-net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch new file mode 100644 index 0000000..f07b648 --- /dev/null +++ b/SOURCES/0062-netdrv-net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch @@ -0,0 +1,195 @@ +From 60eadaf04867375c4fc1dddc16aa6bd274efdc67 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:37 -0400 +Subject: [PATCH 062/312] [netdrv] net/mlx5e: kTLS, Limit DUMP wqe size + +Message-id: <20200510145245.10054-75-ahleihel@redhat.com> +Patchwork-id: 306616 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 74/82] net/mlx5e: kTLS, Limit DUMP wqe size +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 84d1bb2b139e0184b1754aa1b5776186b475fce8 +Author: Tariq Toukan +Date: Mon Oct 7 14:01:29 2019 +0300 + + net/mlx5e: kTLS, Limit DUMP wqe size + + HW expects the data size in DUMP WQEs to be up to MTU. + Make sure they are in range. 
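    (Illustration only: the clamping amounts to splitting each fragment into chunks of at most MTU bytes, one DUMP WQE per chunk. Sizes below are made up.)

        #include <stdio.h>

        /* Split one fragment of 'frag_size' bytes into DUMP chunks of at most 'mtu'
         * bytes each, and return how many chunks (i.e. how many DUMP WQEs) it took. */
        static unsigned int post_frag_dumps(unsigned int frag_size, unsigned int mtu)
        {
            unsigned int off = 0, n = 0;

            while (off < frag_size) {
                unsigned int chunk = frag_size - off;

                if (chunk > mtu)
                    chunk = mtu;        /* HW limit: data size up to MTU */
                off += chunk;
                n++;
            }
            return n;
        }

        int main(void)
        {
            /* A 32KB fragment with a 1500-byte MTU needs 22 DUMP chunks. */
            printf("%u chunks\n", post_frag_dumps(32768, 1500));
            return 0;
        }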
+ + We elevate the frag page refcount by 'n-1', in addition to the + one obtained in tx_sync_info_get(), having an overall of 'n' + references. We bulk increments by using a single page_ref_add() + command, to optimize perfermance. + The refcounts are released one by one, by the corresponding completions. + + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 11 ++++--- + .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 11 ++++++- + .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 34 +++++++++++++++++++--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 ++++- + 5 files changed, 52 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 25bf9f026641..319797f42105 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -409,6 +409,7 @@ struct mlx5e_txqsq { + struct device *pdev; + __be32 mkey_be; + unsigned long state; ++ unsigned int hw_mtu; + struct hwtstamp_config *tstamp; + struct mlx5_clock *clock; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index 25f9dda578ac..7c8796d9743f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -15,15 +15,14 @@ + #else + /* TLS offload requires additional stop_room for: + * - a resync SKB. +- * kTLS offload requires additional stop_room for: +- * - static params WQE, +- * - progress params WQE, and +- * - resync DUMP per frag. ++ * kTLS offload requires fixed additional stop_room for: ++ * - a static params WQE, and a progress params WQE. ++ * The additional MTU-depending room for the resync DUMP WQEs ++ * will be calculated and added in runtime. + */ + #define MLX5E_SQ_TLS_ROOM \ + (MLX5_SEND_WQE_MAX_WQEBBS + \ +- MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \ +- MAX_SKB_FRAGS * MLX5E_KTLS_DUMP_WQEBBS) ++ MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS) + #endif + + #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +index eb692feba4a6..929966e6fbc4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +@@ -94,7 +94,16 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, + void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe_info *wi, + u32 *dma_fifo_cc); +- ++static inline u8 ++mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags, ++ unsigned int sync_len) ++{ ++ /* Given the MTU and sync_len, calculates an upper bound for the ++ * number of WQEBBs needed for the TX resync DUMP WQEs of a record. 
++ */ ++ return MLX5E_KTLS_DUMP_WQEBBS * ++ (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu)); ++} + #else + + static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 59e3f48470d9..e10b0bb696da 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -373,7 +373,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + return skb; + } + +- num_wqebbs = info.nr_frags * MLX5E_KTLS_DUMP_WQEBBS; ++ num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + +@@ -382,14 +382,40 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + + tx_post_resync_params(sq, priv_tx, info.rcd_sn); + +- for (; i < info.nr_frags; i++) +- if (tx_post_resync_dump(sq, &info.frags[i], priv_tx->tisn, !i)) +- goto err_out; ++ for (; i < info.nr_frags; i++) { ++ unsigned int orig_fsz, frag_offset = 0, n = 0; ++ skb_frag_t *f = &info.frags[i]; ++ ++ orig_fsz = skb_frag_size(f); ++ ++ do { ++ bool fence = !(i || frag_offset); ++ unsigned int fsz; ++ ++ n++; ++ fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset); ++ skb_frag_size_set(f, fsz); ++ if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) { ++ page_ref_add(skb_frag_page(f), n - 1); ++ goto err_out; ++ } ++ ++ skb_frag_off_add(f, fsz); ++ frag_offset += fsz; ++ } while (frag_offset < orig_fsz); ++ ++ page_ref_add(skb_frag_page(f), n - 1); ++ } + + return skb; + + err_out: + for (; i < info.nr_frags; i++) ++ /* The put_page() here undoes the page ref obtained in tx_sync_info_get(). ++ * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be ++ * released only upon their completions (or in mlx5e_free_txqsq_descs, ++ * if channel closes). 
++ */ + put_page(skb_frag_page(&info.frags[i])); + + dev_kfree_skb_any(skb); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 7d9a526c6017..7cd3ac6a23a8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -1118,6 +1118,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, + sq->txq_ix = txq_ix; + sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->min_inline_mode = params->tx_min_inline_mode; ++ sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + sq->stop_room = MLX5E_SQ_STOP_ROOM; + INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); +@@ -1125,10 +1126,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, + set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); + if (MLX5_IPSEC_DEV(c->priv->mdev)) + set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); ++#ifdef CONFIG_MLX5_EN_TLS + if (mlx5_accel_is_tls_device(c->priv->mdev)) { + set_bit(MLX5E_SQ_STATE_TLS, &sq->state); +- sq->stop_room += MLX5E_SQ_TLS_ROOM; ++ sq->stop_room += MLX5E_SQ_TLS_ROOM + ++ mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS, ++ TLS_MAX_PAYLOAD_SIZE); + } ++#endif + + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); +-- +2.13.6 + diff --git a/SOURCES/0063-netdrv-net-mlx5e-kTLS-Remove-unneeded-cipher-type-ch.patch b/SOURCES/0063-netdrv-net-mlx5e-kTLS-Remove-unneeded-cipher-type-ch.patch new file mode 100644 index 0000000..ff1cf71 --- /dev/null +++ b/SOURCES/0063-netdrv-net-mlx5e-kTLS-Remove-unneeded-cipher-type-ch.patch @@ -0,0 +1,66 @@ +From d84c54a3976bc805815e5a4a85f711f483ab3157 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:38 -0400 +Subject: [PATCH 063/312] [netdrv] net/mlx5e: kTLS, Remove unneeded cipher type + checks + +Message-id: <20200510145245.10054-76-ahleihel@redhat.com> +Patchwork-id: 306617 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 75/82] net/mlx5e: kTLS, Remove unneeded cipher type checks +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit ecdc65a3ec5d45725355479d63c23a20f4582104 +Author: Tariq Toukan +Date: Sun Oct 6 18:25:17 2019 +0300 + + net/mlx5e: kTLS, Remove unneeded cipher type checks + + Cipher type is checked upon connection addition. + No need to recheck it per every TX resync invocation. 
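    The pattern is validate-at-setup: unsupported ciphers are rejected when the offload context is created, so the transmit path can trust it. A stand-alone sketch with invented values:

        #include <errno.h>
        #include <stdio.h>

        enum cipher { AES_GCM_128 = 1, OTHER = 2 };     /* illustrative values only */

        struct offload_ctx { enum cipher cipher; };

        /* Control path: runs once per connection; unsupported ciphers never get a ctx. */
        static int ktls_add(struct offload_ctx *ctx, enum cipher c)
        {
            if (c != AES_GCM_128)
                return -EOPNOTSUPP;
            ctx->cipher = c;
            return 0;
        }

        /* Data path: no per-packet WARN/check needed, the ctx is known to be valid. */
        static void build_params(const struct offload_ctx *ctx)
        {
            printf("building params for cipher %d\n", ctx->cipher);
        }

        int main(void)
        {
            struct offload_ctx ctx;

            if (ktls_add(&ctx, AES_GCM_128) == 0)
                build_params(&ctx);
            return 0;
        }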
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index e10b0bb696da..1bfeb558ff78 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -31,9 +31,6 @@ fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) + char *salt, *rec_seq; + u8 tls_version; + +- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128)) +- return; +- + info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + EXTRACT_INFO_FIELDS; + +@@ -243,9 +240,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, + u16 rec_seq_sz; + char *rec_seq; + +- if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128)) +- return; +- + info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + rec_seq = info->rec_seq; + rec_seq_sz = sizeof(info->rec_seq); +-- +2.13.6 + diff --git a/SOURCES/0064-netdrv-net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch b/SOURCES/0064-netdrv-net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch new file mode 100644 index 0000000..f5bee78 --- /dev/null +++ b/SOURCES/0064-netdrv-net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch @@ -0,0 +1,107 @@ +From 1bf2b8f0c26bc563683d7b063778bd6e532247f9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:39 -0400 +Subject: [PATCH 064/312] [netdrv] net/mlx5e: kTLS, Save a copy of the crypto + info + +Message-id: <20200510145245.10054-77-ahleihel@redhat.com> +Patchwork-id: 306615 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 76/82] net/mlx5e: kTLS, Save a copy of the crypto info +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit af11a7a42454b17c77da5fa55b6b6325b11d60e5 +Author: Tariq Toukan +Date: Sun Sep 22 14:05:24 2019 +0300 + + net/mlx5e: kTLS, Save a copy of the crypto info + + Do not assume the crypto info is accessible during the + connection lifetime. Save a copy of it in the private + TX context. 
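    In other words, copy by value while the caller's structure is known to be alive, instead of keeping a pointer into it. A small sketch with invented field names:

        #include <string.h>
        #include <stdio.h>

        struct crypto_info_aes_gcm_128 {            /* stand-in for the TLS crypto info */
            unsigned char key[16];
            unsigned char salt[4];
            unsigned char rec_seq[8];
        };

        struct offload_ctx_tx {
            struct crypto_info_aes_gcm_128 crypto_info;     /* a copy, not a pointer */
        };

        static void ktls_add(struct offload_ctx_tx *tx, const void *crypto_info)
        {
            /* Copy by value while the caller's structure is guaranteed to be alive. */
            tx->crypto_info = *(const struct crypto_info_aes_gcm_128 *)crypto_info;
        }

        int main(void)
        {
            struct crypto_info_aes_gcm_128 info;
            struct offload_ctx_tx tx;

            memset(&info, 0xab, sizeof(info));
            ktls_add(&tx, &info);
            memset(&info, 0, sizeof(info));          /* original may go away... */
            printf("%02x\n", tx.crypto_info.key[0]); /* ...the saved copy is unaffected */
            return 0;
        }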
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8 ++------ + 3 files changed, 4 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +index d2ff74d52720..46725cd743a3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, + return -ENOMEM; + + tx_priv->expected_seq = start_offload_tcp_sn; +- tx_priv->crypto_info = crypto_info; ++ tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv); + + /* tc and underlay_qpn values are not in use for tls tis */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +index 929966e6fbc4..a3efa29a4629 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +@@ -44,7 +44,7 @@ enum { + + struct mlx5e_ktls_offload_context_tx { + struct tls_offload_context_tx *tx_ctx; +- struct tls_crypto_info *crypto_info; ++ struct tls12_crypto_info_aes_gcm_128 crypto_info; + u32 expected_seq; + u32 tisn; + u32 key_id; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 1bfeb558ff78..badc6fd26a14 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -24,14 +24,12 @@ enum { + static void + fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) + { +- struct tls_crypto_info *crypto_info = priv_tx->crypto_info; +- struct tls12_crypto_info_aes_gcm_128 *info; ++ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info; + char *initial_rn, *gcm_iv; + u16 salt_sz, rec_seq_sz; + char *salt, *rec_seq; + u8 tls_version; + +- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + EXTRACT_INFO_FIELDS; + + gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv); +@@ -233,14 +231,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, + struct mlx5e_ktls_offload_context_tx *priv_tx, + u64 rcd_sn) + { +- struct tls_crypto_info *crypto_info = priv_tx->crypto_info; +- struct tls12_crypto_info_aes_gcm_128 *info; ++ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info; + __be64 rn_be = cpu_to_be64(rcd_sn); + bool skip_static_post; + u16 rec_seq_sz; + char *rec_seq; + +- info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + rec_seq = info->rec_seq; + rec_seq_sz = sizeof(info->rec_seq); + +-- +2.13.6 + diff --git a/SOURCES/0065-netdrv-net-mlx5e-kTLS-Enhance-TX-resync-flow.patch b/SOURCES/0065-netdrv-net-mlx5e-kTLS-Enhance-TX-resync-flow.patch new file mode 100644 index 0000000..256a5ff --- /dev/null +++ b/SOURCES/0065-netdrv-net-mlx5e-kTLS-Enhance-TX-resync-flow.patch @@ -0,0 +1,275 @@ +From dc53981deab557df58bbed93789ad82b019d94b5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:40 
-0400 +Subject: [PATCH 065/312] [netdrv] net/mlx5e: kTLS, Enhance TX resync flow + +Message-id: <20200510145245.10054-78-ahleihel@redhat.com> +Patchwork-id: 306619 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 77/82] net/mlx5e: kTLS, Enhance TX resync flow +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 46a3ea98074e2a7731ab9b84ec60fc18a2f909e5 +Author: Tariq Toukan +Date: Thu Oct 3 10:48:10 2019 +0300 + + net/mlx5e: kTLS, Enhance TX resync flow + + Once the kTLS TX resync function is called, it used to return + a binary value, for success or failure. + + However, in case the TLS SKB is a retransmission of the connection + handshake, it initiates the resync flow (as the tcp seq check holds), + while regular packet handle is expected. + + In this patch, we identify this case and skip the resync operation + accordingly. + + Counters: + - Add a counter (tls_skip_no_sync_data) to monitor this. + - Bump the dump counters up as they are used more frequently. + - Add a missing counter descriptor declaration for tls_resync_bytes + in sq_stats_desc. + + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 58 +++++++++++++--------- + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 16 +++--- + drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 10 ++-- + 3 files changed, 51 insertions(+), 33 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index badc6fd26a14..778dab1af8fc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -185,26 +185,33 @@ struct tx_sync_info { + skb_frag_t frags[MAX_SKB_FRAGS]; + }; + +-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, +- u32 tcp_seq, struct tx_sync_info *info) ++enum mlx5e_ktls_sync_retval { ++ MLX5E_KTLS_SYNC_DONE, ++ MLX5E_KTLS_SYNC_FAIL, ++ MLX5E_KTLS_SYNC_SKIP_NO_DATA, ++}; ++ ++static enum mlx5e_ktls_sync_retval ++tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, ++ u32 tcp_seq, struct tx_sync_info *info) + { + struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; ++ enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; + struct tls_record_info *record; + int remaining, i = 0; + unsigned long flags; +- bool ret = true; + + spin_lock_irqsave(&tx_ctx->lock, flags); + record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn); + + if (unlikely(!record)) { +- ret = false; ++ ret = MLX5E_KTLS_SYNC_FAIL; + goto out; + } + + if (unlikely(tcp_seq < tls_record_start_seq(record))) { +- if (!tls_record_is_start_marker(record)) +- ret = false; ++ ret = tls_record_is_start_marker(record) ? 
++ MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + goto out; + } + +@@ -316,20 +323,26 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq) + mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); + } + +-static struct sk_buff * ++static enum mlx5e_ktls_sync_retval + mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + struct mlx5e_txqsq *sq, +- struct sk_buff *skb, ++ int datalen, + u32 seq) + { + struct mlx5e_sq_stats *stats = sq->stats; + struct mlx5_wq_cyc *wq = &sq->wq; ++ enum mlx5e_ktls_sync_retval ret; + struct tx_sync_info info = {}; + u16 contig_wqebbs_room, pi; + u8 num_wqebbs; + int i = 0; + +- if (!tx_sync_info_get(priv_tx, seq, &info)) { ++ ret = tx_sync_info_get(priv_tx, seq, &info); ++ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { ++ if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { ++ stats->tls_skip_no_sync_data++; ++ return MLX5E_KTLS_SYNC_SKIP_NO_DATA; ++ } + /* We might get here if a retransmission reaches the driver + * after the relevant record is acked. + * It should be safe to drop the packet in this case +@@ -339,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + } + + if (unlikely(info.sync_len < 0)) { +- u32 payload; +- int headln; +- +- headln = skb_transport_offset(skb) + tcp_hdrlen(skb); +- payload = skb->len - headln; +- if (likely(payload <= -info.sync_len)) +- return skb; ++ if (likely(datalen <= -info.sync_len)) ++ return MLX5E_KTLS_SYNC_DONE; + + stats->tls_drop_bypass_req++; + goto err_out; +@@ -360,7 +368,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + */ + if (!info.nr_frags) { + tx_post_fence_nop(sq); +- return skb; ++ return MLX5E_KTLS_SYNC_DONE; + } + + num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len); +@@ -397,7 +405,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + page_ref_add(skb_frag_page(f), n - 1); + } + +- return skb; ++ return MLX5E_KTLS_SYNC_DONE; + + err_out: + for (; i < info.nr_frags; i++) +@@ -408,8 +416,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + */ + put_page(skb_frag_page(&info.frags[i])); + +- dev_kfree_skb_any(skb); +- return NULL; ++ return MLX5E_KTLS_SYNC_FAIL; + } + + struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, +@@ -445,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, + + seq = ntohl(tcp_hdr(skb)->seq); + if (unlikely(priv_tx->expected_seq != seq)) { +- skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq); +- if (unlikely(!skb)) ++ enum mlx5e_ktls_sync_retval ret = ++ mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); ++ ++ if (likely(ret == MLX5E_KTLS_SYNC_DONE)) ++ *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); ++ else if (ret == MLX5E_KTLS_SYNC_FAIL) ++ goto err_out; ++ else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ + goto out; +- *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); + } + + priv_tx->expected_seq = seq + datalen; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index 79b3ec005f43..23587f55fad7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -104,11 +104,12 @@ static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, ++ { MLX5E_DECLARE_STAT(struct 
mlx5e_sw_stats, tx_tls_dump_packets) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, ++ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) }, +- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) }, +- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, + #endif + + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, +@@ -340,11 +341,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) + s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; + s->tx_tls_ctx += sq_stats->tls_ctx; + s->tx_tls_ooo += sq_stats->tls_ooo; ++ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; ++ s->tx_tls_dump_packets += sq_stats->tls_dump_packets; + s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; ++ s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; + s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; + s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; +- s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; +- s->tx_tls_dump_packets += sq_stats->tls_dump_packets; + #endif + s->tx_cqes += sq_stats->cqes; + } +@@ -1505,10 +1507,12 @@ static const struct counter_desc sq_stats_desc[] = { + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, +- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, +- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, ++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) }, ++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) }, ++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, ++ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, + #endif + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +index ab1c3366ff7d..092b39ffa32a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +@@ -180,11 +180,12 @@ struct mlx5e_sw_stats { + u64 tx_tls_encrypted_bytes; + u64 tx_tls_ctx; + u64 tx_tls_ooo; ++ u64 tx_tls_dump_packets; ++ u64 tx_tls_dump_bytes; + u64 tx_tls_resync_bytes; ++ u64 tx_tls_skip_no_sync_data; + u64 tx_tls_drop_no_sync_data; + u64 tx_tls_drop_bypass_req; +- u64 tx_tls_dump_packets; +- u64 tx_tls_dump_bytes; + #endif + + u64 rx_xsk_packets; +@@ -324,11 +325,12 @@ struct mlx5e_sq_stats { + u64 tls_encrypted_bytes; + u64 tls_ctx; + u64 tls_ooo; ++ u64 tls_dump_packets; ++ u64 tls_dump_bytes; + u64 tls_resync_bytes; ++ u64 tls_skip_no_sync_data; + u64 tls_drop_no_sync_data; + u64 tls_drop_bypass_req; +- u64 tls_dump_packets; +- u64 tls_dump_bytes; + #endif + /* less likely accessed in data path */ + u64 csum_none; +-- +2.13.6 + diff --git a/SOURCES/0066-netdrv-net-mlx5e-Remove-incorrect-match-criteria-ass.patch b/SOURCES/0066-netdrv-net-mlx5e-Remove-incorrect-match-criteria-ass.patch new file mode 
100644 index 0000000..53cf95c --- /dev/null +++ b/SOURCES/0066-netdrv-net-mlx5e-Remove-incorrect-match-criteria-ass.patch @@ -0,0 +1,56 @@ +From c667484c074aea8fe652eeb7a9e5e24438436a69 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:41 -0400 +Subject: [PATCH 066/312] [netdrv] net/mlx5e: Remove incorrect match criteria + assignment line + +Message-id: <20200510145245.10054-79-ahleihel@redhat.com> +Patchwork-id: 306620 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 78/82] net/mlx5e: Remove incorrect match criteria assignment line +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc6 + +commit 752d3dc06d6936d5a357a18b6b51d91c7e134e88 +Author: Dmytro Linkin +Date: Thu Aug 29 15:24:27 2019 +0000 + + net/mlx5e: Remove incorrect match criteria assignment line + + Driver have function, which enable match criteria for misc parameters + in dependence of eswitch capabilities. + + Fixes: 4f5d1beadc10 ("Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux") + Signed-off-by: Dmytro Linkin + Reviewed-by: Jianbo Liu + Reviewed-by: Roi Dayan + Reviewed-by: Saeed Mahameed + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 93501e3c8b28..fa3249964ee9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + + mlx5_eswitch_set_rule_source_port(esw, spec, attr); + +- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + if (attr->outer_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + +-- +2.13.6 + diff --git a/SOURCES/0067-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch b/SOURCES/0067-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch new file mode 100644 index 0000000..3a47da1 --- /dev/null +++ b/SOURCES/0067-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch @@ -0,0 +1,77 @@ +From 356f9793df0411479e5b156d637c2c5bcce95935 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:43 -0400 +Subject: [PATCH 067/312] [netdrv] mlx5: reject unsupported external timestamp + flags + +Message-id: <20200510145245.10054-81-ahleihel@redhat.com> +Patchwork-id: 306621 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 80/82] mlx5: reject unsupported external timestamp flags +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4-rc8 + +commit 2e0645a00e25f7122cad6da57ce3cc855df49ddd +Author: Jacob Keller +Date: Thu Nov 14 10:45:00 2019 -0800 + + mlx5: reject unsupported external timestamp flags + + Fix the mlx5 core PTP support to explicitly reject any future flags that + get added to the external timestamp request ioctl. + + In order to maintain currently functioning code, this patch accepts all + three current flags. 
This is because the PTP_RISING_EDGE and + PTP_FALLING_EDGE flags have unclear semantics and each driver seems to + have interpreted them slightly differently. + + [ RC: I'm not 100% sure what this driver does, but if I'm not wrong it + follows the dp83640: + + flags Meaning + ---------------------------------------------------- -------------------------- + PTP_ENABLE_FEATURE Time stamp rising edge + PTP_ENABLE_FEATURE|PTP_RISING_EDGE Time stamp rising edge + PTP_ENABLE_FEATURE|PTP_FALLING_EDGE Time stamp falling edge + PTP_ENABLE_FEATURE|PTP_RISING_EDGE|PTP_FALLING_EDGE Time stamp falling edge + ] + + Cc: Feras Daoud + Cc: Eugenia Emantayev + Signed-off-by: Jacob Keller + Reviewed-by: Richard Cochran + Reviewed-by: Saeed Mahameed + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +index 9a40f24e3193..34190e888521 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +@@ -242,6 +242,12 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, + PTP_FALLING_EDGE)) + return -EOPNOTSUPP; + ++ /* Reject requests with unsupported flags */ ++ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | ++ PTP_RISING_EDGE | ++ PTP_FALLING_EDGE)) ++ return -EOPNOTSUPP; ++ + if (rq->extts.index >= clock->ptp_info.n_pins) + return -EINVAL; + +-- +2.13.6 + diff --git a/SOURCES/0068-netdrv-net-mlx5e-Fix-ingress-rate-configuration-for-.patch b/SOURCES/0068-netdrv-net-mlx5e-Fix-ingress-rate-configuration-for-.patch new file mode 100644 index 0000000..9062f19 --- /dev/null +++ b/SOURCES/0068-netdrv-net-mlx5e-Fix-ingress-rate-configuration-for-.patch @@ -0,0 +1,58 @@ +From a5c0c1565d8c1a0284297a0a757bdbd9e4bace22 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:44 -0400 +Subject: [PATCH 068/312] [netdrv] net/mlx5e: Fix ingress rate configuration + for representors + +Message-id: <20200510145245.10054-82-ahleihel@redhat.com> +Patchwork-id: 306622 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 81/82] net/mlx5e: Fix ingress rate configuration for representors +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4 + +commit 7b83355f6df9ead2f8c4b06c105505a2999f5dc1 +Author: Eli Cohen +Date: Thu Nov 7 09:07:34 2019 +0200 + + net/mlx5e: Fix ingress rate configuration for representors + + Current code uses the old method of prio encoding in + flow_cls_common_offload. Fix to follow the changes introduced in + commit ef01adae0e43 ("net: sched: use major priority number as hardware priority"). 
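    A stand-alone illustration of why the old extraction breaks after the core change; the TC_H_MAJ mask is reproduced here only to keep the snippet self-contained:

        #include <stdio.h>

        #define TC_H_MAJ_MASK 0xFFFF0000U
        #define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)

        int main(void)
        {
            /* Before the core change the driver saw the encoded handle, e.g. 1 << 16. */
            unsigned int old_style = 1 << 16;
            /* After it, cls_matchall passes the major number itself. */
            unsigned int new_style = 1;

            printf("old encoding: major = %u\n", TC_H_MAJ(old_style) >> 16);          /* 1 */
            /* Applying the old extraction to the new value always yields 0, so the
             * "only priority 1 is supported" check rejected every request. */
            printf("new encoding, old extraction: %u\n", TC_H_MAJ(new_style) >> 16);  /* 0 */
            printf("new encoding, direct use:     %u\n", new_style);                  /* 1 */
            return 0;
        }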
+ + Fixes: fcb64c0f5640 ("net/mlx5: E-Switch, add ingress rate support") + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index ac372993c9d8..ece33ff718a4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -4003,9 +4003,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) + { + struct netlink_ext_ack *extack = ma->common.extack; +- int prio = TC_H_MAJ(ma->common.prio) >> 16; + +- if (prio != 1) { ++ if (ma->common.prio != 1) { + NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); + return -EINVAL; + } +-- +2.13.6 + diff --git a/SOURCES/0069-netdrv-net-mlx5e-Add-missing-capability-bit-check-fo.patch b/SOURCES/0069-netdrv-net-mlx5e-Add-missing-capability-bit-check-fo.patch new file mode 100644 index 0000000..4dd4484 --- /dev/null +++ b/SOURCES/0069-netdrv-net-mlx5e-Add-missing-capability-bit-check-fo.patch @@ -0,0 +1,61 @@ +From 5e5a9d6b5e750e39e8f5bb8837d4f22dc2d9867a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 14:52:45 -0400 +Subject: [PATCH 069/312] [netdrv] net/mlx5e: Add missing capability bit check + for IP-in-IP + +Message-id: <20200510145245.10054-83-ahleihel@redhat.com> +Patchwork-id: 306623 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789378 v2 82/82] net/mlx5e: Add missing capability bit check for IP-in-IP +Bugzilla: 1789378 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789378 +Upstream: v5.4 + +commit 9c98f7ec01d78b5c12db97d1e5edb7022eefa398 +Author: Marina Varshaver +Date: Tue Nov 19 18:52:13 2019 +0200 + + net/mlx5e: Add missing capability bit check for IP-in-IP + + Device that doesn't support IP-in-IP offloads has to filter csum and gso + offload support, otherwise kernel will conclude that device is capable of + offloading csum and gso for IP-in-IP tunnels and that might result in + IP-in-IP tunnel not functioning. 
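    The fix is a capability gate in the tunnel features-check path. A stand-alone sketch, where hw_ipip_offload stands in for the firmware capability bit the driver actually checks:

        #include <stdio.h>

        #define F_CSUM  0x1                 /* illustrative feature bits */
        #define F_GSO   0x2

        enum proto { PROTO_GRE, PROTO_IPIP, PROTO_IPV6 };

        static unsigned int tunnel_features_check(enum proto p, unsigned int features,
                                                  int hw_ipip_offload)
        {
            switch (p) {
            case PROTO_GRE:
                return features;            /* GRE offload capability is checked elsewhere */
            case PROTO_IPIP:
            case PROTO_IPV6:
                if (hw_ipip_offload)
                    return features;        /* keep csum/GSO only if the HW can do it */
                break;
            }
            return 0;                       /* model of "strip offloads for this packet" */
        }

        int main(void)
        {
            printf("ipip w/o cap:  0x%x\n", tunnel_features_check(PROTO_IPIP, F_CSUM | F_GSO, 0));
            printf("ipip with cap: 0x%x\n", tunnel_features_check(PROTO_IPIP, F_CSUM | F_GSO, 1));
            return 0;
        }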
+ + Fixes: 25948b87dda2 ("net/mlx5e: Support TSO and TX checksum offloads for IP-in-IP") + Signed-off-by: Marina Varshaver + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 7cd3ac6a23a8..2f337a70e157 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4230,9 +4230,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, + + switch (proto) { + case IPPROTO_GRE: ++ return features; + case IPPROTO_IPIP: + case IPPROTO_IPV6: +- return features; ++ if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) ++ return features; ++ break; + case IPPROTO_UDP: + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); +-- +2.13.6 + diff --git a/SOURCES/0070-include-net-mlx5-Expose-optimal-performance-scatter-.patch b/SOURCES/0070-include-net-mlx5-Expose-optimal-performance-scatter-.patch new file mode 100644 index 0000000..bb2c9aa --- /dev/null +++ b/SOURCES/0070-include-net-mlx5-Expose-optimal-performance-scatter-.patch @@ -0,0 +1,54 @@ +From b3ef775e164cb586b2967356b0a9c03582920495 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:29 -0400 +Subject: [PATCH 070/312] [include] net/mlx5: Expose optimal performance + scatter entries capability + +Message-id: <20200510150452.10307-5-ahleihel@redhat.com> +Patchwork-id: 306628 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 04/87] net/mlx5: Expose optimal performance scatter entries capability +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 7d47433cf74f942a414171867d89c08640cfef45 +Author: Yamin Friedman +Date: Mon Oct 7 16:59:31 2019 +0300 + + net/mlx5: Expose optimal performance scatter entries capability + + Expose maximum scatter entries per RDMA READ for optimal performance. 
+ + Signed-off-by: Yamin Friedman + Reviewed-by: Or Gerlitz + Reviewed-by: Christoph Hellwig + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index caa0bcd9dd0f..a77ca587c3cc 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1156,7 +1156,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { + u8 log_max_srq[0x5]; + u8 reserved_at_b0[0x10]; + +- u8 reserved_at_c0[0x8]; ++ u8 max_sgl_for_optimized_performance[0x8]; + u8 log_max_cq_sz[0x8]; + u8 reserved_at_d0[0xb]; + u8 log_max_cq[0x5]; +-- +2.13.6 + diff --git a/SOURCES/0071-netdrv-net-Fix-misspellings-of-configure-and-configu.patch b/SOURCES/0071-netdrv-net-Fix-misspellings-of-configure-and-configu.patch new file mode 100644 index 0000000..997ec38 --- /dev/null +++ b/SOURCES/0071-netdrv-net-Fix-misspellings-of-configure-and-configu.patch @@ -0,0 +1,66 @@ +From 7ae07e19237187f7fa84def13d5538e1015c20c7 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:47 -0400 +Subject: [PATCH 071/312] [netdrv] net: Fix misspellings of "configure" and + "configuration" + +Message-id: <20200510150452.10307-23-ahleihel@redhat.com> +Patchwork-id: 306646 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 22/87] net: Fix misspellings of "configure" and "configuration" +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 +Conflicts: + - Take mlx5 changes only. + - drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c + Small context diff due to missing empty line in the comment section, + apply the needed hunk as well as adding back the missing empty line + to avoid more conflicts. + +commit c199ce4f9dd896c716aece33e6750be34aea1151 +Author: Geert Uytterhoeven +Date: Thu Oct 24 17:22:01 2019 +0200 + + net: Fix misspellings of "configure" and "configuration" + + Fix various misspellings of "configuration" and "configure". + + Signed-off-by: Geert Uytterhoeven + Acked-by: Kalle Valo + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +index 1fc4641077fd..ae99fac08b53 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +@@ -177,12 +177,14 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, + * @xoff: xoff value + * @port_buffer: port receive buffer configuration + * @change: +- * Update buffer configuration based on pfc configuraiton and ++ * ++ * Update buffer configuration based on pfc configuration and + * priority to buffer mapping. + * Buffer's lossy bit is changed to: + * lossless if there is at least one PFC enabled priority + * mapped to this buffer lossy if all priorities mapped to + * this buffer are PFC disabled ++ * + * @return: 0 if no error, + * sets change to true if buffer configuration was modified. 
+ */ +-- +2.13.6 + diff --git a/SOURCES/0072-netdrv-net-mlx5-E-Switch-Rename-egress-config-to-gen.patch b/SOURCES/0072-netdrv-net-mlx5-E-Switch-Rename-egress-config-to-gen.patch new file mode 100644 index 0000000..14c0a94 --- /dev/null +++ b/SOURCES/0072-netdrv-net-mlx5-E-Switch-Rename-egress-config-to-gen.patch @@ -0,0 +1,137 @@ +From 68272c4fdf21f6aa6e587a2b4eb9e8ed14a7b7d6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:49 -0400 +Subject: [PATCH 072/312] [netdrv] net/mlx5: E-Switch, Rename egress config to + generic name + +Message-id: <20200510150452.10307-25-ahleihel@redhat.com> +Patchwork-id: 306648 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 24/87] net/mlx5: E-Switch, Rename egress config to generic name +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 6d94e610e4b6a77007d50952d3c859d3e300c0ab +Author: Vu Pham +Date: Mon Oct 28 23:34:58 2019 +0000 + + net/mlx5: E-Switch, Rename egress config to generic name + + Refactor vport egress config in offloads mode + + Refactoring vport egress configuration in offloads mode that + includes egress prio tag configuration. + This makes code symmetric to ingress configuration. + + Signed-off-by: Vu Pham + Reviewed-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 50 +++++++++++----------- + 1 file changed, 26 insertions(+), 24 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index fa3249964ee9..b41b0c868099 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1866,32 +1866,16 @@ static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec; + int err = 0; + +- if (!MLX5_CAP_GEN(esw->dev, prio_tag_required)) +- return 0; +- + /* For prio tag mode, there is only 1 FTEs: + * 1) prio tag packets - pop the prio tag VLAN, allow + * Unmatched traffic is allowed by default + */ +- +- esw_vport_cleanup_egress_rules(esw, vport); +- +- err = esw_vport_enable_egress_acl(esw, vport); +- if (err) { +- mlx5_core_warn(esw->dev, +- "failed to enable egress acl (%d) on vport[%d]\n", +- err, vport->vport); +- return err; +- } +- + esw_debug(esw->dev, + "vport[%d] configure prio tag egress rules\n", vport->vport); + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); +- if (!spec) { +- err = -ENOMEM; +- goto out_no_mem; +- } ++ if (!spec) ++ return -ENOMEM; + + /* prio tag vlan rule - pop it so VF receives untagged packets */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); +@@ -1911,14 +1895,9 @@ static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, + "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n", + vport->vport, err); + vport->egress.allowed_vlan = NULL; +- goto out; + } + +-out: + kvfree(spec); +-out_no_mem: +- if (err) +- esw_vport_cleanup_egress_rules(esw, vport); + return err; + } + +@@ -1963,6 +1942,29 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw, + return err; + } + ++static int esw_vport_egress_config(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ int err; ++ ++ if (!MLX5_CAP_GEN(esw->dev, 
prio_tag_required)) ++ return 0; ++ ++ esw_vport_cleanup_egress_rules(esw, vport); ++ ++ err = esw_vport_enable_egress_acl(esw, vport); ++ if (err) ++ return err; ++ ++ esw_debug(esw->dev, "vport(%d) configure egress rules\n", vport->vport); ++ ++ err = esw_vport_egress_prio_tag_config(esw, vport); ++ if (err) ++ esw_vport_disable_egress_acl(esw, vport); ++ ++ return err; ++} ++ + static bool + esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) + { +@@ -2010,7 +2012,7 @@ static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) + goto err_ingress; + + if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { +- err = esw_vport_egress_prio_tag_config(esw, vport); ++ err = esw_vport_egress_config(esw, vport); + if (err) + goto err_egress; + } +-- +2.13.6 + diff --git a/SOURCES/0073-netdrv-net-mlx5-E-Switch-Rename-ingress-acl-config-i.patch b/SOURCES/0073-netdrv-net-mlx5-E-Switch-Rename-ingress-acl-config-i.patch new file mode 100644 index 0000000..819b99d --- /dev/null +++ b/SOURCES/0073-netdrv-net-mlx5-E-Switch-Rename-ingress-acl-config-i.patch @@ -0,0 +1,66 @@ +From f9d7ea58030ab80031731d50631b3f19503006f7 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:50 -0400 +Subject: [PATCH 073/312] [netdrv] net/mlx5: E-Switch, Rename ingress acl + config in offloads mode + +Message-id: <20200510150452.10307-26-ahleihel@redhat.com> +Patchwork-id: 306649 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 25/87] net/mlx5: E-Switch, Rename ingress acl config in offloads mode +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit b1a3380aa709082761c1dba89234ac16c19037c6 +Author: Vu Pham +Date: Mon Oct 28 23:35:00 2019 +0000 + + net/mlx5: E-Switch, Rename ingress acl config in offloads mode + + Changing the function name esw_ingress_acl_common_config() to + esw_ingress_acl_config() to be consistent with egress config + function naming in offloads mode. 
+ + Signed-off-by: Vu Pham + Reviewed-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index b41b0c868099..9e64bdf17861 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1901,8 +1901,8 @@ static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, + return err; + } + +-static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport) ++static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) + { + int err; + +@@ -2007,7 +2007,7 @@ static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + + mlx5_esw_for_all_vports(esw, i, vport) { +- err = esw_vport_ingress_common_config(esw, vport); ++ err = esw_vport_ingress_config(esw, vport); + if (err) + goto err_ingress; + +-- +2.13.6 + diff --git a/SOURCES/0074-netdrv-net-mlx5-E-switch-Introduce-and-use-vlan-rule.patch b/SOURCES/0074-netdrv-net-mlx5-E-switch-Introduce-and-use-vlan-rule.patch new file mode 100644 index 0000000..e51b405 --- /dev/null +++ b/SOURCES/0074-netdrv-net-mlx5-E-switch-Introduce-and-use-vlan-rule.patch @@ -0,0 +1,225 @@ +From 3f285c020ca420cf7657c4a51da96573ae038f06 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:51 -0400 +Subject: [PATCH 074/312] [netdrv] net/mlx5: E-switch, Introduce and use vlan + rule config helper + +Message-id: <20200510150452.10307-27-ahleihel@redhat.com> +Patchwork-id: 306650 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 26/87] net/mlx5: E-switch, Introduce and use vlan rule config helper +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit fdde49e00b9d2041086568b52670043a8def96ff +Author: Parav Pandit +Date: Mon Oct 28 23:35:03 2019 +0000 + + net/mlx5: E-switch, Introduce and use vlan rule config helper + + Between legacy mode and switchdev mode, only two fields are changed, + vlan_tag and flow action. + Hence to avoid duplicte code between two modes, introduce and and use + helper function to configure allowed VLAN rule. + + While at it, get rid of duplicate debug message. 
+ + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 68 ++++++++++++++-------- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 ++ + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 54 ++++------------- + 3 files changed, 58 insertions(+), 68 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 386e82850ed5..773246f8e9c4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1323,6 +1323,43 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + return err; + } + ++int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport, ++ u16 vlan_id, u32 flow_action) ++{ ++ struct mlx5_flow_act flow_act = {}; ++ struct mlx5_flow_spec *spec; ++ int err = 0; ++ ++ if (vport->egress.allowed_vlan) ++ return -EEXIST; ++ ++ spec = kvzalloc(sizeof(*spec), GFP_KERNEL); ++ if (!spec) ++ return -ENOMEM; ++ ++ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); ++ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); ++ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); ++ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id); ++ ++ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; ++ flow_act.action = flow_action; ++ vport->egress.allowed_vlan = ++ mlx5_add_flow_rules(vport->egress.acl, spec, ++ &flow_act, NULL, 0); ++ if (IS_ERR(vport->egress.allowed_vlan)) { ++ err = PTR_ERR(vport->egress.allowed_vlan); ++ esw_warn(esw->dev, ++ "vport[%d] configure egress vlan rule failed, err(%d)\n", ++ vport->vport, err); ++ vport->egress.allowed_vlan = NULL; ++ } ++ ++ kvfree(spec); ++ return err; ++} ++ + static int esw_vport_egress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +@@ -1353,34 +1390,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, + "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->info.vlan, vport->info.qos); + +- spec = kvzalloc(sizeof(*spec), GFP_KERNEL); +- if (!spec) { +- err = -ENOMEM; +- goto out; +- } +- + /* Allowed vlan rule */ +- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); +- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); +- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); +- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan); ++ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan, ++ MLX5_FLOW_CONTEXT_ACTION_ALLOW); ++ if (err) ++ return err; + +- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; +- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; +- vport->egress.allowed_vlan = +- mlx5_add_flow_rules(vport->egress.acl, spec, +- &flow_act, NULL, 0); +- if (IS_ERR(vport->egress.allowed_vlan)) { +- err = PTR_ERR(vport->egress.allowed_vlan); +- esw_warn(esw->dev, +- "vport[%d] configure egress allowed vlan rule failed, err(%d)\n", +- vport->vport, err); +- vport->egress.allowed_vlan = NULL; ++ /* Drop others rule (star rule) */ ++ spec = kvzalloc(sizeof(*spec), GFP_KERNEL); ++ if (!spec) + goto out; +- } + +- /* Drop others rule (star rule) */ +- memset(spec, 0, sizeof(*spec)); + 
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + /* Attach egress drop flow counter */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 436c633407d6..0cba334270d9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -423,6 +423,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, + int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, + u16 vport, u16 vlan, u8 qos, u8 set_flags); + ++int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport, ++ u16 vlan_id, u32 flow_action); ++ + static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, + u8 vlan_depth) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 9e64bdf17861..657aeea3f879 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1859,48 +1859,6 @@ void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + } + } + +-static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport) +-{ +- struct mlx5_flow_act flow_act = {0}; +- struct mlx5_flow_spec *spec; +- int err = 0; +- +- /* For prio tag mode, there is only 1 FTEs: +- * 1) prio tag packets - pop the prio tag VLAN, allow +- * Unmatched traffic is allowed by default +- */ +- esw_debug(esw->dev, +- "vport[%d] configure prio tag egress rules\n", vport->vport); +- +- spec = kvzalloc(sizeof(*spec), GFP_KERNEL); +- if (!spec) +- return -ENOMEM; +- +- /* prio tag vlan rule - pop it so VF receives untagged packets */ +- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); +- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag); +- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); +- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0); +- +- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; +- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | +- MLX5_FLOW_CONTEXT_ACTION_ALLOW; +- vport->egress.allowed_vlan = +- mlx5_add_flow_rules(vport->egress.acl, spec, +- &flow_act, NULL, 0); +- if (IS_ERR(vport->egress.allowed_vlan)) { +- err = PTR_ERR(vport->egress.allowed_vlan); +- esw_warn(esw->dev, +- "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n", +- vport->vport, err); +- vport->egress.allowed_vlan = NULL; +- } +- +- kvfree(spec); +- return err; +-} +- + static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +@@ -1956,9 +1914,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, + if (err) + return err; + +- esw_debug(esw->dev, "vport(%d) configure egress rules\n", vport->vport); ++ /* For prio tag mode, there is only 1 FTEs: ++ * 1) prio tag packets - pop the prio tag VLAN, allow ++ * Unmatched traffic is allowed by default ++ */ ++ esw_debug(esw->dev, ++ "vport[%d] configure prio tag egress rules\n", vport->vport); + +- err = esw_vport_egress_prio_tag_config(esw, vport); ++ /* prio tag vlan rule - pop it so VF receives untagged packets */ ++ err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0, ++ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | ++ MLX5_FLOW_CONTEXT_ACTION_ALLOW); + if (err) + esw_vport_disable_egress_acl(esw, vport); + +-- +2.13.6 + diff --git 
a/SOURCES/0075-netdrv-net-mlx5-Introduce-and-use-mlx5_esw_is_manage.patch b/SOURCES/0075-netdrv-net-mlx5-Introduce-and-use-mlx5_esw_is_manage.patch new file mode 100644 index 0000000..f92c3d0 --- /dev/null +++ b/SOURCES/0075-netdrv-net-mlx5-Introduce-and-use-mlx5_esw_is_manage.patch @@ -0,0 +1,123 @@ +From e537fcf35c72c352a3428f5ec0978fd66002f11f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:52 -0400 +Subject: [PATCH 075/312] [netdrv] net/mlx5: Introduce and use + mlx5_esw_is_manager_vport() + +Message-id: <20200510150452.10307-28-ahleihel@redhat.com> +Patchwork-id: 306652 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 27/87] net/mlx5: Introduce and use mlx5_esw_is_manager_vport() +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit ea2300e02a71207b11111a44cbe7185a94f78a72 +Author: Parav Pandit +Date: Mon Oct 28 23:35:05 2019 +0000 + + net/mlx5: Introduce and use mlx5_esw_is_manager_vport() + + Currently esw_enable_vport() does vport check for zero to enable drop + counters regardless of execution on ECPF/PF. + While esw_disable_vport() considers such scenario. + + To keep consistency across code for checking for manager_vport, + introduce and use mlx5_esw_is_manager_vport() to check if a specified + vport is eswitch manager vport or not. + + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 13 +++++++------ + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 6 ++++++ + 2 files changed, 13 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 773246f8e9c4..76e2d5cba48b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -496,7 +496,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) + /* Skip mlx5_mpfs_add_mac for eswitch_managers, + * it is already done by its netdev in mlx5e_execute_l2_action + */ +- if (esw->manager_vport == vport) ++ if (mlx5_esw_is_manager_vport(esw, vport)) + goto fdb_add; + + err = mlx5_mpfs_add_mac(esw->dev, mac); +@@ -528,7 +528,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) + /* Skip mlx5_mpfs_del_mac for eswitch managers, + * it is already done by its netdev in mlx5e_execute_l2_action + */ +- if (!vaddr->mpfs || esw->manager_vport == vport) ++ if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport)) + goto fdb_del; + + err = mlx5_mpfs_del_mac(esw->dev, mac); +@@ -1634,7 +1634,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, + u16 vport_num = vport->vport; + int flags; + +- if (esw->manager_vport == vport_num) ++ if (mlx5_esw_is_manager_vport(esw, vport_num)) + return; + + mlx5_modify_vport_admin_state(esw->dev, +@@ -1708,7 +1708,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + + /* Create steering drop counters for ingress and egress ACLs */ +- if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY) ++ if (!mlx5_esw_is_manager_vport(esw, vport_num) && ++ esw->mode == MLX5_ESWITCH_LEGACY) + esw_vport_create_drop_counters(vport); + + /* Restore old vport 
configuration */ +@@ -1726,7 +1727,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well + * in smartNIC as it's a vport group manager. + */ +- if (esw->manager_vport == vport_num || ++ if (mlx5_esw_is_manager_vport(esw, vport_num) || + (!vport_num && mlx5_core_is_ecpf(esw->dev))) + vport->info.trusted = true; + +@@ -1761,7 +1762,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, + esw_vport_change_handle_locked(vport); + vport->enabled_events = 0; + esw_vport_disable_qos(esw, vport); +- if (esw->manager_vport != vport_num && ++ if (!mlx5_esw_is_manager_vport(esw, vport_num) && + esw->mode == MLX5_ESWITCH_LEGACY) { + mlx5_modify_vport_admin_state(esw->dev, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 0cba334270d9..a90af41d8220 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -468,6 +468,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) + /* TODO: This mlx5e_tc function shouldn't be called by eswitch */ + void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); + ++static inline bool ++mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num) ++{ ++ return esw->manager_vport == vport_num; ++} ++ + static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) + { + return mlx5_core_is_ecpf_esw_manager(dev) ? +-- +2.13.6 + diff --git a/SOURCES/0076-netdrv-net-mlx5-Move-metdata-fields-under-offloads-s.patch b/SOURCES/0076-netdrv-net-mlx5-Move-metdata-fields-under-offloads-s.patch new file mode 100644 index 0000000..952e4ee --- /dev/null +++ b/SOURCES/0076-netdrv-net-mlx5-Move-metdata-fields-under-offloads-s.patch @@ -0,0 +1,142 @@ +From 18c6aef4724e84bf5304789fc51ce44c76cccd72 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:53 -0400 +Subject: [PATCH 076/312] [netdrv] net/mlx5: Move metdata fields under offloads + structure + +Message-id: <20200510150452.10307-29-ahleihel@redhat.com> +Patchwork-id: 306651 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 28/87] net/mlx5: Move metdata fields under offloads structure +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit d68316b5a1046b489097c5e5e24139548b79971f +Author: Parav Pandit +Date: Mon Oct 28 23:35:10 2019 +0000 + + net/mlx5: Move metdata fields under offloads structure + + Metadata fields are offload mode specific. + To improve code readability, move metadata under offloads structure. 
+ + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8 ++++++ + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 33 +++++++++++----------- + 2 files changed, 25 insertions(+), 16 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index a90af41d8220..f21d528057fa 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -69,11 +69,19 @@ struct vport_ingress { + struct mlx5_flow_group *allow_spoofchk_only_grp; + struct mlx5_flow_group *allow_untagged_only_grp; + struct mlx5_flow_group *drop_grp; ++#ifdef __GENKSYMS__ + struct mlx5_modify_hdr *modify_metadata; + struct mlx5_flow_handle *modify_metadata_rule; ++#endif + struct mlx5_flow_handle *allow_rule; + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; ++#ifndef __GENKSYMS__ ++ struct { ++ struct mlx5_modify_hdr *modify_metadata; ++ struct mlx5_flow_handle *modify_metadata_rule; ++ } offloads; ++#endif + }; + + struct vport_egress { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 657aeea3f879..00d126fa6e02 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1780,9 +1780,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, + flow_act.vlan[0].vid = 0; + flow_act.vlan[0].prio = 0; + +- if (vport->ingress.modify_metadata_rule) { ++ if (vport->ingress.offloads.modify_metadata_rule) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; +- flow_act.modify_hdr = vport->ingress.modify_metadata; ++ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; + } + + vport->ingress.allow_rule = +@@ -1818,11 +1818,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + MLX5_SET(set_action_in, action, data, + mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport)); + +- vport->ingress.modify_metadata = ++ vport->ingress.offloads.modify_metadata = + mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + 1, action); +- if (IS_ERR(vport->ingress.modify_metadata)) { +- err = PTR_ERR(vport->ingress.modify_metadata); ++ if (IS_ERR(vport->ingress.offloads.modify_metadata)) { ++ err = PTR_ERR(vport->ingress.offloads.modify_metadata); + esw_warn(esw->dev, + "failed to alloc modify header for vport %d ingress acl (%d)\n", + vport->vport, err); +@@ -1830,32 +1830,33 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; +- flow_act.modify_hdr = vport->ingress.modify_metadata; +- vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl, +- &spec, &flow_act, NULL, 0); +- if (IS_ERR(vport->ingress.modify_metadata_rule)) { +- err = PTR_ERR(vport->ingress.modify_metadata_rule); ++ flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; ++ vport->ingress.offloads.modify_metadata_rule = ++ mlx5_add_flow_rules(vport->ingress.acl, ++ &spec, &flow_act, NULL, 0); ++ if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) { ++ err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule); + esw_warn(esw->dev, + "failed to add setting metadata rule 
for vport %d ingress acl, err(%d)\n", + vport->vport, err); +- vport->ingress.modify_metadata_rule = NULL; ++ vport->ingress.offloads.modify_metadata_rule = NULL; + goto out; + } + + out: + if (err) +- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); ++ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); + return err; + } + + void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +- if (vport->ingress.modify_metadata_rule) { +- mlx5_del_flow_rules(vport->ingress.modify_metadata_rule); +- mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata); ++ if (vport->ingress.offloads.modify_metadata_rule) { ++ mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); ++ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); + +- vport->ingress.modify_metadata_rule = NULL; ++ vport->ingress.offloads.modify_metadata_rule = NULL; + } + } + +-- +2.13.6 + diff --git a/SOURCES/0077-netdrv-net-mlx5-Move-legacy-drop-counter-and-rule-un.patch b/SOURCES/0077-netdrv-net-mlx5-Move-legacy-drop-counter-and-rule-un.patch new file mode 100644 index 0000000..e5eae25 --- /dev/null +++ b/SOURCES/0077-netdrv-net-mlx5-Move-legacy-drop-counter-and-rule-un.patch @@ -0,0 +1,275 @@ +From 51391126c3b108d32bcfbd30f7bce65ae5049097 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:54 -0400 +Subject: [PATCH 077/312] [netdrv] net/mlx5: Move legacy drop counter and rule + under legacy structure + +Message-id: <20200510150452.10307-30-ahleihel@redhat.com> +Patchwork-id: 306653 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 29/87] net/mlx5: Move legacy drop counter and rule under legacy structure +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 853b53520c9d11db7652e3603665b0ad475741a5 +Author: Parav Pandit +Date: Mon Oct 28 23:35:11 2019 +0000 + + net/mlx5: Move legacy drop counter and rule under legacy structure + + To improve code readability, move legacy drop counters and droup rule + under legacy structure. + + While at it, + (a) prefix drop flow counters helper with legacy_. + (b) nullify the rule pointers only if they were valid. 
+ + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 82 ++++++++++++----------- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 13 ++++ + 2 files changed, 55 insertions(+), 40 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 76e2d5cba48b..54b5f290ab9d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1035,14 +1035,15 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +- if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) ++ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { + mlx5_del_flow_rules(vport->egress.allowed_vlan); ++ vport->egress.allowed_vlan = NULL; ++ } + +- if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) +- mlx5_del_flow_rules(vport->egress.drop_rule); +- +- vport->egress.allowed_vlan = NULL; +- vport->egress.drop_rule = NULL; ++ if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) { ++ mlx5_del_flow_rules(vport->egress.legacy.drop_rule); ++ vport->egress.legacy.drop_rule = NULL; ++ } + } + + void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, +@@ -1197,14 +1198,15 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +- if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) +- mlx5_del_flow_rules(vport->ingress.drop_rule); ++ if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_rule)) { ++ mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); ++ vport->ingress.legacy.drop_rule = NULL; ++ } + +- if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) ++ if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) { + mlx5_del_flow_rules(vport->ingress.allow_rule); +- +- vport->ingress.drop_rule = NULL; +- vport->ingress.allow_rule = NULL; ++ vport->ingress.allow_rule = NULL; ++ } + + esw_vport_del_ingress_acl_modify_metadata(esw, vport); + } +@@ -1233,7 +1235,7 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +- struct mlx5_fc *counter = vport->ingress.drop_counter; ++ struct mlx5_fc *counter = vport->ingress.legacy.drop_counter; + struct mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_flow_act flow_act = {0}; +@@ -1304,15 +1306,15 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + dst = &drop_ctr_dst; + dest_num++; + } +- vport->ingress.drop_rule = ++ vport->ingress.legacy.drop_rule = + mlx5_add_flow_rules(vport->ingress.acl, spec, + &flow_act, dst, dest_num); +- if (IS_ERR(vport->ingress.drop_rule)) { +- err = PTR_ERR(vport->ingress.drop_rule); ++ if (IS_ERR(vport->ingress.legacy.drop_rule)) { ++ err = PTR_ERR(vport->ingress.legacy.drop_rule); + esw_warn(esw->dev, + "vport[%d] configure ingress drop rule, err(%d)\n", + vport->vport, err); +- vport->ingress.drop_rule = NULL; ++ vport->ingress.legacy.drop_rule = NULL; + goto out; + } + +@@ -1363,7 +1365,7 @@ int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw, + static int esw_vport_egress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +- struct mlx5_fc *counter = vport->egress.drop_counter; ++ struct mlx5_fc *counter = 
vport->egress.legacy.drop_counter; + struct mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_flow_act flow_act = {0}; +@@ -1411,15 +1413,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, + dst = &drop_ctr_dst; + dest_num++; + } +- vport->egress.drop_rule = ++ vport->egress.legacy.drop_rule = + mlx5_add_flow_rules(vport->egress.acl, spec, + &flow_act, dst, dest_num); +- if (IS_ERR(vport->egress.drop_rule)) { +- err = PTR_ERR(vport->egress.drop_rule); ++ if (IS_ERR(vport->egress.legacy.drop_rule)) { ++ err = PTR_ERR(vport->egress.legacy.drop_rule); + esw_warn(esw->dev, + "vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); +- vport->egress.drop_rule = NULL; ++ vport->egress.legacy.drop_rule = NULL; + } + out: + kvfree(spec); +@@ -1662,39 +1664,39 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, + } + } + +-static void esw_vport_create_drop_counters(struct mlx5_vport *vport) ++static void esw_legacy_vport_create_drop_counters(struct mlx5_vport *vport) + { + struct mlx5_core_dev *dev = vport->dev; + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) { +- vport->ingress.drop_counter = mlx5_fc_create(dev, false); +- if (IS_ERR(vport->ingress.drop_counter)) { ++ vport->ingress.legacy.drop_counter = mlx5_fc_create(dev, false); ++ if (IS_ERR(vport->ingress.legacy.drop_counter)) { + esw_warn(dev, + "vport[%d] configure ingress drop rule counter failed\n", + vport->vport); +- vport->ingress.drop_counter = NULL; ++ vport->ingress.legacy.drop_counter = NULL; + } + } + + if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) { +- vport->egress.drop_counter = mlx5_fc_create(dev, false); +- if (IS_ERR(vport->egress.drop_counter)) { ++ vport->egress.legacy.drop_counter = mlx5_fc_create(dev, false); ++ if (IS_ERR(vport->egress.legacy.drop_counter)) { + esw_warn(dev, + "vport[%d] configure egress drop rule counter failed\n", + vport->vport); +- vport->egress.drop_counter = NULL; ++ vport->egress.legacy.drop_counter = NULL; + } + } + } + +-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) ++static void esw_legacy_vport_destroy_drop_counters(struct mlx5_vport *vport) + { + struct mlx5_core_dev *dev = vport->dev; + +- if (vport->ingress.drop_counter) +- mlx5_fc_destroy(dev, vport->ingress.drop_counter); +- if (vport->egress.drop_counter) +- mlx5_fc_destroy(dev, vport->egress.drop_counter); ++ if (vport->ingress.legacy.drop_counter) ++ mlx5_fc_destroy(dev, vport->ingress.legacy.drop_counter); ++ if (vport->egress.legacy.drop_counter) ++ mlx5_fc_destroy(dev, vport->egress.legacy.drop_counter); + } + + static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, +@@ -1710,7 +1712,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + /* Create steering drop counters for ingress and egress ACLs */ + if (!mlx5_esw_is_manager_vport(esw, vport_num) && + esw->mode == MLX5_ESWITCH_LEGACY) +- esw_vport_create_drop_counters(vport); ++ esw_legacy_vport_create_drop_counters(vport); + + /* Restore old vport configuration */ + esw_apply_vport_conf(esw, vport); +@@ -1770,7 +1772,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, + MLX5_VPORT_ADMIN_STATE_DOWN); + esw_vport_disable_egress_acl(esw, vport); + esw_vport_disable_ingress_acl(esw, vport); +- esw_vport_destroy_drop_counters(vport); ++ esw_legacy_vport_destroy_drop_counters(vport); + } + esw->enabled_vports--; + mutex_unlock(&esw->state_lock); +@@ -2503,12 +2505,12 @@ static int 
mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY) + return 0; + +- if (vport->egress.drop_counter) +- mlx5_fc_query(dev, vport->egress.drop_counter, ++ if (vport->egress.legacy.drop_counter) ++ mlx5_fc_query(dev, vport->egress.legacy.drop_counter, + &stats->rx_dropped, &bytes); + +- if (vport->ingress.drop_counter) +- mlx5_fc_query(dev, vport->ingress.drop_counter, ++ if (vport->ingress.legacy.drop_counter) ++ mlx5_fc_query(dev, vport->ingress.legacy.drop_counter, + &stats->tx_dropped, &bytes); + + if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index f21d528057fa..f12d446e2c87 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -74,10 +74,16 @@ struct vport_ingress { + struct mlx5_flow_handle *modify_metadata_rule; + #endif + struct mlx5_flow_handle *allow_rule; ++#ifdef __GENKSYMS__ + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; ++#endif + #ifndef __GENKSYMS__ + struct { ++ struct mlx5_flow_handle *drop_rule; ++ struct mlx5_fc *drop_counter; ++ } legacy; ++ struct { + struct mlx5_modify_hdr *modify_metadata; + struct mlx5_flow_handle *modify_metadata_rule; + } offloads; +@@ -89,8 +95,15 @@ struct vport_egress { + struct mlx5_flow_group *allowed_vlans_grp; + struct mlx5_flow_group *drop_grp; + struct mlx5_flow_handle *allowed_vlan; ++#ifdef __GENKSYMS__ + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; ++#else ++ struct { ++ struct mlx5_flow_handle *drop_rule; ++ struct mlx5_fc *drop_counter; ++ } legacy; ++#endif + }; + + struct mlx5_vport_drop_stats { +-- +2.13.6 + diff --git a/SOURCES/0078-netdrv-net-mlx5-Tide-up-state_lock-and-vport-enabled.patch b/SOURCES/0078-netdrv-net-mlx5-Tide-up-state_lock-and-vport-enabled.patch new file mode 100644 index 0000000..8d429eb --- /dev/null +++ b/SOURCES/0078-netdrv-net-mlx5-Tide-up-state_lock-and-vport-enabled.patch @@ -0,0 +1,119 @@ +From c5c504f4dc8c98a1c62d8cee2cf175097fb68ff9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:55 -0400 +Subject: [PATCH 078/312] [netdrv] net/mlx5: Tide up state_lock and vport + enabled flag usage + +Message-id: <20200510150452.10307-31-ahleihel@redhat.com> +Patchwork-id: 306654 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 30/87] net/mlx5: Tide up state_lock and vport enabled flag usage +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 77b094305b1ba23e716bb34d3e33c8fe30a5f487 +Author: Parav Pandit +Date: Mon Oct 28 23:35:13 2019 +0000 + + net/mlx5: Tide up state_lock and vport enabled flag usage + + When eswitch is disabled, vport event handler is unregistered. + This unregistration already synchronizes with running EQ event handler + in below code flow. + + mlx5_eswitch_disable() + mlx5_eswitch_event_handlers_unregister() + mlx5_eq_notifier_unregister() + atomic_notifier_chain_unregister() + synchronize_rcu() + + notifier_callchain + eswitch_vport_event() + queue_work() + + Additionally vport->enabled flag is set under state_lock during + esw_enable_vport() but is not read under state_lock in + (a) esw_disable_vport() and (b) under atomic context + eswitch_vport_event(). 
+ + It is also necessary to synchronize with already scheduled vport event. + This is already achieved using below sequence. + + mlx5_eswitch_event_handlers_unregister() + [..] + flush_workqueue() + + Hence, + (a) Remove vport->enabled check in eswitch_vport_event() which + doesn't make any sense. + (b) Remove redundant flush_workqueue() on every vport disable. + (c) Keep esw_disable_vport() symmetric with esw_enable_vport() for + state_lock. + + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 54b5f290ab9d..8067667fd59e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1745,18 +1745,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, + { + u16 vport_num = vport->vport; + ++ mutex_lock(&esw->state_lock); + if (!vport->enabled) +- return; ++ goto done; + + esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); + /* Mark this vport as disabled to discard new events */ + vport->enabled = false; + +- /* Wait for current already scheduled events to complete */ +- flush_workqueue(esw->work_queue); + /* Disable events from this vport */ + arm_vport_context_events_cmd(esw->dev, vport->vport, 0); +- mutex_lock(&esw->state_lock); + /* We don't assume VFs will cleanup after themselves. + * Calling vport change handler while vport is disabled will cleanup + * the vport resources. +@@ -1775,6 +1773,8 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, + esw_legacy_vport_destroy_drop_counters(vport); + } + esw->enabled_vports--; ++ ++done: + mutex_unlock(&esw->state_lock); + } + +@@ -1788,12 +1788,8 @@ static int eswitch_vport_event(struct notifier_block *nb, + + vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); + vport = mlx5_eswitch_get_vport(esw, vport_num); +- if (IS_ERR(vport)) +- return NOTIFY_OK; +- +- if (vport->enabled) ++ if (!IS_ERR(vport)) + queue_work(esw->work_queue, &vport->vport_change_handler); +- + return NOTIFY_OK; + } + +-- +2.13.6 + diff --git a/SOURCES/0079-netdrv-net-mlx5-E-switch-Prepare-code-to-handle-vpor.patch b/SOURCES/0079-netdrv-net-mlx5-E-switch-Prepare-code-to-handle-vpor.patch new file mode 100644 index 0000000..77480b7 --- /dev/null +++ b/SOURCES/0079-netdrv-net-mlx5-E-switch-Prepare-code-to-handle-vpor.patch @@ -0,0 +1,194 @@ +From a549a6c259fc46e49b55d5b7ce5ad1478d9a80b8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:56 -0400 +Subject: [PATCH 079/312] [netdrv] net/mlx5: E-switch, Prepare code to handle + vport enable error + +Message-id: <20200510150452.10307-32-ahleihel@redhat.com> +Patchwork-id: 306655 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 31/87] net/mlx5: E-switch, Prepare code to handle vport enable error +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 925a6acc77a70f8b5bfd0df75e36557aa400b0a0 +Author: Parav Pandit +Date: Mon Oct 28 23:35:15 2019 +0000 + + net/mlx5: E-switch, Prepare code to handle vport enable error + + In subsequent patch, esw_enable_vport() could fail and return error. 
+ Prepare code to handle such error. + + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 62 ++++++++++++++++------ + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 +- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5 +- + 3 files changed, 50 insertions(+), 19 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 8067667fd59e..2ecb993545f9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -443,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) + return err; + } + ++static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) ++{ ++ esw_cleanup_vepa_rules(esw); ++ esw_destroy_legacy_fdb_table(esw); ++ esw_destroy_legacy_vepa_table(esw); ++} ++ + #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \ + MLX5_VPORT_MC_ADDR_CHANGE | \ + MLX5_VPORT_PROMISC_CHANGE) +@@ -459,15 +466,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw) + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + +- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); +- return 0; +-} +- +-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) +-{ +- esw_cleanup_vepa_rules(esw); +- esw_destroy_legacy_fdb_table(esw); +- esw_destroy_legacy_vepa_table(esw); ++ ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); ++ if (ret) ++ esw_destroy_legacy_table(esw); ++ return ret; + } + + static void esw_legacy_disable(struct mlx5_eswitch *esw) +@@ -1699,8 +1701,8 @@ static void esw_legacy_vport_destroy_drop_counters(struct mlx5_vport *vport) + mlx5_fc_destroy(dev, vport->egress.legacy.drop_counter); + } + +-static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, +- enum mlx5_eswitch_vport_event enabled_events) ++static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, ++ enum mlx5_eswitch_vport_event enabled_events) + { + u16 vport_num = vport->vport; + +@@ -1738,6 +1740,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + esw->enabled_vports++; + esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); + mutex_unlock(&esw->state_lock); ++ return 0; + } + + static void esw_disable_vport(struct mlx5_eswitch *esw, +@@ -1862,26 +1865,51 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) + /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs + * whichever are present on the eswitch. 
+ */ +-void ++int + mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events) + { + struct mlx5_vport *vport; ++ int num_vfs; ++ int ret; + int i; + + /* Enable PF vport */ + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); +- esw_enable_vport(esw, vport, enabled_events); ++ ret = esw_enable_vport(esw, vport, enabled_events); ++ if (ret) ++ return ret; + +- /* Enable ECPF vports */ ++ /* Enable ECPF vport */ + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); +- esw_enable_vport(esw, vport, enabled_events); ++ ret = esw_enable_vport(esw, vport, enabled_events); ++ if (ret) ++ goto ecpf_err; + } + + /* Enable VF vports */ +- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) +- esw_enable_vport(esw, vport, enabled_events); ++ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { ++ ret = esw_enable_vport(esw, vport, enabled_events); ++ if (ret) ++ goto vf_err; ++ } ++ return 0; ++ ++vf_err: ++ num_vfs = i - 1; ++ mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs) ++ esw_disable_vport(esw, vport); ++ ++ if (mlx5_ecpf_vport_exists(esw->dev)) { ++ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); ++ esw_disable_vport(esw, vport); ++ } ++ ++ecpf_err: ++ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); ++ esw_disable_vport(esw, vport); ++ return ret; + } + + /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index f12d446e2c87..d29df0c302f2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -626,7 +626,7 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); + void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); + int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); + +-void ++int + mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events); + void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 00d126fa6e02..b33543c5f68f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -2158,7 +2158,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; + +- mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); ++ err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); ++ if (err) ++ goto err_vports; + + err = esw_offloads_load_all_reps(esw); + if (err) +@@ -2171,6 +2173,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + + err_reps: + mlx5_eswitch_disable_pf_vf_vports(esw); ++err_vports: + esw_set_passing_vport_metadata(esw, false); + err_vport_metadata: + esw_offloads_steering_cleanup(esw); +-- +2.13.6 + diff --git a/SOURCES/0080-netdrv-net-mlx5-E-switch-Legacy-introduce-and-use-pe.patch b/SOURCES/0080-netdrv-net-mlx5-E-switch-Legacy-introduce-and-use-pe.patch new file mode 100644 index 0000000..58716c5 --- /dev/null +++ b/SOURCES/0080-netdrv-net-mlx5-E-switch-Legacy-introduce-and-use-pe.patch @@ -0,0 +1,162 @@ +From 
13d9574432dadefa6a706a4d523082b56bb4d200 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:57 -0400 +Subject: [PATCH 080/312] [netdrv] net/mlx5: E-switch, Legacy introduce and use + per vport acl tables APIs + +Message-id: <20200510150452.10307-33-ahleihel@redhat.com> +Patchwork-id: 306657 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 32/87] net/mlx5: E-switch, Legacy introduce and use per vport acl tables APIs +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit f5d0c01d65adba2b898836894d200e85c8a8def3 +Author: Parav Pandit +Date: Mon Oct 28 23:35:17 2019 +0000 + + net/mlx5: E-switch, Legacy introduce and use per vport acl tables APIs + + Introduce and use per vport ACL tables creation and destroy APIs, so that + subsequently patch can use them during enabling/disabling a vport in + unified way for legacy vs offloads mode. + + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 73 +++++++++++++++++++---- + 1 file changed, 60 insertions(+), 13 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 2ecb993545f9..f854750a15c5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1658,12 +1658,6 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, + SET_VLAN_STRIP | SET_VLAN_INSERT : 0; + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, + flags); +- +- /* Only legacy mode needs ACLs */ +- if (esw->mode == MLX5_ESWITCH_LEGACY) { +- esw_vport_ingress_config(esw, vport); +- esw_vport_egress_config(esw, vport); +- } + } + + static void esw_legacy_vport_create_drop_counters(struct mlx5_vport *vport) +@@ -1701,10 +1695,59 @@ static void esw_legacy_vport_destroy_drop_counters(struct mlx5_vport *vport) + mlx5_fc_destroy(dev, vport->egress.legacy.drop_counter); + } + ++static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ int ret; ++ ++ /* Only non manager vports need ACL in legacy mode */ ++ if (mlx5_esw_is_manager_vport(esw, vport->vport)) ++ return 0; ++ ++ ret = esw_vport_ingress_config(esw, vport); ++ if (ret) ++ return ret; ++ ++ ret = esw_vport_egress_config(esw, vport); ++ if (ret) ++ esw_vport_disable_ingress_acl(esw, vport); ++ ++ return ret; ++} ++ ++static int esw_vport_setup_acl(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ if (esw->mode == MLX5_ESWITCH_LEGACY) ++ return esw_vport_create_legacy_acl_tables(esw, vport); ++ ++ return 0; ++} ++ ++static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++ ++{ ++ if (mlx5_esw_is_manager_vport(esw, vport->vport)) ++ return; ++ ++ esw_vport_disable_egress_acl(esw, vport); ++ esw_vport_disable_ingress_acl(esw, vport); ++ esw_legacy_vport_destroy_drop_counters(vport); ++} ++ ++static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ if (esw->mode == MLX5_ESWITCH_LEGACY) ++ esw_vport_destroy_legacy_acl_tables(esw, vport); ++} ++ + static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + enum mlx5_eswitch_vport_event enabled_events) + { + u16 vport_num = 
vport->vport; ++ int ret; + + mutex_lock(&esw->state_lock); + WARN_ON(vport->enabled); +@@ -1719,6 +1762,10 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + /* Restore old vport configuration */ + esw_apply_vport_conf(esw, vport); + ++ ret = esw_vport_setup_acl(esw, vport); ++ if (ret) ++ goto done; ++ + /* Attach vport to the eswitch rate limiter */ + if (esw_vport_enable_qos(esw, vport, vport->info.max_rate, + vport->qos.bw_share)) +@@ -1739,8 +1786,9 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + + esw->enabled_vports++; + esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); ++done: + mutex_unlock(&esw->state_lock); +- return 0; ++ return ret; + } + + static void esw_disable_vport(struct mlx5_eswitch *esw, +@@ -1765,16 +1813,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, + esw_vport_change_handle_locked(vport); + vport->enabled_events = 0; + esw_vport_disable_qos(esw, vport); +- if (!mlx5_esw_is_manager_vport(esw, vport_num) && +- esw->mode == MLX5_ESWITCH_LEGACY) { ++ ++ if (!mlx5_esw_is_manager_vport(esw, vport->vport) && ++ esw->mode == MLX5_ESWITCH_LEGACY) + mlx5_modify_vport_admin_state(esw->dev, + MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, + vport_num, 1, + MLX5_VPORT_ADMIN_STATE_DOWN); +- esw_vport_disable_egress_acl(esw, vport); +- esw_vport_disable_ingress_acl(esw, vport); +- esw_legacy_vport_destroy_drop_counters(vport); +- } ++ ++ esw_vport_cleanup_acl(esw, vport); + esw->enabled_vports--; + + done: +-- +2.13.6 + diff --git a/SOURCES/0081-netdrv-net-mlx5-Move-ACL-drop-counters-life-cycle-cl.patch b/SOURCES/0081-netdrv-net-mlx5-Move-ACL-drop-counters-life-cycle-cl.patch new file mode 100644 index 0000000..6e8b794 --- /dev/null +++ b/SOURCES/0081-netdrv-net-mlx5-Move-ACL-drop-counters-life-cycle-cl.patch @@ -0,0 +1,161 @@ +From 5e82bd06ba83b431da61f9c9b735dd9f427973ec Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:58 -0400 +Subject: [PATCH 081/312] [netdrv] net/mlx5: Move ACL drop counters life cycle + close to ACL lifecycle + +Message-id: <20200510150452.10307-34-ahleihel@redhat.com> +Patchwork-id: 306656 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 33/87] net/mlx5: Move ACL drop counters life cycle close to ACL lifecycle +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit b7752f8341c4fecc4720fbd58f868e114a57fdea +Author: Parav Pandit +Date: Mon Oct 28 23:35:19 2019 +0000 + + net/mlx5: Move ACL drop counters life cycle close to ACL lifecycle + + It is better to create/destroy ACL related drop counters where the actual + drop rule ACLs are created/destroyed, so that ACL configuration is self + contained for ingress and egress. 
+ + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 74 +++++++++++------------ + 1 file changed, 35 insertions(+), 39 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index f854750a15c5..2d094bb7b8a1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1660,58 +1660,55 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, + flags); + } + +-static void esw_legacy_vport_create_drop_counters(struct mlx5_vport *vport) ++static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) + { +- struct mlx5_core_dev *dev = vport->dev; ++ int ret; + +- if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) { +- vport->ingress.legacy.drop_counter = mlx5_fc_create(dev, false); ++ /* Only non manager vports need ACL in legacy mode */ ++ if (mlx5_esw_is_manager_vport(esw, vport->vport)) ++ return 0; ++ ++ if (!mlx5_esw_is_manager_vport(esw, vport->vport) && ++ MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { ++ vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); + if (IS_ERR(vport->ingress.legacy.drop_counter)) { +- esw_warn(dev, ++ esw_warn(esw->dev, + "vport[%d] configure ingress drop rule counter failed\n", + vport->vport); + vport->ingress.legacy.drop_counter = NULL; + } + } + +- if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) { +- vport->egress.legacy.drop_counter = mlx5_fc_create(dev, false); ++ ret = esw_vport_ingress_config(esw, vport); ++ if (ret) ++ goto ingress_err; ++ ++ if (!mlx5_esw_is_manager_vport(esw, vport->vport) && ++ MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { ++ vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false); + if (IS_ERR(vport->egress.legacy.drop_counter)) { +- esw_warn(dev, ++ esw_warn(esw->dev, + "vport[%d] configure egress drop rule counter failed\n", + vport->vport); + vport->egress.legacy.drop_counter = NULL; + } + } +-} +- +-static void esw_legacy_vport_destroy_drop_counters(struct mlx5_vport *vport) +-{ +- struct mlx5_core_dev *dev = vport->dev; +- +- if (vport->ingress.legacy.drop_counter) +- mlx5_fc_destroy(dev, vport->ingress.legacy.drop_counter); +- if (vport->egress.legacy.drop_counter) +- mlx5_fc_destroy(dev, vport->egress.legacy.drop_counter); +-} +- +-static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport) +-{ +- int ret; +- +- /* Only non manager vports need ACL in legacy mode */ +- if (mlx5_esw_is_manager_vport(esw, vport->vport)) +- return 0; +- +- ret = esw_vport_ingress_config(esw, vport); +- if (ret) +- return ret; + + ret = esw_vport_egress_config(esw, vport); + if (ret) +- esw_vport_disable_ingress_acl(esw, vport); ++ goto egress_err; ++ ++ return 0; + ++egress_err: ++ esw_vport_disable_ingress_acl(esw, vport); ++ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); ++ vport->egress.legacy.drop_counter = NULL; ++ ++ingress_err: ++ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); ++ vport->ingress.legacy.drop_counter = NULL; + return ret; + } + +@@ -1732,8 +1729,12 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, + return; + + esw_vport_disable_egress_acl(esw, vport); ++ mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); ++ vport->egress.legacy.drop_counter = 
NULL; ++ + esw_vport_disable_ingress_acl(esw, vport); +- esw_legacy_vport_destroy_drop_counters(vport); ++ mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); ++ vport->ingress.legacy.drop_counter = NULL; + } + + static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, +@@ -1754,11 +1755,6 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + + esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + +- /* Create steering drop counters for ingress and egress ACLs */ +- if (!mlx5_esw_is_manager_vport(esw, vport_num) && +- esw->mode == MLX5_ESWITCH_LEGACY) +- esw_legacy_vport_create_drop_counters(vport); +- + /* Restore old vport configuration */ + esw_apply_vport_conf(esw, vport); + +-- +2.13.6 + diff --git a/SOURCES/0082-netdrv-net-mlx5-E-switch-Offloads-introduce-and-use-.patch b/SOURCES/0082-netdrv-net-mlx5-E-switch-Offloads-introduce-and-use-.patch new file mode 100644 index 0000000..b5d2b52 --- /dev/null +++ b/SOURCES/0082-netdrv-net-mlx5-E-switch-Offloads-introduce-and-use-.patch @@ -0,0 +1,124 @@ +From d8608d0e2b0bcaa440ea7bcc65ef20699846a27e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:03:59 -0400 +Subject: [PATCH 082/312] [netdrv] net/mlx5: E-switch, Offloads introduce and + use per vport acl tables APIs + +Message-id: <20200510150452.10307-35-ahleihel@redhat.com> +Patchwork-id: 306658 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 34/87] net/mlx5: E-switch, Offloads introduce and use per vport acl tables APIs +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 89a0f1fb16adca959ea1485a856fbcfcd1d24208 +Author: Parav Pandit +Date: Mon Oct 28 23:35:20 2019 +0000 + + net/mlx5: E-switch, Offloads introduce and use per vport acl tables APIs + + Introduce and use per vport ACL tables creation and destroy APIs, so that + subsequently patch can use them during enabling/disabling a vport. 
+ + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 49 ++++++++++++++-------- + 1 file changed, 32 insertions(+), 17 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index b33543c5f68f..756031dcf056 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1964,6 +1964,32 @@ static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw) + esw_check_vport_match_metadata_supported(esw); + } + ++static int ++esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ int err; ++ ++ err = esw_vport_ingress_config(esw, vport); ++ if (err) ++ return err; ++ ++ if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { ++ err = esw_vport_egress_config(esw, vport); ++ if (err) ++ esw_vport_disable_ingress_acl(esw, vport); ++ } ++ return err; ++} ++ ++static void ++esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ esw_vport_disable_egress_acl(esw, vport); ++ esw_vport_disable_ingress_acl(esw, vport); ++} ++ + static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) + { + struct mlx5_vport *vport; +@@ -1974,15 +2000,9 @@ static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + + mlx5_esw_for_all_vports(esw, i, vport) { +- err = esw_vport_ingress_config(esw, vport); ++ err = esw_vport_create_offloads_acl_tables(esw, vport); + if (err) +- goto err_ingress; +- +- if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { +- err = esw_vport_egress_config(esw, vport); +- if (err) +- goto err_egress; +- } ++ goto err_acl_table; + } + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) +@@ -1990,13 +2010,10 @@ static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) + + return 0; + +-err_egress: +- esw_vport_disable_ingress_acl(esw, vport); +-err_ingress: ++err_acl_table: + for (j = MLX5_VPORT_PF; j < i; j++) { + vport = &esw->vports[j]; +- esw_vport_disable_egress_acl(esw, vport); +- esw_vport_disable_ingress_acl(esw, vport); ++ esw_vport_destroy_offloads_acl_tables(esw, vport); + } + + return err; +@@ -2007,10 +2024,8 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) + struct mlx5_vport *vport; + int i; + +- mlx5_esw_for_all_vports(esw, i, vport) { +- esw_vport_disable_egress_acl(esw, vport); +- esw_vport_disable_ingress_acl(esw, vport); +- } ++ mlx5_esw_for_all_vports(esw, i, vport) ++ esw_vport_destroy_offloads_acl_tables(esw, vport); + + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; + } +-- +2.13.6 + diff --git a/SOURCES/0083-netdrv-net-mlx5-E-switch-Offloads-shift-ACL-programm.patch b/SOURCES/0083-netdrv-net-mlx5-E-switch-Offloads-shift-ACL-programm.patch new file mode 100644 index 0000000..5c2f89e --- /dev/null +++ b/SOURCES/0083-netdrv-net-mlx5-E-switch-Offloads-shift-ACL-programm.patch @@ -0,0 +1,198 @@ +From 31d317151ad03b5040aa5ee117208ff4b688095b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:00 -0400 +Subject: [PATCH 083/312] [netdrv] net/mlx5: E-switch, Offloads shift ACL + programming during enable/disable vport + +Message-id: <20200510150452.10307-36-ahleihel@redhat.com> +Patchwork-id: 306659 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 
35/87] net/mlx5: E-switch, Offloads shift ACL programming during enable/disable vport +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c + Context diff due to already backported commit + 1e62e222db2e ("net/mlx5: E-Switch, Use vport metadata matching only when mandatory") + ---> In function esw_create_uplink_offloads_acl_tables, we now call esw_use_vport_metadata + instead of esw_check_vport_match_metadata_supported. + +commit 748da30b376e034ae54b53e7e38e15cfa2bf4dda +Author: Vu Pham +Date: Mon Oct 28 23:35:22 2019 +0000 + + net/mlx5: E-switch, Offloads shift ACL programming during enable/disable vport + + Currently legacy mode enables ACL while enabling vport, while offloads + mode enable ACL when moving to offloads mode. + + Bring consistency to both modes by enabling/disabling ACL when + enabling/disabling a vport. + + It also eliminates creating ingress ACL table on unused ECPF vport in + offloads mode. + + Signed-off-by: Vu Pham + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 6 ++-- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7 ++++ + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 42 +++++++--------------- + 3 files changed, 24 insertions(+), 31 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 2d094bb7b8a1..91b5ec6c3e13 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1717,8 +1717,8 @@ static int esw_vport_setup_acl(struct mlx5_eswitch *esw, + { + if (esw->mode == MLX5_ESWITCH_LEGACY) + return esw_vport_create_legacy_acl_tables(esw, vport); +- +- return 0; ++ else ++ return esw_vport_create_offloads_acl_tables(esw, vport); + } + + static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, +@@ -1742,6 +1742,8 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw, + { + if (esw->mode == MLX5_ESWITCH_LEGACY) + esw_vport_destroy_legacy_acl_tables(esw, vport); ++ else ++ esw_vport_destroy_offloads_acl_tables(esw, vport); + } + + static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index d29df0c302f2..0927019062d2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -631,6 +631,13 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events); + void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); + ++int ++esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport); ++void ++esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport); ++ + #else /* CONFIG_MLX5_ESWITCH */ + /* eswitch API stubs */ + static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 756031dcf056..2485c2a7ad9d 100644 +--- 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1964,7 +1964,7 @@ static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw) + esw_check_vport_match_metadata_supported(esw); + } + +-static int ++int + esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +@@ -1982,7 +1982,7 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + return err; + } + +-static void ++void + esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +@@ -1990,43 +1990,27 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + esw_vport_disable_ingress_acl(esw, vport); + } + +-static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw) ++static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) + { + struct mlx5_vport *vport; +- int i, j; + int err; + + if (esw_use_vport_metadata(esw)) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; + +- mlx5_esw_for_all_vports(esw, i, vport) { +- err = esw_vport_create_offloads_acl_tables(esw, vport); +- if (err) +- goto err_acl_table; +- } +- +- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) +- esw_info(esw->dev, "Use metadata reg_c as source vport to match\n"); +- +- return 0; +- +-err_acl_table: +- for (j = MLX5_VPORT_PF; j < i; j++) { +- vport = &esw->vports[j]; +- esw_vport_destroy_offloads_acl_tables(esw, vport); +- } +- ++ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); ++ err = esw_vport_create_offloads_acl_tables(esw, vport); ++ if (err) ++ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; + return err; + } + +-static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) ++static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) + { + struct mlx5_vport *vport; +- int i; +- +- mlx5_esw_for_all_vports(esw, i, vport) +- esw_vport_destroy_offloads_acl_tables(esw, vport); + ++ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); ++ esw_vport_destroy_offloads_acl_tables(esw, vport); + esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA; + } + +@@ -2044,7 +2028,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); + mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); + +- err = esw_create_offloads_acl_tables(esw); ++ err = esw_create_uplink_offloads_acl_tables(esw); + if (err) + return err; + +@@ -2069,7 +2053,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + esw_destroy_offloads_fdb_tables(esw); + + create_fdb_err: +- esw_destroy_offloads_acl_tables(esw); ++ esw_destroy_uplink_offloads_acl_tables(esw); + + return err; + } +@@ -2079,7 +2063,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_table(esw); + esw_destroy_offloads_fdb_tables(esw); +- esw_destroy_offloads_acl_tables(esw); ++ esw_destroy_uplink_offloads_acl_tables(esw); + } + + static void +-- +2.13.6 + diff --git a/SOURCES/0084-netdrv-net-mlx5-Restrict-metadata-disablement-to-off.patch b/SOURCES/0084-netdrv-net-mlx5-Restrict-metadata-disablement-to-off.patch new file mode 100644 index 0000000..597b517 --- /dev/null +++ b/SOURCES/0084-netdrv-net-mlx5-Restrict-metadata-disablement-to-off.patch @@ -0,0 +1,104 @@ +From 807c9a6c1824b43987f92a40a7ef47bd582a38e6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:01 -0400 +Subject: [PATCH 084/312] [netdrv] 
net/mlx5: Restrict metadata disablement to + offloads mode + +Message-id: <20200510150452.10307-37-ahleihel@redhat.com> +Patchwork-id: 306660 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 36/87] net/mlx5: Restrict metadata disablement to offloads mode +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit a962d7a61e2404cda6a89bfa5cc193c62223bb5e +Author: Parav Pandit +Date: Mon Oct 28 23:35:24 2019 +0000 + + net/mlx5: Restrict metadata disablement to offloads mode + + Now that there is clear separation for acl setup/cleanup between legacy + and offloads mode, limit metdata disablement to offloads mode. + + Signed-off-by: Parav Pandit + Reviewed-by: Vu Pham + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 -- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 -- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 9 ++++++--- + 3 files changed, 6 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 91b5ec6c3e13..97af7d793435 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1209,8 +1209,6 @@ void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + mlx5_del_flow_rules(vport->ingress.allow_rule); + vport->ingress.allow_rule = NULL; + } +- +- esw_vport_del_ingress_acl_modify_metadata(esw, vport); + } + + void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 0927019062d2..777224ed18bc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -282,8 +282,6 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport); + int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 2485c2a7ad9d..767993b10110 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1849,8 +1849,8 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + return err; + } + +-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport) ++static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) + { + if (vport->ingress.offloads.modify_metadata_rule) { + mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); +@@ -1976,8 +1976,10 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + + if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { + err = esw_vport_egress_config(esw, vport); +- if (err) ++ if (err) { ++ esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_disable_ingress_acl(esw, vport); ++ } + } + return err; + } +@@ 
-1987,6 +1989,7 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { + esw_vport_disable_egress_acl(esw, vport); ++ esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_disable_ingress_acl(esw, vport); + } + +-- +2.13.6 + diff --git a/SOURCES/0085-netdrv-net-mlx5-Refactor-ingress-acl-configuration.patch b/SOURCES/0085-netdrv-net-mlx5-Refactor-ingress-acl-configuration.patch new file mode 100644 index 0000000..49b4360 --- /dev/null +++ b/SOURCES/0085-netdrv-net-mlx5-Refactor-ingress-acl-configuration.patch @@ -0,0 +1,588 @@ +From ccb016735ab552893c77a5deeeef4d795c18448e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:02 -0400 +Subject: [PATCH 085/312] [netdrv] net/mlx5: Refactor ingress acl configuration + +Message-id: <20200510150452.10307-38-ahleihel@redhat.com> +Patchwork-id: 306661 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 37/87] net/mlx5: Refactor ingress acl configuration +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 10652f39943ec19d32a6fa44a8523b0d40abcbcf +Author: Parav Pandit +Date: Mon Oct 28 23:35:26 2019 +0000 + + net/mlx5: Refactor ingress acl configuration + + Drop, untagged, spoof check and untagged spoof check flow groups are + limited to legacy mode only. + + Therefore, following refactoring is done to + (a) improve code readability + (b) have better code split between legacy and offloads mode + + 1. Move legacy flow groups under legacy structure + 2. Add validity check for group deletion + 3. Restrict scope of esw_vport_disable_ingress_acl to legacy mode + 4. Rename esw_vport_enable_ingress_acl() to + esw_vport_create_ingress_acl_table() and limit its scope to + table creation + 5. Introduce legacy flow groups creation helper + esw_legacy_create_ingress_acl_groups() and keep its scope to legacy mode + 6. Reduce offloads ingress groups from 4 to just 1 metadata group + per vport + 7. Removed redundant IS_ERR_OR_NULL as entries are marked NULL on free. + 8. 
Shortern error message to remove redundant 'E-switch' + + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 228 ++++++++++++--------- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 17 +- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 67 +++++- + 3 files changed, 201 insertions(+), 111 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 97af7d793435..1937198405e1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1065,57 +1065,21 @@ void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + vport->egress.acl = NULL; + } + +-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport) ++static int ++esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) + { + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; +- struct mlx5_flow_namespace *root_ns; +- struct mlx5_flow_table *acl; + struct mlx5_flow_group *g; + void *match_criteria; + u32 *flow_group_in; +- /* The ingress acl table contains 4 groups +- * (2 active rules at the same time - +- * 1 allow rule from one of the first 3 groups. +- * 1 drop rule from the last group): +- * 1)Allow untagged traffic with smac=original mac. +- * 2)Allow untagged traffic. +- * 3)Allow traffic with smac=original mac. +- * 4)Drop all other traffic. +- */ +- int table_size = 4; +- int err = 0; +- +- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) +- return -EOPNOTSUPP; +- +- if (!IS_ERR_OR_NULL(vport->ingress.acl)) +- return 0; +- +- esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", +- vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); +- +- root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, +- mlx5_eswitch_vport_num_to_index(esw, vport->vport)); +- if (!root_ns) { +- esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport); +- return -EOPNOTSUPP; +- } ++ int err; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + +- acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); +- if (IS_ERR(acl)) { +- err = PTR_ERR(acl); +- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", +- vport->vport, err); +- goto out; +- } +- vport->ingress.acl = acl; +- + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); +@@ -1125,14 +1089,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + +- g = mlx5_create_flow_group(acl, flow_group_in); ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); +- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", ++ esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n", + vport->vport, err); +- goto out; ++ goto spoof_err; + } +- vport->ingress.allow_untagged_spoofchk_grp = g; ++ vport->ingress.legacy.allow_untagged_spoofchk_grp = g; + + 
memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); +@@ -1140,14 +1104,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + +- g = mlx5_create_flow_group(acl, flow_group_in); ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); +- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", ++ esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n", + vport->vport, err); +- goto out; ++ goto untagged_err; + } +- vport->ingress.allow_untagged_only_grp = g; ++ vport->ingress.legacy.allow_untagged_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); +@@ -1156,80 +1120,134 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); + +- g = mlx5_create_flow_group(acl, flow_group_in); ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); +- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", ++ esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n", + vport->vport, err); +- goto out; ++ goto allow_spoof_err; + } +- vport->ingress.allow_spoofchk_only_grp = g; ++ vport->ingress.legacy.allow_spoofchk_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); + +- g = mlx5_create_flow_group(acl, flow_group_in); ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); +- esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", ++ esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n", + vport->vport, err); +- goto out; ++ goto drop_err; + } +- vport->ingress.drop_grp = g; ++ vport->ingress.legacy.drop_grp = g; ++ kvfree(flow_group_in); ++ return 0; + +-out: +- if (err) { +- if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp)) +- mlx5_destroy_flow_group( +- vport->ingress.allow_spoofchk_only_grp); +- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp)) +- mlx5_destroy_flow_group( +- vport->ingress.allow_untagged_only_grp); +- if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp)) +- mlx5_destroy_flow_group( +- vport->ingress.allow_untagged_spoofchk_grp); +- if (!IS_ERR_OR_NULL(vport->ingress.acl)) +- mlx5_destroy_flow_table(vport->ingress.acl); ++drop_err: ++ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); ++ vport->ingress.legacy.allow_spoofchk_only_grp = NULL; + } +- ++allow_spoof_err: ++ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); ++ vport->ingress.legacy.allow_untagged_only_grp = NULL; ++ } ++untagged_err: ++ if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); ++ 
vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; ++ } ++spoof_err: + kvfree(flow_group_in); + return err; + } + ++int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport, int table_size) ++{ ++ struct mlx5_core_dev *dev = esw->dev; ++ struct mlx5_flow_namespace *root_ns; ++ struct mlx5_flow_table *acl; ++ int vport_index; ++ int err; ++ ++ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) ++ return -EOPNOTSUPP; ++ ++ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", ++ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); ++ ++ vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport); ++ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, ++ vport_index); ++ if (!root_ns) { ++ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", ++ vport->vport); ++ return -EOPNOTSUPP; ++ } ++ ++ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); ++ if (IS_ERR(acl)) { ++ err = PTR_ERR(acl); ++ esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n", ++ vport->vport, err); ++ return err; ++ } ++ vport->ingress.acl = acl; ++ return 0; ++} ++ ++void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport) ++{ ++ if (!vport->ingress.acl) ++ return; ++ ++ mlx5_destroy_flow_table(vport->ingress.acl); ++ vport->ingress.acl = NULL; ++} ++ + void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +- if (!IS_ERR_OR_NULL(vport->ingress.legacy.drop_rule)) { ++ if (vport->ingress.legacy.drop_rule) { + mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); + vport->ingress.legacy.drop_rule = NULL; + } + +- if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) { ++ if (vport->ingress.allow_rule) { + mlx5_del_flow_rules(vport->ingress.allow_rule); + vport->ingress.allow_rule = NULL; + } + } + +-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport) ++static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) + { +- if (IS_ERR_OR_NULL(vport->ingress.acl)) ++ if (!vport->ingress.acl) + return; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); + + esw_vport_cleanup_ingress_rules(esw, vport); +- mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp); +- mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp); +- mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp); +- mlx5_destroy_flow_group(vport->ingress.drop_grp); +- mlx5_destroy_flow_table(vport->ingress.acl); +- vport->ingress.acl = NULL; +- vport->ingress.drop_grp = NULL; +- vport->ingress.allow_spoofchk_only_grp = NULL; +- vport->ingress.allow_untagged_only_grp = NULL; +- vport->ingress.allow_untagged_spoofchk_grp = NULL; ++ if (vport->ingress.legacy.allow_spoofchk_only_grp) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp); ++ vport->ingress.legacy.allow_spoofchk_only_grp = NULL; ++ } ++ if (vport->ingress.legacy.allow_untagged_only_grp) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp); ++ vport->ingress.legacy.allow_untagged_only_grp = NULL; ++ } ++ if (vport->ingress.legacy.allow_untagged_spoofchk_grp) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp); ++ vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL; ++ } ++ if (vport->ingress.legacy.drop_grp) { ++ mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp); ++ 
vport->ingress.legacy.drop_grp = NULL; ++ } ++ esw_vport_destroy_ingress_acl_table(vport); + } + + static int esw_vport_ingress_config(struct mlx5_eswitch *esw, +@@ -1244,19 +1262,36 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + int err = 0; + u8 *smac_v; + ++ /* The ingress acl table contains 4 groups ++ * (2 active rules at the same time - ++ * 1 allow rule from one of the first 3 groups. ++ * 1 drop rule from the last group): ++ * 1)Allow untagged traffic with smac=original mac. ++ * 2)Allow untagged traffic. ++ * 3)Allow traffic with smac=original mac. ++ * 4)Drop all other traffic. ++ */ ++ int table_size = 4; ++ + esw_vport_cleanup_ingress_rules(esw, vport); + + if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { +- esw_vport_disable_ingress_acl(esw, vport); ++ esw_vport_disable_legacy_ingress_acl(esw, vport); + return 0; + } + +- err = esw_vport_enable_ingress_acl(esw, vport); +- if (err) { +- mlx5_core_warn(esw->dev, +- "failed to enable ingress acl (%d) on vport[%d]\n", +- err, vport->vport); +- return err; ++ if (!vport->ingress.acl) { ++ err = esw_vport_create_ingress_acl_table(esw, vport, table_size); ++ if (err) { ++ esw_warn(esw->dev, ++ "vport[%d] enable ingress acl err (%d)\n", ++ err, vport->vport); ++ return err; ++ } ++ ++ err = esw_vport_create_legacy_ingress_acl_groups(esw, vport); ++ if (err) ++ goto out; + } + + esw_debug(esw->dev, +@@ -1317,10 +1352,11 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + vport->ingress.legacy.drop_rule = NULL; + goto out; + } ++ kvfree(spec); ++ return 0; + + out: +- if (err) +- esw_vport_cleanup_ingress_rules(esw, vport); ++ esw_vport_disable_legacy_ingress_acl(esw, vport); + kvfree(spec); + return err; + } +@@ -1700,7 +1736,7 @@ static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw, + return 0; + + egress_err: +- esw_vport_disable_ingress_acl(esw, vport); ++ esw_vport_disable_legacy_ingress_acl(esw, vport); + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); + vport->egress.legacy.drop_counter = NULL; + +@@ -1730,7 +1766,7 @@ static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw, + mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); + vport->egress.legacy.drop_counter = NULL; + +- esw_vport_disable_ingress_acl(esw, vport); ++ esw_vport_disable_legacy_ingress_acl(esw, vport); + mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter); + vport->ingress.legacy.drop_counter = NULL; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 777224ed18bc..963d0df0d66b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -65,25 +65,30 @@ + + struct vport_ingress { + struct mlx5_flow_table *acl; ++#ifdef __GENKSYMS__ + struct mlx5_flow_group *allow_untagged_spoofchk_grp; + struct mlx5_flow_group *allow_spoofchk_only_grp; + struct mlx5_flow_group *allow_untagged_only_grp; + struct mlx5_flow_group *drop_grp; +-#ifdef __GENKSYMS__ + struct mlx5_modify_hdr *modify_metadata; + struct mlx5_flow_handle *modify_metadata_rule; + #endif +- struct mlx5_flow_handle *allow_rule; ++ struct mlx5_flow_handle *allow_rule; + #ifdef __GENKSYMS__ + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; + #endif + #ifndef __GENKSYMS__ + struct { ++ struct mlx5_flow_group *allow_spoofchk_only_grp; ++ struct mlx5_flow_group *allow_untagged_spoofchk_grp; ++ struct mlx5_flow_group 
*allow_untagged_only_grp; ++ struct mlx5_flow_group *drop_grp; + struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; + } legacy; + struct { ++ struct mlx5_flow_group *metadata_grp; + struct mlx5_modify_hdr *modify_metadata; + struct mlx5_flow_handle *modify_metadata_rule; + } offloads; +@@ -272,16 +277,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); + int esw_offloads_init_reps(struct mlx5_eswitch *esw); + void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport); ++int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport, ++ int table_size); ++void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport); + void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); +-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, +- struct mlx5_vport *vport); + int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 767993b10110..7fe085fa3d29 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1860,6 +1860,44 @@ static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + } + } + ++static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, ++ struct mlx5_vport *vport) ++{ ++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); ++ struct mlx5_flow_group *g; ++ u32 *flow_group_in; ++ int ret = 0; ++ ++ flow_group_in = kvzalloc(inlen, GFP_KERNEL); ++ if (!flow_group_in) ++ return -ENOMEM; ++ ++ memset(flow_group_in, 0, inlen); ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); ++ ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); ++ if (IS_ERR(g)) { ++ ret = PTR_ERR(g); ++ esw_warn(esw->dev, ++ "Failed to create vport[%d] ingress metdata group, err(%d)\n", ++ vport->vport, ret); ++ goto grp_err; ++ } ++ vport->ingress.offloads.metadata_grp = g; ++grp_err: ++ kvfree(flow_group_in); ++ return ret; ++} ++ ++static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) ++{ ++ if (vport->ingress.offloads.metadata_grp) { ++ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp); ++ vport->ingress.offloads.metadata_grp = NULL; ++ } ++} ++ + static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { +@@ -1870,8 +1908,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + return 0; + + esw_vport_cleanup_ingress_rules(esw, vport); +- +- err = esw_vport_enable_ingress_acl(esw, vport); ++ err = esw_vport_create_ingress_acl_table(esw, vport, 1); + if (err) { + esw_warn(esw->dev, + "failed to enable ingress acl (%d) on vport[%d]\n", +@@ -1879,25 +1916,34 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + return err; + } + ++ err = esw_vport_create_ingress_acl_group(esw, vport); ++ if (err) ++ goto group_err; ++ + esw_debug(esw->dev, + "vport[%d] configure ingress rules\n", vport->vport); + + if 
(mlx5_eswitch_vport_match_metadata_enabled(esw)) { + err = esw_vport_add_ingress_acl_modify_metadata(esw, vport); + if (err) +- goto out; ++ goto metadata_err; + } + + if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && + mlx5_eswitch_is_vf_vport(esw, vport->vport)) { + err = esw_vport_ingress_prio_tag_config(esw, vport); + if (err) +- goto out; ++ goto prio_tag_err; + } ++ return 0; + +-out: +- if (err) +- esw_vport_disable_ingress_acl(esw, vport); ++prio_tag_err: ++ esw_vport_del_ingress_acl_modify_metadata(esw, vport); ++metadata_err: ++ esw_vport_cleanup_ingress_rules(esw, vport); ++ esw_vport_destroy_ingress_acl_group(vport); ++group_err: ++ esw_vport_destroy_ingress_acl_table(vport); + return err; + } + +@@ -1978,7 +2024,8 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + err = esw_vport_egress_config(esw, vport); + if (err) { + esw_vport_del_ingress_acl_modify_metadata(esw, vport); +- esw_vport_disable_ingress_acl(esw, vport); ++ esw_vport_cleanup_ingress_rules(esw, vport); ++ esw_vport_destroy_ingress_acl_table(vport); + } + } + return err; +@@ -1990,7 +2037,9 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + { + esw_vport_disable_egress_acl(esw, vport); + esw_vport_del_ingress_acl_modify_metadata(esw, vport); +- esw_vport_disable_ingress_acl(esw, vport); ++ esw_vport_cleanup_ingress_rules(esw, vport); ++ esw_vport_destroy_ingress_acl_group(vport); ++ esw_vport_destroy_ingress_acl_table(vport); + } + + static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) +-- +2.13.6 + diff --git a/SOURCES/0086-netdrv-net-mlx5-FPGA-support-network-cards-with-stan.patch b/SOURCES/0086-netdrv-net-mlx5-FPGA-support-network-cards-with-stan.patch new file mode 100644 index 0000000..74d2e6e --- /dev/null +++ b/SOURCES/0086-netdrv-net-mlx5-FPGA-support-network-cards-with-stan.patch @@ -0,0 +1,178 @@ +From a3c4a2bce469b8cc656cf14145d310cd3531ae2e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:04 -0400 +Subject: [PATCH 086/312] [netdrv] net/mlx5: FPGA, support network cards with + standalone FPGA + +Message-id: <20200510150452.10307-40-ahleihel@redhat.com> +Patchwork-id: 306663 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 39/87] net/mlx5: FPGA, support network cards with standalone FPGA +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit cc4db579e69b4c92a51fdc9f44bc671b40427824 +Author: Igor Leshenko +Date: Thu Sep 5 18:56:28 2019 +0300 + + net/mlx5: FPGA, support network cards with standalone FPGA + + Not all mlx5 cards with FPGA device use it for network processing. + + mlx5_core driver configures network connection to FPGA device + for all mlx5 cards with installed FPGA. If FPGA is not a part of + network path, driver crashes in this case + + Check FPGA name in function mlx5_fpga_device_start() and continue + integrate FPGA into packets flow only for dedicated cards. + Currently there are Newton and Edison cards. 
+ + Signed-off-by: Igor Leshenko + Reviewed-by: Meir Lichtinger + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 10 ++-- + .../net/ethernet/mellanox/mlx5/core/fpga/core.c | 61 +++++++++++++++------- + 2 files changed, 46 insertions(+), 25 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +index eb8b0fe0b4e1..11621d265d7e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +@@ -35,11 +35,11 @@ + + #include + +-enum mlx5_fpga_device_id { +- MLX5_FPGA_DEVICE_UNKNOWN = 0, +- MLX5_FPGA_DEVICE_KU040 = 1, +- MLX5_FPGA_DEVICE_KU060 = 2, +- MLX5_FPGA_DEVICE_KU060_2 = 3, ++enum mlx5_fpga_id { ++ MLX5_FPGA_NEWTON = 0, ++ MLX5_FPGA_EDISON = 1, ++ MLX5_FPGA_MORSE = 2, ++ MLX5_FPGA_MORSEQ = 3, + }; + + enum mlx5_fpga_image { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +index d046d1ec2a86..2ce4241459ce 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +@@ -81,19 +81,28 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image) + } + } + +-static const char *mlx5_fpga_device_name(u32 device) ++static const char *mlx5_fpga_name(u32 fpga_id) + { +- switch (device) { +- case MLX5_FPGA_DEVICE_KU040: +- return "ku040"; +- case MLX5_FPGA_DEVICE_KU060: +- return "ku060"; +- case MLX5_FPGA_DEVICE_KU060_2: +- return "ku060_2"; +- case MLX5_FPGA_DEVICE_UNKNOWN: +- default: +- return "unknown"; ++ static char ret[32]; ++ ++ switch (fpga_id) { ++ case MLX5_FPGA_NEWTON: ++ return "Newton"; ++ case MLX5_FPGA_EDISON: ++ return "Edison"; ++ case MLX5_FPGA_MORSE: ++ return "Morse"; ++ case MLX5_FPGA_MORSEQ: ++ return "MorseQ"; + } ++ ++ snprintf(ret, sizeof(ret), "Unknown %d", fpga_id); ++ return ret; ++} ++ ++static int mlx5_is_fpga_lookaside(u32 fpga_id) ++{ ++ return fpga_id != MLX5_FPGA_NEWTON && fpga_id != MLX5_FPGA_EDISON; + } + + static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) +@@ -110,8 +119,12 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) + fdev->last_admin_image = query.admin_image; + fdev->last_oper_image = query.oper_image; + +- mlx5_fpga_dbg(fdev, "Status %u; Admin image %u; Oper image %u\n", +- query.status, query.admin_image, query.oper_image); ++ mlx5_fpga_info(fdev, "Status %u; Admin image %u; Oper image %u\n", ++ query.status, query.admin_image, query.oper_image); ++ ++ /* for FPGA lookaside projects FPGA load status is not important */ ++ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id))) ++ return 0; + + if (query.status != MLX5_FPGA_STATUS_SUCCESS) { + mlx5_fpga_err(fdev, "%s image failed to load; status %u\n", +@@ -167,25 +180,30 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) + struct mlx5_fpga_device *fdev = mdev->fpga; + unsigned int max_num_qps; + unsigned long flags; +- u32 fpga_device_id; ++ u32 fpga_id; + int err; + + if (!fdev) + return 0; + +- err = mlx5_fpga_device_load_check(fdev); ++ err = mlx5_fpga_caps(fdev->mdev); + if (err) + goto out; + +- err = mlx5_fpga_caps(fdev->mdev); ++ err = mlx5_fpga_device_load_check(fdev); + if (err) + goto out; + +- fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device); +- mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x 
version %d\n", +- mlx5_fpga_device_name(fpga_device_id), +- fpga_device_id, ++ fpga_id = MLX5_CAP_FPGA(fdev->mdev, fpga_id); ++ mlx5_fpga_info(fdev, "FPGA card %s:%u\n", mlx5_fpga_name(fpga_id), fpga_id); ++ ++ /* No QPs if FPGA does not participate in net processing */ ++ if (mlx5_is_fpga_lookaside(fpga_id)) ++ goto out; ++ ++ mlx5_fpga_info(fdev, "%s(%d): image, version %u; SBU %06x:%04x version %d\n", + mlx5_fpga_image_name(fdev->last_oper_image), ++ fdev->last_oper_image, + MLX5_CAP_FPGA(fdev->mdev, image_version), + MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id), +@@ -264,6 +282,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev) + if (!fdev) + return; + ++ if (mlx5_is_fpga_lookaside(MLX5_CAP_FPGA(fdev->mdev, fpga_id))) ++ return; ++ + spin_lock_irqsave(&fdev->state_lock, flags); + if (fdev->state != MLX5_FPGA_STATUS_SUCCESS) { + spin_unlock_irqrestore(&fdev->state_lock, flags); +-- +2.13.6 + diff --git a/SOURCES/0087-netdrv-net-mlx5-Remove-unneeded-variable-in-mlx5_unl.patch b/SOURCES/0087-netdrv-net-mlx5-Remove-unneeded-variable-in-mlx5_unl.patch new file mode 100644 index 0000000..cdbd89e --- /dev/null +++ b/SOURCES/0087-netdrv-net-mlx5-Remove-unneeded-variable-in-mlx5_unl.patch @@ -0,0 +1,63 @@ +From 97090ed92050b2a62a9c572b895dba75ce9e7fa2 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:05 -0400 +Subject: [PATCH 087/312] [netdrv] net/mlx5: Remove unneeded variable in + mlx5_unload_one + +Message-id: <20200510150452.10307-41-ahleihel@redhat.com> +Patchwork-id: 306665 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 40/87] net/mlx5: Remove unneeded variable in mlx5_unload_one +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 32680da7103439095ba8c2dbe30c3e4d0e05e4c2 +Author: zhong jiang +Date: Fri Sep 13 00:59:02 2019 +0800 + + net/mlx5: Remove unneeded variable in mlx5_unload_one + + mlx5_unload_one do not need local variable to store different value, + Hence just remove it. 
+ + Signed-off-by: zhong jiang + Acked-by: Saeed Mahameed + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 490bd80c586a..57e376e4e938 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1252,8 +1252,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + + static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) + { +- int err = 0; +- + if (cleanup) { + mlx5_unregister_device(dev); + mlx5_drain_health_wq(dev); +@@ -1281,7 +1279,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) + mlx5_function_teardown(dev, cleanup); + out: + mutex_unlock(&dev->intf_state_mutex); +- return err; ++ return 0; + } + + static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) +-- +2.13.6 + diff --git a/SOURCES/0088-netdrv-net-mlx5e-Verify-that-rule-has-at-least-one-f.patch b/SOURCES/0088-netdrv-net-mlx5e-Verify-that-rule-has-at-least-one-f.patch new file mode 100644 index 0000000..739d01c --- /dev/null +++ b/SOURCES/0088-netdrv-net-mlx5e-Verify-that-rule-has-at-least-one-f.patch @@ -0,0 +1,65 @@ +From 0a412d2add9b9647bd09dd2eb19f0eb5d470ebdf Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:06 -0400 +Subject: [PATCH 088/312] [netdrv] net/mlx5e: Verify that rule has at least one + fwd/drop action + +Message-id: <20200510150452.10307-42-ahleihel@redhat.com> +Patchwork-id: 306664 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 41/87] net/mlx5e: Verify that rule has at least one fwd/drop action +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit ae2741e2b6ce2bf1b656b1152c4ef147ff35b096 +Author: Vlad Buslov +Date: Wed Sep 11 21:14:54 2019 +0300 + + net/mlx5e: Verify that rule has at least one fwd/drop action + + Currently, mlx5 tc layer doesn't verify that rule has at least one forward + or drop action which leads to following firmware syndrome when user tries + to offload such action: + + [ 1824.860501] mlx5_core 0000:81:00.0: mlx5_cmd_check:753:(pid 29458): SET_FLOW_TABLE_ENTRY(0x936) op_mod(0x0) failed, status bad parameter(0x3), syndrome (0x144b7a) + + Add check at the end of parse_tc_fdb_actions() that verifies that resulting + attribute has action fwd or drop flag set. 
+ + Signed-off-by: Vlad Buslov + Reviewed-by: Paul Blakey + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index ece33ff718a4..b13e7996ad83 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3446,6 +3446,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + } + ++ if (!(attr->action & ++ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { ++ NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action"); ++ return -EOPNOTSUPP; ++ } ++ + if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + NL_SET_ERR_MSG_MOD(extack, + "current firmware doesn't support split rule for port mirroring"); +-- +2.13.6 + diff --git a/SOURCES/0089-netdrv-net-mlx5-Do-not-hold-group-lock-while-allocat.patch b/SOURCES/0089-netdrv-net-mlx5-Do-not-hold-group-lock-while-allocat.patch new file mode 100644 index 0000000..cd03830 --- /dev/null +++ b/SOURCES/0089-netdrv-net-mlx5-Do-not-hold-group-lock-while-allocat.patch @@ -0,0 +1,90 @@ +From 3ee7fedb0cc980a0923043e8dd7b87ec83998925 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:07 -0400 +Subject: [PATCH 089/312] [netdrv] net/mlx5: Do not hold group lock while + allocating FTE in software + +Message-id: <20200510150452.10307-43-ahleihel@redhat.com> +Patchwork-id: 306666 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 42/87] net/mlx5: Do not hold group lock while allocating FTE in software +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 84c7af637512be9c3254189bd5910dae0d2a8602 +Author: Parav Pandit +Date: Thu Sep 19 17:22:19 2019 -0500 + + net/mlx5: Do not hold group lock while allocating FTE in software + + FTE memory allocation using alloc_fte() doesn't have any dependency + on the flow group. + Hence, do not hold flow group lock while performing alloc_fte(). + This helps to reduce contention of flow group lock. 
+ + Signed-off-by: Parav Pandit + Reviewed-by: Daniel Jurgens + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 19 ++++++++++--------- + 1 file changed, 10 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 495396f42153..e8064bd87aad 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1817,6 +1817,13 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, + return rule; + } + ++ fte = alloc_fte(ft, spec, flow_act); ++ if (IS_ERR(fte)) { ++ up_write_ref_node(&ft->node, false); ++ err = PTR_ERR(fte); ++ goto err_alloc_fte; ++ } ++ + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + up_write_ref_node(&ft->node, false); + +@@ -1824,17 +1831,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, + if (err) + goto err_release_fg; + +- fte = alloc_fte(ft, spec, flow_act); +- if (IS_ERR(fte)) { +- err = PTR_ERR(fte); +- goto err_release_fg; +- } +- + err = insert_fte(g, fte); +- if (err) { +- kmem_cache_free(steering->ftes_cache, fte); ++ if (err) + goto err_release_fg; +- } + + nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); + up_write_ref_node(&g->node, false); +@@ -1846,6 +1845,8 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, + + err_release_fg: + up_write_ref_node(&g->node, false); ++ kmem_cache_free(steering->ftes_cache, fte); ++err_alloc_fte: + tree_put_node(&g->node, false); + return ERR_PTR(err); + } +-- +2.13.6 + diff --git a/SOURCES/0090-netdrv-net-mlx5-Support-lockless-FTE-read-lookups.patch b/SOURCES/0090-netdrv-net-mlx5-Support-lockless-FTE-read-lookups.patch new file mode 100644 index 0000000..e99d2e3 --- /dev/null +++ b/SOURCES/0090-netdrv-net-mlx5-Support-lockless-FTE-read-lookups.patch @@ -0,0 +1,202 @@ +From 82116044164f1f78e4eec9f31231adc6976b928d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:08 -0400 +Subject: [PATCH 090/312] [netdrv] net/mlx5: Support lockless FTE read lookups + +Message-id: <20200510150452.10307-44-ahleihel@redhat.com> +Patchwork-id: 306667 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 43/87] net/mlx5: Support lockless FTE read lookups +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 7dee607ed0e04500459db53001d8e02f8831f084 +Author: Parav Pandit +Date: Wed Sep 18 18:50:32 2019 -0500 + + net/mlx5: Support lockless FTE read lookups + + During connection tracking offloads with high number of connections, + (40K connections per second), flow table group lock contention is + observed. + To improve the performance by reducing lock contention, lockless + FTE read lookup is performed as described below. + + Each flow table entry is refcounted. + Flow table entry is removed when refcount drops to zero. + rhash table allows rcu protected lookup. + Each hash table entry insertion and removal is write lock protected. + + Hence, it is possible to perform lockless lookup in rhash table using + following scheme. + + (a) Guard FTE entry lookup per group using rcu read lock. + (b) Before freeing the FTE entry, wait for all readers to finish + accessing the FTE. 
+ + Below example of one reader and write in parallel racing, shows + protection in effect with rcu lock. + + lookup_fte_locked() + rcu_read_lock(); + search_hash_table() + existing_flow_group_write_lock(); + tree_put_node(fte) + drop_ref_cnt(fte) + del_sw_fte(fte) + del_hash_table_entry(); + call_rcu(); + existing_flow_group_write_unlock(); + get_ref_cnt(fte) fails + rcu_read_unlock(); + rcu grace period(); + [..] + kmem_cache_free(fte); + + Signed-off-by: Parav Pandit + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 70 ++++++++++++++++++----- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 + + 2 files changed, 56 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index e8064bd87aad..6e1ef05becce 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -531,9 +531,16 @@ static void del_hw_fte(struct fs_node *node) + } + } + ++static void del_sw_fte_rcu(struct rcu_head *head) ++{ ++ struct fs_fte *fte = container_of(head, struct fs_fte, rcu); ++ struct mlx5_flow_steering *steering = get_steering(&fte->node); ++ ++ kmem_cache_free(steering->ftes_cache, fte); ++} ++ + static void del_sw_fte(struct fs_node *node) + { +- struct mlx5_flow_steering *steering = get_steering(node); + struct mlx5_flow_group *fg; + struct fs_fte *fte; + int err; +@@ -546,7 +553,8 @@ static void del_sw_fte(struct fs_node *node) + rhash_fte); + WARN_ON(err); + ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); +- kmem_cache_free(steering->ftes_cache, fte); ++ ++ call_rcu(&fte->rcu, del_sw_fte_rcu); + } + + static void del_hw_flow_group(struct fs_node *node) +@@ -1626,22 +1634,47 @@ static u64 matched_fgs_get_version(struct list_head *match_head) + } + + static struct fs_fte * +-lookup_fte_locked(struct mlx5_flow_group *g, +- const u32 *match_value, +- bool take_write) ++lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value) + { + struct fs_fte *fte_tmp; + +- if (take_write) +- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); +- else +- nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); +- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, +- rhash_fte); ++ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); ++ ++ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte); + if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { + fte_tmp = NULL; + goto out; + } ++ ++ if (!fte_tmp->node.active) { ++ tree_put_node(&fte_tmp->node, false); ++ fte_tmp = NULL; ++ goto out; ++ } ++ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); ++ ++out: ++ up_write_ref_node(&g->node, false); ++ return fte_tmp; ++} ++ ++static struct fs_fte * ++lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) ++{ ++ struct fs_fte *fte_tmp; ++ ++ if (!tree_get_node(&g->node)) ++ return NULL; ++ ++ rcu_read_lock(); ++ fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte); ++ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { ++ rcu_read_unlock(); ++ fte_tmp = NULL; ++ goto out; ++ } ++ rcu_read_unlock(); ++ + if (!fte_tmp->node.active) { + tree_put_node(&fte_tmp->node, false); + fte_tmp = NULL; +@@ -1649,14 +1682,21 @@ lookup_fte_locked(struct mlx5_flow_group *g, + } + + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); ++ + out: 
+- if (take_write) +- up_write_ref_node(&g->node, false); +- else +- up_read_ref_node(&g->node); ++ tree_put_node(&g->node, false); + return fte_tmp; + } + ++static struct fs_fte * ++lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write) ++{ ++ if (write) ++ return lookup_fte_for_write_locked(g, match_value); ++ else ++ return lookup_fte_for_read_locked(g, match_value); ++} ++ + static struct mlx5_flow_handle * + try_add_to_existing_fg(struct mlx5_flow_table *ft, + struct list_head *match_head, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +index c6221ccbdddf..8e4ca13f4d74 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +@@ -205,6 +205,7 @@ struct fs_fte { + enum fs_fte_status status; + struct mlx5_fc *counter; + struct rhash_head hash; ++ struct rcu_head rcu; + int modify_mask; + }; + +-- +2.13.6 + diff --git a/SOURCES/0091-netdrv-net-mlx5e-TX-Dump-WQs-wqe-descriptors-on-CQE-.patch b/SOURCES/0091-netdrv-net-mlx5e-TX-Dump-WQs-wqe-descriptors-on-CQE-.patch new file mode 100644 index 0000000..abfded9 --- /dev/null +++ b/SOURCES/0091-netdrv-net-mlx5e-TX-Dump-WQs-wqe-descriptors-on-CQE-.patch @@ -0,0 +1,115 @@ +From cb1711c38d4d4209ecb17851818a4c7e2a3176c3 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:09 -0400 +Subject: [PATCH 091/312] [netdrv] net/mlx5e: TX, Dump WQs wqe descriptors on + CQE with error events + +Message-id: <20200510150452.10307-45-ahleihel@redhat.com> +Patchwork-id: 306668 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 44/87] net/mlx5e: TX, Dump WQs wqe descriptors on CQE with error events +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 130c7b46c93d313ca07d85a30d90021e424c7e9b +Author: Saeed Mahameed +Date: Tue May 7 08:56:38 2019 -0700 + + net/mlx5e: TX, Dump WQs wqe descriptors on CQE with error events + + Dump the Work Queue's TX WQE descriptor when a completion with + error is received. 
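+
+    As a rough sketch (condensed from the hunks below; error handling and
+    locking omitted), the dump path taken on an error CQE amounts to:
+
+        /* simplified sketch, not the literal driver code */
+        u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);       /* index of the failing WQE */
+        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[ci];
+
+        mlx5e_dump_error_cqe(sq, (struct mlx5_err_cqe *)cqe);
+        /* rate-limited hex dump of wi->num_wqebbs strides starting at ci */
+        mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);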
+ + Example: + [5.331832] mlx5_core 0000:00:04.0 enp0s4: Error cqe on cqn 0xa, ci 0x1, TXQ-SQ qpn 0xe, opcode 0xd, syndrome 0x2, vendor syndrome 0x0 + [5.333127] 00000000: 55 65 02 75 31 fe c2 d2 6b 6c 62 1e f9 e1 d8 5c + [5.333837] 00000010: d3 b2 6c b8 89 e4 84 20 0b f4 3c e0 f3 75 41 ca + [5.334568] 00000020: 46 00 00 00 cd 70 a0 92 18 3a 01 de 00 00 00 00 + [5.335313] 00000030: 7d bc 05 89 b2 e9 00 02 1e 00 00 0e 00 00 30 d2 + [5.335972] WQE DUMP: WQ size 1024 WQ cur size 0, WQE index 0x0, len: 64 + [5.336710] 00000000: 00 00 00 1e 00 00 0e 04 00 00 00 08 00 00 00 00 + [5.337524] 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 12 33 33 + [5.338151] 00000020: 00 00 00 16 52 54 00 00 00 01 86 dd 60 00 00 00 + [5.338740] 00000030: 00 00 00 48 00 00 00 00 00 00 00 00 66 ba 58 14 + + Signed-off-by: Saeed Mahameed + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 6 ++++++ + drivers/net/ethernet/mellanox/mlx5/core/wq.c | 18 ++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1 + + 3 files changed, 25 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 001752ace7f0..3ce27194ee7e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -462,8 +462,14 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, + &sq->state)) { ++ struct mlx5e_tx_wqe_info *wi; ++ u16 ci; ++ ++ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); ++ wi = &sq->db.wqe_info[ci]; + mlx5e_dump_error_cqe(sq, + (struct mlx5_err_cqe *)cqe); ++ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); + queue_work(cq->channel->priv->wq, + &sq->recover_work); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +index dd2315ce4441..dab2625e1e59 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +@@ -96,6 +96,24 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + return err; + } + ++void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides) ++{ ++ size_t len; ++ void *wqe; ++ ++ if (!net_ratelimit()) ++ return; ++ ++ nstrides = max_t(u8, nstrides, 1); ++ ++ len = nstrides << wq->fbc.log_stride; ++ wqe = mlx5_wq_cyc_get_wqe(wq, ix); ++ ++ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n", ++ mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len); ++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false); ++} ++ + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *qpc, struct mlx5_wq_qp *wq, + struct mlx5_wq_ctrl *wq_ctrl) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +index 55791f71a778..27338c3c6136 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +@@ -79,6 +79,7 @@ struct mlx5_wq_ll { + int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl); ++void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides); + u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); + + int mlx5_wq_qp_create(struct 
mlx5_core_dev *mdev, struct mlx5_wq_param *param, +-- +2.13.6 + diff --git a/SOURCES/0092-netdrv-net-mlx5-WQ-Move-short-getters-into-header-fi.patch b/SOURCES/0092-netdrv-net-mlx5-WQ-Move-short-getters-into-header-fi.patch new file mode 100644 index 0000000..64bfb27 --- /dev/null +++ b/SOURCES/0092-netdrv-net-mlx5-WQ-Move-short-getters-into-header-fi.patch @@ -0,0 +1,135 @@ +From ff649813dd587b6fe99a52b44bc8aef6cba9e5d1 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:10 -0400 +Subject: [PATCH 092/312] [netdrv] net/mlx5: WQ, Move short getters into header + file + +Message-id: <20200510150452.10307-46-ahleihel@redhat.com> +Patchwork-id: 306669 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 45/87] net/mlx5: WQ, Move short getters into header file +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 769619ee39dfa8297a1fe2bc2865eb1e73a9f824 +Author: Tariq Toukan +Date: Wed Oct 16 13:29:16 2019 +0300 + + net/mlx5: WQ, Move short getters into header file + + Move short Work Queue API getter functions into the WQ + header file. + + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/wq.c | 20 -------------------- + drivers/net/ethernet/mellanox/mlx5/core/wq.h | 24 ++++++++++++++++++++---- + 2 files changed, 20 insertions(+), 24 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +index dab2625e1e59..f2a0e72285ba 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +@@ -34,26 +34,6 @@ + #include "wq.h" + #include "mlx5_core.h" + +-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) +-{ +- return (u32)wq->fbc.sz_m1 + 1; +-} +- +-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) +-{ +- return wq->fbc.sz_m1 + 1; +-} +- +-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq) +-{ +- return wq->fbc.log_stride; +-} +- +-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) +-{ +- return (u32)wq->fbc.sz_m1 + 1; +-} +- + static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) + { + return ((u32)1 << log_sz) << log_stride; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +index 27338c3c6136..d9a94bc223c0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl); + void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides); +-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); + + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *qpc, struct mlx5_wq_qp *wq, +@@ -89,16 +88,18 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl); +-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); +-u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq); + + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl); +-u32 
mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); + + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); + ++static inline u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) ++{ ++ return (u32)wq->fbc.sz_m1 + 1; ++} ++ + static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) + { + return wq->cur_sz == wq->sz; +@@ -169,6 +170,16 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) + return !equal && !smaller; + } + ++static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) ++{ ++ return wq->fbc.sz_m1 + 1; ++} ++ ++static inline u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq) ++{ ++ return wq->fbc.log_stride; ++} ++ + static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr) + { + return ctr & wq->fbc.sz_m1; +@@ -225,6 +236,11 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq) + return cqe; + } + ++static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) ++{ ++ return (u32)wq->fbc.sz_m1 + 1; ++} ++ + static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) + { + return wq->cur_sz == wq->fbc.sz_m1; +-- +2.13.6 + diff --git a/SOURCES/0093-netdrv-net-mlx5e-Bit-sized-fields-rewrite-support.patch b/SOURCES/0093-netdrv-net-mlx5e-Bit-sized-fields-rewrite-support.patch new file mode 100644 index 0000000..219091d --- /dev/null +++ b/SOURCES/0093-netdrv-net-mlx5e-Bit-sized-fields-rewrite-support.patch @@ -0,0 +1,267 @@ +From 1d4ac7b4c1c443681ec5bb74e185884e00755ed6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:11 -0400 +Subject: [PATCH 093/312] [netdrv] net/mlx5e: Bit sized fields rewrite support + +Message-id: <20200510150452.10307-47-ahleihel@redhat.com> +Patchwork-id: 306670 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 46/87] net/mlx5e: Bit sized fields rewrite support +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 88f30bbcbaaa1b124fcc622ff49e3d427da9c96c +Author: Dmytro Linkin +Date: Wed Oct 2 07:37:08 2019 +0000 + + net/mlx5e: Bit sized fields rewrite support + + This patch doesn't change any functionality, but is a pre-step for + adding support for rewriting of bit-sized fields, like DSCP and ECN + in IPv4 header, similar fields in IPv6, etc. 
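+
+    For illustration (shapes and values taken from this patch and the
+    follow-up DSCP patch, not new definitions), each rewritable field is
+    now described by its width in bits plus a mask of the bits that
+    actually belong to it, so a sub-byte field can be handled without
+    touching its neighbours:
+
+        struct mlx5_fields {
+                u8  field;        /* MLX5_ACTION_IN_FIELD_OUT_* id       */
+                u8  field_bsize;  /* field width in bits: 8, 16 or 32    */
+                u32 field_mask;   /* bits of the word owned by the field */
+                u32 offset;
+                u32 match_offset;
+        };
+
+        /* whole-byte field: TTL owns all 8 bits */
+        OFFLOAD(IP_TTL,  8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
+        /* bit-sized field: DSCP owns only the upper six bits of tos */
+        OFFLOAD(IP_DSCP, 8, 0xfc,   ip4.tos, 0, ip_dscp),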
+ + Signed-off-by: Dmytro Linkin + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 122 ++++++++++++------------ + 1 file changed, 62 insertions(+), 60 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index b13e7996ad83..ab6d99d6ba14 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2244,13 +2244,14 @@ static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, + + struct mlx5_fields { + u8 field; +- u8 size; ++ u8 field_bsize; ++ u32 field_mask; + u32 offset; + u32 match_offset; + }; + +-#define OFFLOAD(fw_field, size, field, off, match_field) \ +- {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \ ++#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \ ++ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \ + offsetof(struct pedit_headers, field) + (off), \ + MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)} + +@@ -2268,18 +2269,18 @@ struct mlx5_fields { + }) + + static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, +- void *matchmaskp, int size) ++ void *matchmaskp, u8 bsize) + { + bool same = false; + +- switch (size) { +- case sizeof(u8): ++ switch (bsize) { ++ case 8: + same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp); + break; +- case sizeof(u16): ++ case 16: + same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp); + break; +- case sizeof(u32): ++ case 32: + same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp); + break; + } +@@ -2288,41 +2289,42 @@ static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, + } + + static struct mlx5_fields fields[] = { +- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16), +- OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0), +- OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16), +- OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0), +- OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype), +- OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid), +- +- OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit), +- OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), +- OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), +- +- OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0, ++ OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16), ++ OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0), ++ OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16), ++ OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0), ++ OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype), ++ OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid), ++ ++ OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit), ++ OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), ++ OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), ++ ++ OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[0]), +- OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0, ++ OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[4]), +- OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0, ++ OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[8]), +- OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0, 
++ OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0, + src_ipv4_src_ipv6.ipv6_layout.ipv6[12]), +- OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0, ++ OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]), +- OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0, ++ OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]), +- OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0, ++ OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]), +- OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0, ++ OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]), +- OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit), ++ OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit), + +- OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport), +- OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport), +- OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags), ++ OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport), ++ OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport), ++ /* in linux iphdr tcp_flags is 8 bits long */ ++ OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags), + +- OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport), +- OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport), ++ OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport), ++ OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport), + }; + + /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at +@@ -2335,19 +2337,17 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) + { + struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; +- void *headers_c = get_match_headers_criteria(*action_flags, +- &parse_attr->spec); +- void *headers_v = get_match_headers_value(*action_flags, +- &parse_attr->spec); + int i, action_size, nactions, max_actions, first, last, next_z; +- void *s_masks_p, *a_masks_p, *vals_p; ++ void *headers_c, *headers_v, *action, *vals_p; ++ u32 *s_masks_p, *a_masks_p, s_mask, a_mask; + struct mlx5_fields *f; +- u8 cmd, field_bsize; +- u32 s_mask, a_mask; + unsigned long mask; + __be32 mask_be32; + __be16 mask_be16; +- void *action; ++ u8 cmd; ++ ++ headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec); ++ headers_v = get_match_headers_value(*action_flags, &parse_attr->spec); + + set_masks = &hdrs[0].masks; + add_masks = &hdrs[1].masks; +@@ -2372,8 +2372,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + s_masks_p = (void *)set_masks + f->offset; + a_masks_p = (void *)add_masks + f->offset; + +- memcpy(&s_mask, s_masks_p, f->size); +- memcpy(&a_mask, a_masks_p, f->size); ++ s_mask = *s_masks_p & f->field_mask; ++ a_mask = *a_masks_p & f->field_mask; + + if (!s_mask && !a_mask) /* nothing to offload here */ + continue; +@@ -2402,38 +2402,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + vals_p = (void *)set_vals + f->offset; + /* don't rewrite if we have a match on the same value */ + if (cmp_val_mask(vals_p, s_masks_p, match_val, +- match_mask, f->size)) ++ match_mask, f->field_bsize)) + skip = true; + /* clear to denote we consumed this field */ +- memset(s_masks_p, 0, f->size); ++ *s_masks_p &= ~f->field_mask; + } else { +- u32 zero = 0; +- + cmd = MLX5_ACTION_TYPE_ADD; + mask = a_mask; + vals_p = (void *)add_vals + f->offset; + /* add 0 is no change */ +- if (!memcmp(vals_p, &zero, 
f->size)) ++ if ((*(u32 *)vals_p & f->field_mask) == 0) + skip = true; + /* clear to denote we consumed this field */ +- memset(a_masks_p, 0, f->size); ++ *a_masks_p &= ~f->field_mask; + } + if (skip) + continue; + +- field_bsize = f->size * BITS_PER_BYTE; +- +- if (field_bsize == 32) { ++ if (f->field_bsize == 32) { + mask_be32 = *(__be32 *)&mask; + mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); +- } else if (field_bsize == 16) { ++ } else if (f->field_bsize == 16) { + mask_be16 = *(__be16 *)&mask; + mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); + } + +- first = find_first_bit(&mask, field_bsize); +- next_z = find_next_zero_bit(&mask, field_bsize, first); +- last = find_last_bit(&mask, field_bsize); ++ first = find_first_bit(&mask, f->field_bsize); ++ next_z = find_next_zero_bit(&mask, f->field_bsize, first); ++ last = find_last_bit(&mask, f->field_bsize); + if (first < next_z && next_z < last) { + NL_SET_ERR_MSG_MOD(extack, + "rewrite of few sub-fields isn't supported"); +@@ -2446,16 +2442,22 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + MLX5_SET(set_action_in, action, field, f->field); + + if (cmd == MLX5_ACTION_TYPE_SET) { +- MLX5_SET(set_action_in, action, offset, first); ++ int start; ++ ++ /* if field is bit sized it can start not from first bit */ ++ start = find_first_bit((unsigned long *)&f->field_mask, ++ f->field_bsize); ++ ++ MLX5_SET(set_action_in, action, offset, first - start); + /* length is num of bits to be written, zero means length of 32 */ + MLX5_SET(set_action_in, action, length, (last - first + 1)); + } + +- if (field_bsize == 32) ++ if (f->field_bsize == 32) + MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first); +- else if (field_bsize == 16) ++ else if (f->field_bsize == 16) + MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first); +- else if (field_bsize == 8) ++ else if (f->field_bsize == 8) + MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first); + + action += action_size; +-- +2.13.6 + diff --git a/SOURCES/0094-netdrv-net-mlx5e-Add-ToS-DSCP-header-rewrite-support.patch b/SOURCES/0094-netdrv-net-mlx5e-Add-ToS-DSCP-header-rewrite-support.patch new file mode 100644 index 0000000..28d00be --- /dev/null +++ b/SOURCES/0094-netdrv-net-mlx5e-Add-ToS-DSCP-header-rewrite-support.patch @@ -0,0 +1,62 @@ +From 9555891ed1fbd0e9a491b35499dabb75fd5d6782 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:12 -0400 +Subject: [PATCH 094/312] [netdrv] net/mlx5e: Add ToS (DSCP) header rewrite + support + +Message-id: <20200510150452.10307-48-ahleihel@redhat.com> +Patchwork-id: 306671 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 47/87] net/mlx5e: Add ToS (DSCP) header rewrite support +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit ab9341b54969a2d02dbb7819e2f17c2f0d9cf5b5 +Author: Dmytro Linkin +Date: Mon Oct 7 10:48:00 2019 +0000 + + net/mlx5e: Add ToS (DSCP) header rewrite support + + Add support for rewriting of DSCP part of ToS field. 
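+
+    The driver side is the single table entry added below; as a plain
+    illustration of the 0xfc mask (not driver code), DSCP occupies the six
+    most significant bits of the IPv4 tos byte, so a DSCP rewrite touches
+    only those bits and leaves ECN intact:
+
+        /* illustration only: composing tos from a DSCP value */
+        #define IP_DSCP_MASK 0xfc                        /* bits 7..2 of tos     */
+        new_tos = (old_tos & ~IP_DSCP_MASK) |            /* keep ECN (bits 1..0) */
+                  ((dscp << 2) & IP_DSCP_MASK);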
+ Next commands, for example, can be used to offload rewrite action: + + OVS: + $ ovs-ofctl add-flow ovs-sriov "ip, in_port=REP, \ + actions=mod_nw_tos:68, output:NIC" + + iproute2 (used retain mask, as tc command rewrite whole ToS field): + $ tc filter add dev REP ingress protocol ip prio 1 flower skip_sw \ + ip_proto icmp action pedit munge ip tos set 68 retain 0xfc pipe \ + action mirred egress redirect dev NIC + + Signed-off-by: Dmytro Linkin + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index ab6d99d6ba14..1a4b8d995826 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2296,6 +2296,7 @@ static struct mlx5_fields fields[] = { + OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype), + OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid), + ++ OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp), + OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit), + OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4), + OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), +-- +2.13.6 + diff --git a/SOURCES/0095-netdrv-net-mlx5-rate-limit-alloc_ent-error-messages.patch b/SOURCES/0095-netdrv-net-mlx5-rate-limit-alloc_ent-error-messages.patch new file mode 100644 index 0000000..1316f0e --- /dev/null +++ b/SOURCES/0095-netdrv-net-mlx5-rate-limit-alloc_ent-error-messages.patch @@ -0,0 +1,56 @@ +From 33326c01f2afd8a6879e9bcc963dc2c90c13f9a8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:13 -0400 +Subject: [PATCH 095/312] [netdrv] net/mlx5: rate limit alloc_ent error + messages + +Message-id: <20200510150452.10307-49-ahleihel@redhat.com> +Patchwork-id: 306672 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 48/87] net/mlx5: rate limit alloc_ent error messages +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 5a212e0cac548e5e4fb3f2ba1b5b2f6c8949687d +Author: Li RongQing +Date: Thu Oct 24 16:23:33 2019 +0800 + + net/mlx5: rate limit alloc_ent error messages + + when debug a bug, which triggers TX hang, and kernel log is + spammed with the following info message + + [ 1172.044764] mlx5_core 0000:21:00.0: cmd_work_handler:930:(pid 8): + failed to allocate command entry + + Signed-off-by: Li RongQing + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 8242f96ab931..71a52b890f38 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -866,7 +866,7 @@ static void cmd_work_handler(struct work_struct *work) + if (!ent->page_queue) { + alloc_ret = alloc_ent(cmd); + if (alloc_ret < 0) { +- mlx5_core_err(dev, "failed to allocate command entry\n"); ++ mlx5_core_err_rl(dev, "failed to allocate command entry\n"); + if (ent->callback) { + ent->callback(-EAGAIN, ent->context); + 
mlx5_free_cmd_msg(dev, ent->out); +-- +2.13.6 + diff --git a/SOURCES/0096-netdrv-net-mlx5-LAG-Use-port-enumerators.patch b/SOURCES/0096-netdrv-net-mlx5-LAG-Use-port-enumerators.patch new file mode 100644 index 0000000..09ced12 --- /dev/null +++ b/SOURCES/0096-netdrv-net-mlx5-LAG-Use-port-enumerators.patch @@ -0,0 +1,328 @@ +From 8e8051d3aa6145a96ad1457fc55cb31426fc2bdf Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:14 -0400 +Subject: [PATCH 096/312] [netdrv] net/mlx5: LAG, Use port enumerators + +Message-id: <20200510150452.10307-50-ahleihel@redhat.com> +Patchwork-id: 306674 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 49/87] net/mlx5: LAG, Use port enumerators +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c + Various context diff due to missing commit: + 5481d73f8154 ("ipv4: Use accessors for fib_info nexthop data") + And already backported commit: + 1cdc14e9d134 ("net/mlx5: LAG, Use affinity type enumerators") + +commit 84d2dbb0aaaf1098aa2c2ca07003bf3f973732ac +Author: Erez Alfasi +Date: Mon Sep 16 13:59:58 2019 +0300 + + net/mlx5: LAG, Use port enumerators + + Instead of using explicit array indexes, simply use + ports enumerators to make the code more readable. + + Fixes: 7907f23adc18 ("net/mlx5: Implement RoCE LAG feature") + Signed-off-by: Erez Alfasi + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lag.c | 65 +++++++++++++----------- + drivers/net/ethernet/mellanox/mlx5/core/lag.h | 5 ++ + drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 56 ++++++++++---------- + 3 files changed, 69 insertions(+), 57 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c +index c5ef2ff26465..fc0d9583475d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c +@@ -145,34 +145,35 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, + { + *port1 = 1; + *port2 = 2; +- if (!tracker->netdev_state[0].tx_enabled || +- !tracker->netdev_state[0].link_up) { ++ if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled || ++ !tracker->netdev_state[MLX5_LAG_P1].link_up) { + *port1 = 2; + return; + } + +- if (!tracker->netdev_state[1].tx_enabled || +- !tracker->netdev_state[1].link_up) ++ if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled || ++ !tracker->netdev_state[MLX5_LAG_P2].link_up) + *port2 = 1; + } + + void mlx5_modify_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) + { +- struct mlx5_core_dev *dev0 = ldev->pf[0].dev; ++ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + u8 v2p_port1, v2p_port2; + int err; + + mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, + &v2p_port2); + +- if (v2p_port1 != ldev->v2p_map[0] || +- v2p_port2 != ldev->v2p_map[1]) { +- ldev->v2p_map[0] = v2p_port1; +- ldev->v2p_map[1] = v2p_port2; ++ if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] || ++ v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) { ++ ldev->v2p_map[MLX5_LAG_P1] = v2p_port1; ++ ldev->v2p_map[MLX5_LAG_P2] = v2p_port2; + + mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d", +- ldev->v2p_map[0], ldev->v2p_map[1]); ++ ldev->v2p_map[MLX5_LAG_P1], ++ ldev->v2p_map[MLX5_LAG_P2]); + + err = mlx5_cmd_modify_lag(dev0, 
v2p_port1, v2p_port2); + if (err) +@@ -185,16 +186,17 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, + static int mlx5_create_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) + { +- struct mlx5_core_dev *dev0 = ldev->pf[0].dev; ++ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int err; + +- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0], +- &ldev->v2p_map[1]); ++ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], ++ &ldev->v2p_map[MLX5_LAG_P2]); + + mlx5_core_info(dev0, "lag map port 1:%d port 2:%d", +- ldev->v2p_map[0], ldev->v2p_map[1]); ++ ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]); + +- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]); ++ err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1], ++ ldev->v2p_map[MLX5_LAG_P2]); + if (err) + mlx5_core_err(dev0, + "Failed to create LAG (%d)\n", +@@ -207,7 +209,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, + u8 flags) + { + bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE); +- struct mlx5_core_dev *dev0 = ldev->pf[0].dev; ++ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + int err; + + err = mlx5_create_lag(ldev, tracker); +@@ -229,7 +231,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, + + static int mlx5_deactivate_lag(struct mlx5_lag *ldev) + { +- struct mlx5_core_dev *dev0 = ldev->pf[0].dev; ++ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + bool roce_lag = __mlx5_lag_is_roce(ldev); + int err; + +@@ -252,14 +254,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) + + static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) + { +- if (!ldev->pf[0].dev || !ldev->pf[1].dev) ++ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev) + return false; + + #ifdef CONFIG_MLX5_ESWITCH +- return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev); ++ return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev, ++ ldev->pf[MLX5_LAG_P2].dev); + #else +- return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) && +- !mlx5_sriov_is_enabled(ldev->pf[1].dev)); ++ return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) && ++ !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev)); + #endif + } + +@@ -285,8 +288,8 @@ static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev) + + static void mlx5_do_bond(struct mlx5_lag *ldev) + { +- struct mlx5_core_dev *dev0 = ldev->pf[0].dev; +- struct mlx5_core_dev *dev1 = ldev->pf[1].dev; ++ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; ++ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + struct lag_tracker tracker; + bool do_bond, roce_lag; + int err; +@@ -692,10 +695,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) + goto unlock; + + if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { +- ndev = ldev->tracker.netdev_state[0].tx_enabled ? +- ldev->pf[0].netdev : ldev->pf[1].netdev; ++ ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ? ++ ldev->pf[MLX5_LAG_P1].netdev : ++ ldev->pf[MLX5_LAG_P2].netdev; + } else { +- ndev = ldev->pf[0].netdev; ++ ndev = ldev->pf[MLX5_LAG_P1].netdev; + } + if (ndev) + dev_hold(ndev); +@@ -717,7 +721,8 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) + return true; + + ldev = mlx5_lag_dev_get(dev); +- if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev) ++ if (!ldev || !__mlx5_lag_is_roce(ldev) || ++ ldev->pf[MLX5_LAG_P1].dev == dev) + return true; + + /* If bonded, we do not add an IB device for PF1. 
*/ +@@ -746,11 +751,11 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, + ldev = mlx5_lag_dev_get(dev); + if (ldev && __mlx5_lag_is_roce(ldev)) { + num_ports = MLX5_MAX_PORTS; +- mdev[0] = ldev->pf[0].dev; +- mdev[1] = ldev->pf[1].dev; ++ mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev; ++ mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev; + } else { + num_ports = 1; +- mdev[0] = dev; ++ mdev[MLX5_LAG_P1] = dev; + } + + for (i = 0; i < num_ports; ++i) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h +index 1dea0b1c9826..f1068aac6406 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h +@@ -8,6 +8,11 @@ + #include "lag_mp.h" + + enum { ++ MLX5_LAG_P1, ++ MLX5_LAG_P2, ++}; ++ ++enum { + MLX5_LAG_FLAG_ROCE = 1 << 0, + MLX5_LAG_FLAG_SRIOV = 1 << 1, + MLX5_LAG_FLAG_MULTIPATH = 1 << 2, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +index a5addeadc732..151ba67e4d25 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +@@ -10,10 +10,11 @@ + + static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) + { +- if (!ldev->pf[0].dev || !ldev->pf[1].dev) ++ if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev) + return false; + +- return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev); ++ return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev, ++ ldev->pf[MLX5_LAG_P2].dev); + } + + static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev) +@@ -52,36 +53,36 @@ static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, + + switch (port) { + case MLX5_LAG_NORMAL_AFFINITY: +- tracker.netdev_state[0].tx_enabled = true; +- tracker.netdev_state[1].tx_enabled = true; +- tracker.netdev_state[0].link_up = true; +- tracker.netdev_state[1].link_up = true; ++ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true; ++ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true; ++ tracker.netdev_state[MLX5_LAG_P1].link_up = true; ++ tracker.netdev_state[MLX5_LAG_P2].link_up = true; + break; + case MLX5_LAG_P1_AFFINITY: +- tracker.netdev_state[0].tx_enabled = true; +- tracker.netdev_state[0].link_up = true; +- tracker.netdev_state[1].tx_enabled = false; +- tracker.netdev_state[1].link_up = false; ++ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true; ++ tracker.netdev_state[MLX5_LAG_P1].link_up = true; ++ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false; ++ tracker.netdev_state[MLX5_LAG_P2].link_up = false; + break; + case MLX5_LAG_P2_AFFINITY: +- tracker.netdev_state[0].tx_enabled = false; +- tracker.netdev_state[0].link_up = false; +- tracker.netdev_state[1].tx_enabled = true; +- tracker.netdev_state[1].link_up = true; ++ tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false; ++ tracker.netdev_state[MLX5_LAG_P1].link_up = false; ++ tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true; ++ tracker.netdev_state[MLX5_LAG_P2].link_up = true; + break; + default: +- mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d", +- port); ++ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev, ++ "Invalid affinity port %d", port); + return; + } + +- if (tracker.netdev_state[0].tx_enabled) +- mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events, ++ if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled) ++ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events, + MLX5_DEV_EVENT_PORT_AFFINITY, + (void *)0); + +- if 
(tracker.netdev_state[1].tx_enabled) +- mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events, ++ if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled) ++ mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events, + MLX5_DEV_EVENT_PORT_AFFINITY, + (void *)0); + +@@ -135,11 +136,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + return; + + /* Verify next hops are ports of the same hca */ +- if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev && +- fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) && +- !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev && +- fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) { +- mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n"); ++ if (!(fi->fib_nh[0].nh_dev == ldev->pf[MLX5_LAG_P1].netdev && ++ fi->fib_nh[1].nh_dev == ldev->pf[MLX5_LAG_P2].netdev) && ++ !(fi->fib_nh[0].nh_dev == ldev->pf[MLX5_LAG_P2].netdev && ++ fi->fib_nh[1].nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) { ++ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev, ++ "Multipath offload require two ports of the same HCA\n"); + return; + } + +@@ -255,8 +257,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, + fen_info = container_of(info, struct fib_entry_notifier_info, + info); + fi = fen_info->fi; +- if (fi->fib_dev != ldev->pf[0].netdev && +- fi->fib_dev != ldev->pf[1].netdev) { ++ if (fi->fib_dev != ldev->pf[MLX5_LAG_P1].netdev && ++ fi->fib_dev != ldev->pf[MLX5_LAG_P2].netdev) { + return NOTIFY_DONE; + } + fib_work = mlx5_lag_init_fib_work(ldev, event); +-- +2.13.6 + diff --git a/SOURCES/0097-netdrv-net-mlx5-fix-kvfree-of-uninitialized-pointer-.patch b/SOURCES/0097-netdrv-net-mlx5-fix-kvfree-of-uninitialized-pointer-.patch new file mode 100644 index 0000000..c96f5aa --- /dev/null +++ b/SOURCES/0097-netdrv-net-mlx5-fix-kvfree-of-uninitialized-pointer-.patch @@ -0,0 +1,57 @@ +From 54b8e94b33419c07a2e04193b185412a08d4786f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:16 -0400 +Subject: [PATCH 097/312] [netdrv] net/mlx5: fix kvfree of uninitialized + pointer spec + +Message-id: <20200510150452.10307-52-ahleihel@redhat.com> +Patchwork-id: 306675 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 51/87] net/mlx5: fix kvfree of uninitialized pointer spec +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 8b3f2eb038d3098b37715afced1e62bbc72da90f +Author: Colin Ian King +Date: Tue Nov 5 18:27:40 2019 +0000 + + net/mlx5: fix kvfree of uninitialized pointer spec + + Currently when a call to esw_vport_create_legacy_ingress_acl_group + fails the error exit path to label 'out' will cause a kvfree on the + uninitialized pointer spec. Fix this by ensuring pointer spec is + initialized to NULL to avoid this issue. 
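+
+    A minimal sketch of the pattern being fixed (simplified, hypothetical
+    helper name, not the actual eswitch code):
+
+        struct mlx5_flow_spec *spec = NULL;   /* the fix: start out NULL */
+        int err;
+
+        err = create_acl_group(vport);        /* hypothetical stand-in */
+        if (err)
+                goto out;                     /* taken before spec is ever assigned */
+
+        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+        /* ... build and apply the rule ... */
+out:
+        kvfree(spec);                         /* safe: kvfree(NULL) is a no-op */
+        return err;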
+ + Addresses-Coverity: ("Uninitialized pointer read") + Fixes: 10652f39943e ("net/mlx5: Refactor ingress acl configuration") + Signed-off-by: Colin Ian King + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 1937198405e1..93cf6eb77163 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -1257,7 +1257,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; + struct mlx5_flow_act flow_act = {0}; +- struct mlx5_flow_spec *spec; ++ struct mlx5_flow_spec *spec = NULL; + int dest_num = 0; + int err = 0; + u8 *smac_v; +-- +2.13.6 + diff --git a/SOURCES/0098-netdrv-net-mlx5-fix-spelling-mistake-metdata-metadat.patch b/SOURCES/0098-netdrv-net-mlx5-fix-spelling-mistake-metdata-metadat.patch new file mode 100644 index 0000000..b20bf95 --- /dev/null +++ b/SOURCES/0098-netdrv-net-mlx5-fix-spelling-mistake-metdata-metadat.patch @@ -0,0 +1,53 @@ +From 7747b8a366a2a6eeb89b862ccd8f7411bc000126 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:17 -0400 +Subject: [PATCH 098/312] [netdrv] net/mlx5: fix spelling mistake "metdata" -> + "metadata" + +Message-id: <20200510150452.10307-53-ahleihel@redhat.com> +Patchwork-id: 306677 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 52/87] net/mlx5: fix spelling mistake "metdata" -> "metadata" +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 9ea7f01f470a25bb795224cc0ecc57c91a1519c6 +Author: Colin Ian King +Date: Tue Nov 5 14:54:16 2019 +0000 + + net/mlx5: fix spelling mistake "metdata" -> "metadata" + + There is a spelling mistake in a esw_warn warning message. Fix it. 
+ + Signed-off-by: Colin Ian King + Reviewed-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 7fe085fa3d29..fe1946b89a11 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1880,7 +1880,7 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, + if (IS_ERR(g)) { + ret = PTR_ERR(g); + esw_warn(esw->dev, +- "Failed to create vport[%d] ingress metdata group, err(%d)\n", ++ "Failed to create vport[%d] ingress metadata group, err(%d)\n", + vport->vport, ret); + goto grp_err; + } +-- +2.13.6 + diff --git a/SOURCES/0099-netdrv-net-mlx5-Dump-of-fw_fatal-use-updated-devlink.patch b/SOURCES/0099-netdrv-net-mlx5-Dump-of-fw_fatal-use-updated-devlink.patch new file mode 100644 index 0000000..760ccb7 --- /dev/null +++ b/SOURCES/0099-netdrv-net-mlx5-Dump-of-fw_fatal-use-updated-devlink.patch @@ -0,0 +1,86 @@ +From c3c632a540ceec478b466227dd94d1ff5be82e26 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:19 -0400 +Subject: [PATCH 099/312] [netdrv] net/mlx5: Dump of fw_fatal use updated + devlink binary interface + +Message-id: <20200510150452.10307-55-ahleihel@redhat.com> +Patchwork-id: 306679 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 54/87] net/mlx5: Dump of fw_fatal use updated devlink binary interface +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit d4e82cf4df300368e8d271a2caf14611a5e176a6 +Author: Aya Levin +Date: Tue Nov 12 14:07:50 2019 +0200 + + net/mlx5: Dump of fw_fatal use updated devlink binary interface + + Remove redundant code from fw_fatal reporter's dump callback. Use + updated devlink interface of binary fmsg pair which breaks the output + into chunks internally. + + Signed-off-by: Aya Levin + Acked-by: Jiri Pirko + Acked-by: Saeed Mahameed + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/health.c | 18 +----------------- + 1 file changed, 1 insertion(+), 17 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c +index c07f3154437c..d6b0a4ef9daf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c +@@ -552,7 +552,6 @@ mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, + return mlx5_health_try_recover(dev); + } + +-#define MLX5_CR_DUMP_CHUNK_SIZE 256 + static int + mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx) +@@ -560,8 +559,6 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, + struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter); + u32 crdump_size = dev->priv.health.crdump_size; + u32 *cr_data; +- u32 data_size; +- u32 offset; + int err; + + if (!mlx5_core_is_pf(dev)) +@@ -582,20 +579,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, + goto free_data; + } + +- err = devlink_fmsg_arr_pair_nest_start(fmsg, "crdump_data"); +- if (err) +- goto free_data; +- for (offset = 0; offset < crdump_size; offset += data_size) { +- if (crdump_size - offset < MLX5_CR_DUMP_CHUNK_SIZE) +- data_size = crdump_size - offset; +- else +- data_size = MLX5_CR_DUMP_CHUNK_SIZE; +- err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, +- data_size); +- if (err) +- goto free_data; +- } +- err = devlink_fmsg_arr_pair_nest_end(fmsg); ++ err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size); + + free_data: + kvfree(cr_data); +-- +2.13.6 + diff --git a/SOURCES/0100-netdrv-net-mlx5-Simplify-fdb-chain-and-prio-eswitch-.patch b/SOURCES/0100-netdrv-net-mlx5-Simplify-fdb-chain-and-prio-eswitch-.patch new file mode 100644 index 0000000..adc07d9 --- /dev/null +++ b/SOURCES/0100-netdrv-net-mlx5-Simplify-fdb-chain-and-prio-eswitch-.patch @@ -0,0 +1,82 @@ +From aacd036ff4feac42054a90ce1974adab336b723c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:21 -0400 +Subject: [PATCH 100/312] [netdrv] net/mlx5: Simplify fdb chain and prio + eswitch defines + +Message-id: <20200510150452.10307-57-ahleihel@redhat.com> +Patchwork-id: 306682 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 56/87] net/mlx5: Simplify fdb chain and prio eswitch defines +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 12063c2e4c0e38f36c0e6f0942cd138feed022b3 +Author: Paul Blakey +Date: Tue Nov 12 00:34:24 2019 +0100 + + net/mlx5: Simplify fdb chain and prio eswitch defines + + FDB_MAX_CHAIN and FDB_MAX_PRIO were defined differently depending + on if CONFIG_MLX5_ESWITCH is enabled to save space on allocations. + + This is a minor space saving, and there is no real need for it. + Simplify things instead, and define them the same in both cases. 
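+
+    In rough before/after terms (values as in the hunk below):
+
+        /* before: two sets of values, chosen by CONFIG_MLX5_ESWITCH */
+        #ifdef CONFIG_MLX5_ESWITCH
+        #define FDB_MAX_CHAIN 3
+        #define FDB_MAX_PRIO  16
+        #else
+        #define FDB_MAX_CHAIN 1
+        #define FDB_MAX_PRIO  1
+        #endif
+
+        /* after: one unconditional set, defined ahead of the #ifdef */
+        #define FDB_MAX_CHAIN       3
+        #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
+        #define FDB_MAX_PRIO        16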
+ + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 12 ++++-------- + 1 file changed, 4 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 963d0df0d66b..628925cb385f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -43,6 +43,10 @@ + #include + #include "lib/mpfs.h" + ++#define FDB_MAX_CHAIN 3 ++#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) ++#define FDB_MAX_PRIO 16 ++ + #ifdef CONFIG_MLX5_ESWITCH + + #define MLX5_MAX_UC_PER_VPORT(dev) \ +@@ -59,10 +63,6 @@ + #define mlx5_esw_has_fwd_fdb(dev) \ + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) + +-#define FDB_MAX_CHAIN 3 +-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) +-#define FDB_MAX_PRIO 16 +- + struct vport_ingress { + struct mlx5_flow_table *acl; + #ifdef __GENKSYMS__ +@@ -656,10 +656,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) + + static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} + +-#define FDB_MAX_CHAIN 1 +-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) +-#define FDB_MAX_PRIO 1 +- + #endif /* CONFIG_MLX5_ESWITCH */ + + #endif /* __MLX5_ESWITCH_H__ */ +-- +2.13.6 + diff --git a/SOURCES/0101-netdrv-net-mlx5-Rename-FDB_-tc-related-defines-to-FD.patch b/SOURCES/0101-netdrv-net-mlx5-Rename-FDB_-tc-related-defines-to-FD.patch new file mode 100644 index 0000000..341a2b5 --- /dev/null +++ b/SOURCES/0101-netdrv-net-mlx5-Rename-FDB_-tc-related-defines-to-FD.patch @@ -0,0 +1,181 @@ +From 609bffc51ba95f81dac7cb26fd035a1a9c0e13d2 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:22 -0400 +Subject: [PATCH 101/312] [netdrv] net/mlx5: Rename FDB_* tc related defines to + FDB_TC_* defines + +Message-id: <20200510150452.10307-58-ahleihel@redhat.com> +Patchwork-id: 306680 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 57/87] net/mlx5: Rename FDB_* tc related defines to FDB_TC_* defines +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 2cf2954bd7ffd8250ae257b45b96915003c26d7d +Author: Paul Blakey +Date: Tue Nov 12 00:34:25 2019 +0100 + + net/mlx5: Rename FDB_* tc related defines to FDB_TC_* defines + + Rename it to prepare for next patch that will add a + different type of offload to the FDB. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8 ++++---- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 10 +++++----- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 8 ++++---- + 4 files changed, 15 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 1a4b8d995826..0bc017569822 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1074,7 +1074,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); + slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + slow_attr->split_count = 0; +- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; ++ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; + + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); + if (!IS_ERR(rule)) +@@ -1091,7 +1091,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); + slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + slow_attr->split_count = 0; +- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; ++ slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; + mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); + flow_flag_clear(flow, SLOW); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 628925cb385f..f18fad32a20c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -43,9 +43,9 @@ + #include + #include "lib/mpfs.h" + +-#define FDB_MAX_CHAIN 3 +-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) +-#define FDB_MAX_PRIO 16 ++#define FDB_TC_MAX_CHAIN 3 ++#define FDB_TC_SLOW_PATH_CHAIN (FDB_TC_MAX_CHAIN + 1) ++#define FDB_TC_MAX_PRIO 16 + + #ifdef CONFIG_MLX5_ESWITCH + +@@ -192,7 +192,7 @@ struct mlx5_eswitch_fdb { + struct { + struct mlx5_flow_table *fdb; + u32 num_rules; +- } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS]; ++ } fdb_prio[FDB_TC_MAX_CHAIN + 1][FDB_TC_MAX_PRIO + 1][PRIO_LEVELS]; + /* Protects fdb_prio table */ + struct mutex fdb_prio_lock; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index fe1946b89a11..9e59fb7ad68f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw) + u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) + { + if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) +- return FDB_MAX_CHAIN; ++ return FDB_TC_MAX_CHAIN; + + return 0; + } +@@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) + u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) + { + if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) +- return FDB_MAX_PRIO; ++ return FDB_TC_MAX_PRIO; + + return 1; + } +@@ -927,7 +927,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) + int table_prio, l = 0; + u32 flags = 0; + +- if (chain == FDB_SLOW_PATH_CHAIN) ++ if (chain == 
FDB_TC_SLOW_PATH_CHAIN) + return esw->fdb_table.offloads.slow_fdb; + + mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); +@@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) + flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); + +- table_prio = (chain * FDB_MAX_PRIO) + prio - 1; ++ table_prio = (chain * FDB_TC_MAX_PRIO) + prio - 1; + + /* create earlier levels for correct fs_core lookup when + * connecting tables +@@ -989,7 +989,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) + { + int l; + +- if (chain == FDB_SLOW_PATH_CHAIN) ++ if (chain == FDB_TC_SLOW_PATH_CHAIN) + return; + + mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 6e1ef05becce..56faf8e6e9ae 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2609,7 +2609,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + return -ENOMEM; + + steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) * +- (FDB_MAX_CHAIN + 1), GFP_KERNEL); ++ (FDB_TC_MAX_CHAIN + 1), GFP_KERNEL); + if (!steering->fdb_sub_ns) + return -ENOMEM; + +@@ -2620,7 +2620,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + goto out_err; + } + +- levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1); ++ levels = 2 * FDB_TC_MAX_PRIO * (FDB_TC_MAX_CHAIN + 1); + maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, + FDB_FAST_PATH, + levels); +@@ -2629,14 +2629,14 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + goto out_err; + } + +- for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) { ++ for (chain = 0; chain <= FDB_TC_MAX_CHAIN; chain++) { + ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); + if (IS_ERR(ns)) { + err = PTR_ERR(ns); + goto out_err; + } + +- for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) { ++ for (prio = 0; prio < FDB_TC_MAX_PRIO * (chain + 1); prio++) { + min_prio = fs_create_prio(ns, prio, 2); + if (IS_ERR(min_prio)) { + err = PTR_ERR(min_prio); +-- +2.13.6 + diff --git a/SOURCES/0102-netdrv-net-mlx5-Define-fdb-tc-levels-per-prio.patch b/SOURCES/0102-netdrv-net-mlx5-Define-fdb-tc-levels-per-prio.patch new file mode 100644 index 0000000..abcc3ec --- /dev/null +++ b/SOURCES/0102-netdrv-net-mlx5-Define-fdb-tc-levels-per-prio.patch @@ -0,0 +1,95 @@ +From 1d9d347b493116ebe3b0817174f1734cdd66fc2a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:23 -0400 +Subject: [PATCH 102/312] [netdrv] net/mlx5: Define fdb tc levels per prio + +Message-id: <20200510150452.10307-59-ahleihel@redhat.com> +Patchwork-id: 306681 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 58/87] net/mlx5: Define fdb tc levels per prio +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 4db7b98e943225dc2a7435811767e44f63640462 +Author: Paul Blakey +Date: Tue Nov 12 00:34:26 2019 +0100 + + net/mlx5: Define fdb tc levels per prio + + Define FDB_TC_LEVELS_PER_PRIO instead of magic number 2. + This is the number of levels used by each tc prio table in the fdb. 
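+
+    A short illustration of where the constant lands (formula as in the
+    hunk below):
+
+        #define FDB_TC_LEVELS_PER_PRIO 2   /* replaces the bare "2" */
+
+        /* init_fdb_root_ns(): levels reserved for the FDB_FAST_PATH prio */
+        levels = FDB_TC_LEVELS_PER_PRIO *
+                 FDB_TC_MAX_PRIO * (FDB_TC_MAX_CHAIN + 1);
+        /* with the current defines: 2 * 16 * (3 + 1) = 128 levels */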
+ + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 ++++-- + 2 files changed, 6 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index f18fad32a20c..e2c1555a6a73 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -46,6 +46,7 @@ + #define FDB_TC_MAX_CHAIN 3 + #define FDB_TC_SLOW_PATH_CHAIN (FDB_TC_MAX_CHAIN + 1) + #define FDB_TC_MAX_PRIO 16 ++#define FDB_TC_LEVELS_PER_PRIO 2 + + #ifdef CONFIG_MLX5_ESWITCH + +@@ -165,7 +166,6 @@ enum offloads_fdb_flags { + + extern const unsigned int ESW_POOLS[4]; + +-#define PRIO_LEVELS 2 + struct mlx5_eswitch_fdb { + union { + struct legacy_fdb { +@@ -192,7 +192,7 @@ struct mlx5_eswitch_fdb { + struct { + struct mlx5_flow_table *fdb; + u32 num_rules; +- } fdb_prio[FDB_TC_MAX_CHAIN + 1][FDB_TC_MAX_PRIO + 1][PRIO_LEVELS]; ++ } fdb_prio[FDB_TC_MAX_CHAIN + 1][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; + /* Protects fdb_prio table */ + struct mutex fdb_prio_lock; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 56faf8e6e9ae..ce4774560c56 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2620,7 +2620,8 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + goto out_err; + } + +- levels = 2 * FDB_TC_MAX_PRIO * (FDB_TC_MAX_CHAIN + 1); ++ levels = FDB_TC_LEVELS_PER_PRIO * ++ FDB_TC_MAX_PRIO * (FDB_TC_MAX_CHAIN + 1); + maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, + FDB_FAST_PATH, + levels); +@@ -2637,7 +2638,8 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + } + + for (prio = 0; prio < FDB_TC_MAX_PRIO * (chain + 1); prio++) { +- min_prio = fs_create_prio(ns, prio, 2); ++ min_prio = fs_create_prio(ns, prio, ++ FDB_TC_LEVELS_PER_PRIO); + if (IS_ERR(min_prio)) { + err = PTR_ERR(min_prio); + goto out_err; +-- +2.13.6 + diff --git a/SOURCES/0103-netdrv-net-mlx5-Accumulate-levels-for-chains-prio-na.patch b/SOURCES/0103-netdrv-net-mlx5-Accumulate-levels-for-chains-prio-na.patch new file mode 100644 index 0000000..9d9482e --- /dev/null +++ b/SOURCES/0103-netdrv-net-mlx5-Accumulate-levels-for-chains-prio-na.patch @@ -0,0 +1,89 @@ +From 147ff36227e0ae4feefbe51315598cf96f01e3d8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:24 -0400 +Subject: [PATCH 103/312] [netdrv] net/mlx5: Accumulate levels for chains prio + namespaces + +Message-id: <20200510150452.10307-60-ahleihel@redhat.com> +Patchwork-id: 306683 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 59/87] net/mlx5: Accumulate levels for chains prio namespaces +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 34b13cb3eaa5ad205f4497da6420262da4940b9e +Author: Paul Blakey +Date: Tue Nov 12 00:34:27 2019 +0100 + + net/mlx5: Accumulate levels for chains prio namespaces + + Tc chains are implemented by creating a chained prio steering type, and + inside it there is a namespace for each chain 
(FDB_TC_MAX_CHAINS). Each + of those has a list of priorities. + + Currently, all namespaces in a prio start at the parent prio level. + But since we can jump from chain (namespace) to another chain in the + same prio, we need the levels for higher chains to be higher as well. + So we created unused prios to account for levels in previous namespaces. + + Fix that by accumulating the namespaces levels if we are inside a chained + type prio, and removing the unused prios. + + Fixes: 328edb499f99 ('net/mlx5: Split FDB fast path prio to multiple namespaces') + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10 +++++++++- + 2 files changed, 10 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 9e59fb7ad68f..0f0d8decb04c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -952,7 +952,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) + flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); + +- table_prio = (chain * FDB_TC_MAX_PRIO) + prio - 1; ++ table_prio = prio - 1; + + /* create earlier levels for correct fs_core lookup when + * connecting tables +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index ce4774560c56..9411b17cdeb5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2403,9 +2403,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) + int acc_level_ns = acc_level; + + prio->start_level = acc_level; +- fs_for_each_ns(ns, prio) ++ fs_for_each_ns(ns, prio) { + /* This updates start_level and num_levels of ns's priority descendants */ + acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); ++ ++ /* If this a prio with chains, and we can jump from one chain ++ * (namepsace) to another, so we accumulate the levels ++ */ ++ if (prio->node.type == FS_TYPE_PRIO_CHAINS) ++ acc_level = acc_level_ns; ++ } ++ + if (!prio->num_levels) + prio->num_levels = acc_level_ns - prio->start_level; + WARN_ON(prio->num_levels < acc_level_ns - prio->start_level); +-- +2.13.6 + diff --git a/SOURCES/0104-netdrv-net-mlx5-Refactor-creating-fast-path-prio-cha.patch b/SOURCES/0104-netdrv-net-mlx5-Refactor-creating-fast-path-prio-cha.patch new file mode 100644 index 0000000..2b05ae1 --- /dev/null +++ b/SOURCES/0104-netdrv-net-mlx5-Refactor-creating-fast-path-prio-cha.patch @@ -0,0 +1,189 @@ +From 17f63d3c2a1820ae7398573af125ff1612428e74 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:25 -0400 +Subject: [PATCH 104/312] [netdrv] net/mlx5: Refactor creating fast path prio + chains + +Message-id: <20200510150452.10307-61-ahleihel@redhat.com> +Patchwork-id: 306685 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 60/87] net/mlx5: Refactor creating fast path prio chains +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 
439e843f1f43640fd52530433d803db8585cd028 +Author: Paul Blakey +Date: Tue Nov 12 00:34:28 2019 +0100 + + net/mlx5: Refactor creating fast path prio chains + + Next patch will re-use this to add a new chain but in a + different prio. + + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 118 +++++++++++++++------- + 1 file changed, 82 insertions(+), 36 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 9411b17cdeb5..606aa32bfa3a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2602,60 +2602,106 @@ static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) + steering->rdma_rx_root_ns = NULL; + return err; + } +-static int init_fdb_root_ns(struct mlx5_flow_steering *steering) ++ ++/* FT and tc chains are stored in the same array so we can re-use the ++ * mlx5_get_fdb_sub_ns() and tc api for FT chains. ++ * When creating a new ns for each chain store it in the first available slot. ++ * Assume tc chains are created and stored first and only then the FT chain. ++ */ ++static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering, ++ struct mlx5_flow_namespace *ns) ++{ ++ int chain = 0; ++ ++ while (steering->fdb_sub_ns[chain]) ++ ++chain; ++ ++ steering->fdb_sub_ns[chain] = ns; ++} ++ ++static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering, ++ struct fs_prio *maj_prio) + { + struct mlx5_flow_namespace *ns; +- struct fs_prio *maj_prio; + struct fs_prio *min_prio; ++ int prio; ++ ++ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); ++ if (IS_ERR(ns)) ++ return PTR_ERR(ns); ++ ++ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) { ++ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO); ++ if (IS_ERR(min_prio)) ++ return PTR_ERR(min_prio); ++ } ++ ++ store_fdb_sub_ns_prio_chain(steering, ns); ++ ++ return 0; ++} ++ ++static int create_fdb_chains(struct mlx5_flow_steering *steering, ++ int fs_prio, ++ int chains) ++{ ++ struct fs_prio *maj_prio; + int levels; + int chain; +- int prio; + int err; + +- steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); +- if (!steering->fdb_root_ns) +- return -ENOMEM; ++ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains; ++ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, ++ fs_prio, ++ levels); ++ if (IS_ERR(maj_prio)) ++ return PTR_ERR(maj_prio); ++ ++ for (chain = 0; chain < chains; chain++) { ++ err = create_fdb_sub_ns_prio_chain(steering, maj_prio); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} + +- steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) * +- (FDB_TC_MAX_CHAIN + 1), GFP_KERNEL); ++static int create_fdb_fast_path(struct mlx5_flow_steering *steering) ++{ ++ const int total_chains = FDB_TC_MAX_CHAIN + 1; ++ int err; ++ ++ steering->fdb_sub_ns = kcalloc(total_chains, ++ sizeof(*steering->fdb_sub_ns), ++ GFP_KERNEL); + if (!steering->fdb_sub_ns) + return -ENOMEM; + ++ err = create_fdb_chains(steering, FDB_FAST_PATH, FDB_TC_MAX_CHAIN + 1); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++static int init_fdb_root_ns(struct mlx5_flow_steering *steering) ++{ ++ struct fs_prio *maj_prio; ++ int err; ++ ++ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); ++ if (!steering->fdb_root_ns) ++ 
return -ENOMEM; ++ + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, + 1); + if (IS_ERR(maj_prio)) { + err = PTR_ERR(maj_prio); + goto out_err; + } +- +- levels = FDB_TC_LEVELS_PER_PRIO * +- FDB_TC_MAX_PRIO * (FDB_TC_MAX_CHAIN + 1); +- maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, +- FDB_FAST_PATH, +- levels); +- if (IS_ERR(maj_prio)) { +- err = PTR_ERR(maj_prio); ++ err = create_fdb_fast_path(steering); ++ if (err) + goto out_err; +- } +- +- for (chain = 0; chain <= FDB_TC_MAX_CHAIN; chain++) { +- ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF); +- if (IS_ERR(ns)) { +- err = PTR_ERR(ns); +- goto out_err; +- } +- +- for (prio = 0; prio < FDB_TC_MAX_PRIO * (chain + 1); prio++) { +- min_prio = fs_create_prio(ns, prio, +- FDB_TC_LEVELS_PER_PRIO); +- if (IS_ERR(min_prio)) { +- err = PTR_ERR(min_prio); +- goto out_err; +- } +- } +- +- steering->fdb_sub_ns[chain] = ns; +- } + + maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1); + if (IS_ERR(maj_prio)) { +-- +2.13.6 + diff --git a/SOURCES/0105-netdrv-net-mlx5-Add-new-chain-for-netfilter-flow-tab.patch b/SOURCES/0105-netdrv-net-mlx5-Add-new-chain-for-netfilter-flow-tab.patch new file mode 100644 index 0000000..2790d0b --- /dev/null +++ b/SOURCES/0105-netdrv-net-mlx5-Add-new-chain-for-netfilter-flow-tab.patch @@ -0,0 +1,127 @@ +From 669687247f34d6fd86192bedd8aa8afa3b04b905 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:26 -0400 +Subject: [PATCH 105/312] [netdrv] net/mlx5: Add new chain for netfilter flow + table offload + +Message-id: <20200510150452.10307-62-ahleihel@redhat.com> +Patchwork-id: 306686 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 61/87] net/mlx5: Add new chain for netfilter flow table offload +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 975b992fdd4b38028d7c1dcf38286d6e7991c1b2 +Author: Paul Blakey +Date: Tue Nov 12 00:34:29 2019 +0100 + + net/mlx5: Add new chain for netfilter flow table offload + + Netfilter tables (nftables) implements a software datapath that + comes after tc ingress datapath. The datapath supports offloading + such rules via the flow table offload API. + + This API is currently only used by NFT and it doesn't provide the + global priority in regards to tc offload, so we assume offloading such + rules must come after tc. It does provide a flow table priority + parameter, so we need to provide some supported priority range. + + For that, split fastpath prio to two, flow table offload and tc offload, + with one dedicated priority chain for flow table offload. + + Next patch will re-use the multi chain API to access this chain by + allowing access to this chain by the fdb_sub_namespace. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 11 ++++++++++- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 9 ++++++--- + include/linux/mlx5/fs.h | 3 ++- + 3 files changed, 18 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index e2c1555a6a73..d9c3b8767224 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -44,7 +44,12 @@ + #include "lib/mpfs.h" + + #define FDB_TC_MAX_CHAIN 3 +-#define FDB_TC_SLOW_PATH_CHAIN (FDB_TC_MAX_CHAIN + 1) ++#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) ++#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) ++ ++/* The index of the last real chain (FT) + 1 as chain zero is valid as well */ ++#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) ++ + #define FDB_TC_MAX_PRIO 16 + #define FDB_TC_LEVELS_PER_PRIO 2 + +@@ -192,7 +197,11 @@ struct mlx5_eswitch_fdb { + struct { + struct mlx5_flow_table *fdb; + u32 num_rules; ++#ifndef __GENKSYMS__ ++ } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; ++#else + } fdb_prio[FDB_TC_MAX_CHAIN + 1][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; ++#endif + /* Protects fdb_prio table */ + struct mutex fdb_prio_lock; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 606aa32bfa3a..bf0cea1e294b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2668,16 +2668,19 @@ static int create_fdb_chains(struct mlx5_flow_steering *steering, + + static int create_fdb_fast_path(struct mlx5_flow_steering *steering) + { +- const int total_chains = FDB_TC_MAX_CHAIN + 1; + int err; + +- steering->fdb_sub_ns = kcalloc(total_chains, ++ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS, + sizeof(*steering->fdb_sub_ns), + GFP_KERNEL); + if (!steering->fdb_sub_ns) + return -ENOMEM; + +- err = create_fdb_chains(steering, FDB_FAST_PATH, FDB_TC_MAX_CHAIN + 1); ++ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1); ++ if (err) ++ return err; ++ ++ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1); + if (err) + return err; + +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index 948cba3389ff..bb66d1da0ef3 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -82,7 +82,8 @@ enum mlx5_flow_namespace_type { + + enum { + FDB_BYPASS_PATH, +- FDB_FAST_PATH, ++ FDB_TC_OFFLOAD, ++ FDB_FT_OFFLOAD, + FDB_SLOW_PATH, + }; + +-- +2.13.6 + diff --git a/SOURCES/0106-netdrv-net-mlx5-Remove-redundant-NULL-initialization.patch b/SOURCES/0106-netdrv-net-mlx5-Remove-redundant-NULL-initialization.patch new file mode 100644 index 0000000..db7e148 --- /dev/null +++ b/SOURCES/0106-netdrv-net-mlx5-Remove-redundant-NULL-initialization.patch @@ -0,0 +1,93 @@ +From 74070ba1fa2d9560c642c8f985236aa62b65c946 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:27 -0400 +Subject: [PATCH 106/312] [netdrv] net/mlx5: Remove redundant NULL + initializations + +Message-id: <20200510150452.10307-63-ahleihel@redhat.com> +Patchwork-id: 306684 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 62/87] net/mlx5: Remove redundant NULL initializations +Bugzilla: 1789380 +RH-Acked-by: Kamal 
Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c + Fix may be used uninitialized warning in function mlx5e_tc_tun_create_header_ipv6. + This fix was taken from the following upstream merge commit: + 95e6ba513316 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net") + +commit e6014afd1c5717d556778ec1307cf7ab27ba5a2d +Author: Eli Cohen +Date: Wed Oct 30 16:48:15 2019 +0200 + + net/mlx5: Remove redundant NULL initializations + + Neighbour initializations to NULL are not necessary as the pointers are + not used if an error is returned, and if success returned, pointers are + initialized. + + Signed-off-by: Eli Cohen + Reviewed-by: Vlad Buslov + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index c2a4b6710f74..92559a758d07 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -77,8 +77,8 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, + struct neighbour **out_n, + u8 *out_ttl) + { ++ struct neighbour *n; + struct rtable *rt; +- struct neighbour *n = NULL; + + #if IS_ENABLED(CONFIG_INET) + struct mlx5_core_dev *mdev = priv->mdev; +@@ -138,8 +138,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct neighbour **out_n, + u8 *out_ttl) + { +- struct neighbour *n = NULL; + struct dst_entry *dst; ++ struct neighbour *n; + + #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + int ret; +@@ -212,8 +212,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + const struct ip_tunnel_key *tun_key = &e->tun_info->key; + struct net_device *out_dev, *route_dev; +- struct neighbour *n = NULL; + struct flowi4 fl4 = {}; ++ struct neighbour *n; + int ipv4_encap_size; + char *encap_header; + u8 nud_state, ttl; +@@ -331,9 +331,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + const struct ip_tunnel_key *tun_key = &e->tun_info->key; + struct net_device *out_dev, *route_dev; +- struct neighbour *n = NULL; + struct flowi6 fl6 = {}; + struct ipv6hdr *ip6h; ++ struct neighbour *n = NULL; + int ipv6_encap_size; + char *encap_header; + u8 nud_state, ttl; +-- +2.13.6 + diff --git a/SOURCES/0107-netdrv-net-mlx5-Don-t-write-read-only-fields-in-MODI.patch b/SOURCES/0107-netdrv-net-mlx5-Don-t-write-read-only-fields-in-MODI.patch new file mode 100644 index 0000000..eaa000c --- /dev/null +++ b/SOURCES/0107-netdrv-net-mlx5-Don-t-write-read-only-fields-in-MODI.patch @@ -0,0 +1,80 @@ +From a41e9dba86d101d1d26eae83b2f4d9ad30d1cbfe Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:31 -0400 +Subject: [PATCH 107/312] [netdrv] net/mlx5: Don't write read-only fields in + MODIFY_HCA_VPORT_CONTEXT command + +Message-id: <20200510150452.10307-67-ahleihel@redhat.com> +Patchwork-id: 306690 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 66/87] net/mlx5: Don't write read-only fields in MODIFY_HCA_VPORT_CONTEXT command 
+Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit ab118da4c10a70b8437f5c90ab77adae1835963e +Author: Leon Romanovsky +Date: Wed Nov 13 12:03:47 2019 +0200 + + net/mlx5: Don't write read-only fields in MODIFY_HCA_VPORT_CONTEXT command + + The MODIFY_HCA_VPORT_CONTEXT uses field_selector to mask fields needed + to be written, other fields are required to be zero according to the + HW specification. The supported fields are controlled by bitfield + and limited to vport state, node and port GUIDs. + + Signed-off-by: Leon Romanovsky + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/vport.c | 27 +++++++------------------ + 1 file changed, 7 insertions(+), 20 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +index 30f7848a6f88..1faac31f74d0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +@@ -1064,26 +1064,13 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, + + ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context); + MLX5_SET(hca_vport_context, ctx, field_select, req->field_select); +- MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware); +- MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi); +- MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw); +- MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy); +- MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state); +- MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state); +- MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid); +- MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid); +- MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1); +- MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm); +- MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2); +- MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm); +- MLX5_SET(hca_vport_context, ctx, lid, req->lid); +- MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply); +- MLX5_SET(hca_vport_context, ctx, lmc, req->lmc); +- MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout); +- MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid); +- MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl); +- MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter); +- MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter); ++ if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY) ++ MLX5_SET(hca_vport_context, ctx, vport_state_policy, ++ req->policy); ++ if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID) ++ MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid); ++ if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID) ++ MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + ex: + kfree(in); +-- +2.13.6 + diff --git a/SOURCES/0108-netdrv-net-mlx5-DR-Refactor-VXLAN-GPE-flex-parser-tu.patch b/SOURCES/0108-netdrv-net-mlx5-DR-Refactor-VXLAN-GPE-flex-parser-tu.patch new file mode 100644 index 0000000..59fc0c7 --- /dev/null +++ 
b/SOURCES/0108-netdrv-net-mlx5-DR-Refactor-VXLAN-GPE-flex-parser-tu.patch @@ -0,0 +1,256 @@ +From de7ecc45d9cd3da13c9d9b7816abb7d9713b531f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:32 -0400 +Subject: [PATCH 108/312] [netdrv] net/mlx5: DR, Refactor VXLAN GPE flex parser + tunnel code for SW steering + +Message-id: <20200510150452.10307-68-ahleihel@redhat.com> +Patchwork-id: 306691 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 67/87] net/mlx5: DR, Refactor VXLAN GPE flex parser tunnel code for SW steering +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 6e9e286e4ad53311b51b28fdc4b952ab7d2520c4 +Author: Yevgeny Kliteynik +Date: Mon Oct 28 16:30:27 2019 +0200 + + net/mlx5: DR, Refactor VXLAN GPE flex parser tunnel code for SW steering + + Refactor flex parser tunnel code: + - Add definition for flex parser tunneling header for VXLAN-GPE + - Use macros for VXLAN-GPE SW steering when building STE + - Refactor the code to reflect that this is a VXLAN GPE + only code and not a general flex parser code. + This also significantly simplifies addition of more + flex parser protocols, such as Geneve. + + Signed-off-by: Yevgeny Kliteynik + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../mellanox/mlx5/core/steering/dr_matcher.c | 33 ++++++---- + .../ethernet/mellanox/mlx5/core/steering/dr_ste.c | 73 +++++++++------------- + .../mellanox/mlx5/core/steering/dr_types.h | 6 +- + .../mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 11 ++++ + 4 files changed, 66 insertions(+), 57 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +index c6548980daf0..f177c468b740 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +@@ -102,13 +102,29 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc) + DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \ + DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp)) + +-static bool dr_mask_is_flex_parser_tnl_set(struct mlx5dr_match_misc3 *misc3) ++static bool ++dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) + { + return (misc3->outer_vxlan_gpe_vni || + misc3->outer_vxlan_gpe_next_protocol || + misc3->outer_vxlan_gpe_flags); + } + ++static bool ++dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps) ++{ ++ return caps->flex_protocols & ++ MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; ++} ++ ++static bool ++dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask, ++ struct mlx5dr_domain *dmn) ++{ ++ return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) && ++ dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps); ++} ++ + static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3) + { + return (misc3->icmpv6_type || misc3->icmpv6_code || +@@ -137,13 +153,6 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc) + return (misc->source_sqn || misc->source_port); + } + +-static bool +-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_domain *dmn) +-{ +- return dmn->info.caps.flex_protocols & +- MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; +-} +- + int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, + struct 
mlx5dr_matcher_rx_tx *nic_matcher, + enum mlx5dr_ipv outer_ipv, +@@ -262,10 +271,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, + inner, rx); + } + +- if (dr_mask_is_flex_parser_tnl_set(&mask.misc3) && +- dr_matcher_supp_flex_parser_vxlan_gpe(dmn)) +- mlx5dr_ste_build_flex_parser_tnl(&sb[idx++], &mask, +- inner, rx); ++ if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn)) ++ mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++], ++ &mask, ++ inner, rx); + + if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) + mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +index 813e256a599e..496e680e0fd9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +@@ -2083,68 +2083,57 @@ void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb, + sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag; + } + +-static void dr_ste_build_flex_parser_tnl_bit_mask(struct mlx5dr_match_param *value, +- bool inner, u8 *bit_mask) ++static void ++dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value, ++ bool inner, u8 *bit_mask) + { + struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3; + +- if (misc_3_mask->outer_vxlan_gpe_flags || +- misc_3_mask->outer_vxlan_gpe_next_protocol) { +- MLX5_SET(ste_flex_parser_tnl, bit_mask, +- flex_parser_tunneling_header_63_32, +- (misc_3_mask->outer_vxlan_gpe_flags << 24) | +- (misc_3_mask->outer_vxlan_gpe_next_protocol)); +- misc_3_mask->outer_vxlan_gpe_flags = 0; +- misc_3_mask->outer_vxlan_gpe_next_protocol = 0; +- } +- +- if (misc_3_mask->outer_vxlan_gpe_vni) { +- MLX5_SET(ste_flex_parser_tnl, bit_mask, +- flex_parser_tunneling_header_31_0, +- misc_3_mask->outer_vxlan_gpe_vni << 8); +- misc_3_mask->outer_vxlan_gpe_vni = 0; +- } ++ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask, ++ outer_vxlan_gpe_flags, ++ misc_3_mask, outer_vxlan_gpe_flags); ++ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask, ++ outer_vxlan_gpe_next_protocol, ++ misc_3_mask, outer_vxlan_gpe_next_protocol); ++ DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask, ++ outer_vxlan_gpe_vni, ++ misc_3_mask, outer_vxlan_gpe_vni); + } + +-static int dr_ste_build_flex_parser_tnl_tag(struct mlx5dr_match_param *value, +- struct mlx5dr_ste_build *sb, +- u8 *hw_ste_p) ++static int ++dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, ++ struct mlx5dr_ste_build *sb, ++ u8 *hw_ste_p) + { + struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; + struct mlx5dr_match_misc3 *misc3 = &value->misc3; + u8 *tag = hw_ste->tag; + +- if (misc3->outer_vxlan_gpe_flags || +- misc3->outer_vxlan_gpe_next_protocol) { +- MLX5_SET(ste_flex_parser_tnl, tag, +- flex_parser_tunneling_header_63_32, +- (misc3->outer_vxlan_gpe_flags << 24) | +- (misc3->outer_vxlan_gpe_next_protocol)); +- misc3->outer_vxlan_gpe_flags = 0; +- misc3->outer_vxlan_gpe_next_protocol = 0; +- } +- +- if (misc3->outer_vxlan_gpe_vni) { +- MLX5_SET(ste_flex_parser_tnl, tag, +- flex_parser_tunneling_header_31_0, +- misc3->outer_vxlan_gpe_vni << 8); +- misc3->outer_vxlan_gpe_vni = 0; +- } ++ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, ++ outer_vxlan_gpe_flags, misc3, ++ outer_vxlan_gpe_flags); ++ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, ++ outer_vxlan_gpe_next_protocol, misc3, ++ outer_vxlan_gpe_next_protocol); ++ 
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag, ++ outer_vxlan_gpe_vni, misc3, ++ outer_vxlan_gpe_vni); + + return 0; + } + +-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb, +- struct mlx5dr_match_param *mask, +- bool inner, bool rx) ++void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, ++ struct mlx5dr_match_param *mask, ++ bool inner, bool rx) + { +- dr_ste_build_flex_parser_tnl_bit_mask(mask, inner, sb->bit_mask); ++ dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner, ++ sb->bit_mask); + + sb->rx = rx; + sb->inner = inner; + sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER; + sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); +- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_tag; ++ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag; + } + + static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +index b18720707255..9dedc162756c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +@@ -327,9 +327,9 @@ int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +-void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb, +- struct mlx5dr_match_param *mask, +- bool inner, bool rx); ++void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, ++ struct mlx5dr_match_param *mask, ++ bool inner, bool rx); + void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +index 596c927220d9..6d78b027fe56 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +@@ -548,6 +548,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_bits { + u8 reserved_at_40[0x40]; + }; + ++struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits { ++ u8 outer_vxlan_gpe_flags[0x8]; ++ u8 reserved_at_8[0x10]; ++ u8 outer_vxlan_gpe_next_protocol[0x8]; ++ ++ u8 outer_vxlan_gpe_vni[0x18]; ++ u8 reserved_at_38[0x8]; ++ ++ u8 reserved_at_40[0x40]; ++}; ++ + struct mlx5_ifc_ste_general_purpose_bits { + u8 general_purpose_lookup_field[0x20]; + +-- +2.13.6 + diff --git a/SOURCES/0109-netdrv-net-mlx5-DR-Add-HW-bits-and-definitions-for-G.patch b/SOURCES/0109-netdrv-net-mlx5-DR-Add-HW-bits-and-definitions-for-G.patch new file mode 100644 index 0000000..e69c3ba --- /dev/null +++ b/SOURCES/0109-netdrv-net-mlx5-DR-Add-HW-bits-and-definitions-for-G.patch @@ -0,0 +1,77 @@ +From 05c740d36ff80368cce999cd1f694cfa5dd7ab4b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:33 -0400 +Subject: [PATCH 109/312] [netdrv] net/mlx5: DR, Add HW bits and definitions + for Geneve flex parser + +Message-id: <20200510150452.10307-69-ahleihel@redhat.com> +Patchwork-id: 306692 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 68/87] net/mlx5: DR, Add HW bits and definitions for Geneve flex parser +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: 
v5.5-rc1 + +commit a18fab48dbacbb7ff104a13e987778b7995bec07 +Author: Yevgeny Kliteynik +Date: Mon Oct 28 16:58:53 2019 +0200 + + net/mlx5: DR, Add HW bits and definitions for Geneve flex parser + + Add definition for flex parser tunneling header for Geneve. + + Signed-off-by: Yevgeny Kliteynik + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 13 +++++++++++++ + include/linux/mlx5/mlx5_ifc.h | 1 + + 2 files changed, 14 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +index 6d78b027fe56..1722f4668269 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +@@ -559,6 +559,19 @@ struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits { + u8 reserved_at_40[0x40]; + }; + ++struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits { ++ u8 reserved_at_0[0x2]; ++ u8 geneve_opt_len[0x6]; ++ u8 geneve_oam[0x1]; ++ u8 reserved_at_9[0x7]; ++ u8 geneve_protocol_type[0x10]; ++ ++ u8 geneve_vni[0x18]; ++ u8 reserved_at_38[0x8]; ++ ++ u8 reserved_at_40[0x40]; ++}; ++ + struct mlx5_ifc_ste_general_purpose_bits { + u8 general_purpose_lookup_field[0x20]; + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index a77ca587c3cc..4c703796c233 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1113,6 +1113,7 @@ enum { + }; + + enum { ++ MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3, + MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, + MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, + MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, +-- +2.13.6 + diff --git a/SOURCES/0110-netdrv-net-mlx5-DR-Add-support-for-Geneve-packets-SW.patch b/SOURCES/0110-netdrv-net-mlx5-DR-Add-support-for-Geneve-packets-SW.patch new file mode 100644 index 0000000..3e93392 --- /dev/null +++ b/SOURCES/0110-netdrv-net-mlx5-DR-Add-support-for-Geneve-packets-SW.patch @@ -0,0 +1,169 @@ +From 4470dc0119dc1193bdced8bedfacca93bcc8d92b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:34 -0400 +Subject: [PATCH 110/312] [netdrv] net/mlx5: DR, Add support for Geneve packets + SW steering + +Message-id: <20200510150452.10307-70-ahleihel@redhat.com> +Patchwork-id: 306693 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 69/87] net/mlx5: DR, Add support for Geneve packets SW steering +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit b6d12238459d2f3c1140689c8fbb1bf1e0fe1927 +Author: Yevgeny Kliteynik +Date: Mon Oct 28 17:22:06 2019 +0200 + + net/mlx5: DR, Add support for Geneve packets SW steering + + Add support for SW steering matching on Geneve header fields: + - VNI + - OAM + - protocol type + - options length + + Signed-off-by: Yevgeny Kliteynik + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../mellanox/mlx5/core/steering/dr_matcher.c | 27 +++++++++++ + .../ethernet/mellanox/mlx5/core/steering/dr_ste.c | 53 ++++++++++++++++++++++ + .../mellanox/mlx5/core/steering/dr_types.h | 3 ++ + 3 files changed, 83 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +index f177c468b740..c6dbd856df94 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +@@ -125,6 +125,29 @@ dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask, + dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps); + } + ++static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc) ++{ ++ return misc->geneve_vni || ++ misc->geneve_oam || ++ misc->geneve_protocol_type || ++ misc->geneve_opt_len; ++} ++ ++static bool ++dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps) ++{ ++ return caps->flex_protocols & ++ MLX5_FLEX_PARSER_GENEVE_ENABLED; ++} ++ ++static bool ++dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask, ++ struct mlx5dr_domain *dmn) ++{ ++ return dr_mask_is_misc_geneve_set(&mask->misc) && ++ dr_matcher_supp_flex_parser_geneve(&dmn->info.caps); ++} ++ + static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3) + { + return (misc3->icmpv6_type || misc3->icmpv6_code || +@@ -275,6 +298,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, + mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++], + &mask, + inner, rx); ++ else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn)) ++ mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++], ++ &mask, ++ inner, rx); + + if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) + mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +index 496e680e0fd9..aade62a9ee5c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +@@ -2136,6 +2136,59 @@ void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, + sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag; + } + ++static void ++dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value, ++ u8 *bit_mask) ++{ ++ struct mlx5dr_match_misc *misc_mask = &value->misc; ++ ++ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, ++ geneve_protocol_type, ++ misc_mask, geneve_protocol_type); ++ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, ++ geneve_oam, ++ misc_mask, geneve_oam); ++ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, ++ geneve_opt_len, ++ misc_mask, geneve_opt_len); ++ DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask, ++ geneve_vni, ++ misc_mask, geneve_vni); ++} ++ ++static int ++dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, ++ struct mlx5dr_ste_build *sb, ++ u8 *hw_ste_p) ++{ ++ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; ++ struct mlx5dr_match_misc *misc = &value->misc; ++ u8 *tag = hw_ste->tag; ++ ++ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, ++ geneve_protocol_type, misc, geneve_protocol_type); ++ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, ++ geneve_oam, misc, geneve_oam); ++ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, ++ geneve_opt_len, misc, geneve_opt_len); ++ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag, ++ geneve_vni, misc, geneve_vni); ++ ++ return 0; ++} ++ ++void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb, ++ struct mlx5dr_match_param *mask, ++ bool inner, bool rx) ++{ ++ dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask); ++ sb->rx = rx; ++ 
sb->inner = inner; ++ sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER; ++ sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask); ++ sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag; ++} ++ + static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value, + u8 *bit_mask) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +index 9dedc162756c..dffe35145d19 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +@@ -330,6 +330,9 @@ int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, + void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); ++void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb, ++ struct mlx5dr_match_param *mask, ++ bool inner, bool rx); + void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +-- +2.13.6 + diff --git a/SOURCES/0111-netdrv-net-mlx5e-TC-Stub-out-ipv6-tun-create-header-.patch b/SOURCES/0111-netdrv-net-mlx5e-TC-Stub-out-ipv6-tun-create-header-.patch new file mode 100644 index 0000000..58af218 --- /dev/null +++ b/SOURCES/0111-netdrv-net-mlx5e-TC-Stub-out-ipv6-tun-create-header-.patch @@ -0,0 +1,91 @@ +From bbbbc3a6f49588fbf5e36c22542b2701921a467f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:35 -0400 +Subject: [PATCH 111/312] [netdrv] net/mlx5e: TC, Stub out ipv6 tun create + header function + +Message-id: <20200510150452.10307-71-ahleihel@redhat.com> +Patchwork-id: 306694 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 70/87] net/mlx5e: TC, Stub out ipv6 tun create header function +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit e689e998e102100bdf7991763d4c244704846f2d +Author: Saeed Mahameed +Date: Fri Nov 1 15:38:30 2019 -0700 + + net/mlx5e: TC, Stub out ipv6 tun create header function + + Improve mlx5e_route_lookup_ipv6 function structure by avoiding #ifdef then + return -EOPNOTSUPP in the middle of the function code. + + To do so, we stub out mlx5e_tc_tun_create_header_ipv6 which is the only + caller of this helper function to avoid calling it altogether + when ipv6 is compiled out, which should also cleanup some compiler + warnings of unused variables. 
+ + Signed-off-by: Saeed Mahameed + Reviewed-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 4 ---- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 7 +++++++ + 2 files changed, 7 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index 92559a758d07..8a9791473f57 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -141,7 +141,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct dst_entry *dst; + struct neighbour *n; + +-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + int ret; + + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, +@@ -157,9 +156,6 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + dst_release(dst); + return ret; + } +-#else +- return -EOPNOTSUPP; +-#endif + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +index c362b9225dc2..6f9a78c85ffd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +@@ -58,9 +58,16 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); + ++#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); ++#else ++static inline int ++mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, ++ struct net_device *mirred_dev, ++ struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; } ++#endif + + bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, + struct net_device *netdev); +-- +2.13.6 + diff --git a/SOURCES/0112-netdrv-net-mlx5e-Remove-redundant-pointer-check.patch b/SOURCES/0112-netdrv-net-mlx5e-Remove-redundant-pointer-check.patch new file mode 100644 index 0000000..e779f3a --- /dev/null +++ b/SOURCES/0112-netdrv-net-mlx5e-Remove-redundant-pointer-check.patch @@ -0,0 +1,122 @@ +From 729bd53ed3a4df05c1593a5aba2d60dfd4390b43 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:36 -0400 +Subject: [PATCH 112/312] [netdrv] net/mlx5e: Remove redundant pointer check + +Message-id: <20200510150452.10307-72-ahleihel@redhat.com> +Patchwork-id: 306695 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 71/87] net/mlx5e: Remove redundant pointer check +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit 90ac245814abc30d2423474310654d31e3908b2f +Author: Eli Cohen +Date: Thu Oct 31 09:12:18 2019 +0200 + + net/mlx5e: Remove redundant pointer check + + When code reaches the "out" label, n is guaranteed to be valid so we can + unconditionally call neigh_release. + + Also change the label to release_neigh to better reflect the fact that + we unconditionally free the neighbour and also match other labels + convention. 
+ + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 22 ++++++++++------------ + 1 file changed, 10 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index 8a9791473f57..edcbcc3d3223 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -236,13 +236,13 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", + ipv4_encap_size, max_encap_size); + err = -EOPNOTSUPP; +- goto out; ++ goto release_neigh; + } + + encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); + if (!encap_header) { + err = -ENOMEM; +- goto out; ++ goto release_neigh; + } + + /* used by mlx5e_detach_encap to lookup a neigh hash table +@@ -294,7 +294,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + /* the encap entry will be made valid on neigh update event + * and not used before that. + */ +- goto out; ++ goto release_neigh; + } + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, +@@ -314,9 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); + free_encap: + kfree(encap_header); +-out: +- if (n) +- neigh_release(n); ++release_neigh: ++ neigh_release(n); + return err; + } + +@@ -355,13 +354,13 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", + ipv6_encap_size, max_encap_size); + err = -EOPNOTSUPP; +- goto out; ++ goto release_neigh; + } + + encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); + if (!encap_header) { + err = -ENOMEM; +- goto out; ++ goto release_neigh; + } + + /* used by mlx5e_detach_encap to lookup a neigh hash table +@@ -412,7 +411,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + /* the encap entry will be made valid on neigh update event + * and not used before that. 
+ */ +- goto out; ++ goto release_neigh; + } + + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, +@@ -433,9 +432,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); + free_encap: + kfree(encap_header); +-out: +- if (n) +- neigh_release(n); ++release_neigh: ++ neigh_release(n); + return err; + } + +-- +2.13.6 + diff --git a/SOURCES/0113-netdrv-net-use-rhashtable_lookup-instead-of-rhashtab.patch b/SOURCES/0113-netdrv-net-use-rhashtable_lookup-instead-of-rhashtab.patch new file mode 100644 index 0000000..ddfd90a --- /dev/null +++ b/SOURCES/0113-netdrv-net-use-rhashtable_lookup-instead-of-rhashtab.patch @@ -0,0 +1,56 @@ +From d30f4fa003f76ccbe9522f732b478d90c60372d4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:37 -0400 +Subject: [PATCH 113/312] [netdrv] net: use rhashtable_lookup() instead of + rhashtable_lookup_fast() + +Message-id: <20200510150452.10307-73-ahleihel@redhat.com> +Patchwork-id: 306697 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 72/87] net: use rhashtable_lookup() instead of rhashtable_lookup_fast() +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 +Conflicts: + - Take mlx5 chagnes only. + +commit ab818362c9054beb950b97a09ce7b0d56f5a32a1 +Author: Taehee Yoo +Date: Fri Nov 22 08:15:19 2019 +0000 + + net: use rhashtable_lookup() instead of rhashtable_lookup_fast() + + rhashtable_lookup_fast() internally calls rcu_read_lock() then, + calls rhashtable_lookup(). So if rcu_read_lock() is already held, + rhashtable_lookup() is enough. + + Signed-off-by: Taehee Yoo + Signed-off-by: Jakub Kicinski + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 0bc017569822..8afafb7eeb55 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3853,7 +3853,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, + int err; + + rcu_read_lock(); +- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); ++ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); + if (!flow || !same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; +-- +2.13.6 + diff --git a/SOURCES/0114-netdrv-net-mlx5e-Fix-build-error-without-IPV6.patch b/SOURCES/0114-netdrv-net-mlx5e-Fix-build-error-without-IPV6.patch new file mode 100644 index 0000000..9cd6fd6 --- /dev/null +++ b/SOURCES/0114-netdrv-net-mlx5e-Fix-build-error-without-IPV6.patch @@ -0,0 +1,156 @@ +From 7004b16ea401b0d7634ec572614fefd8b410dfaa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:38 -0400 +Subject: [PATCH 114/312] [netdrv] net/mlx5e: Fix build error without IPV6 + +Message-id: <20200510150452.10307-74-ahleihel@redhat.com> +Patchwork-id: 306696 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 73/87] net/mlx5e: Fix build error without IPV6 +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 +Conflicts: + - 
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c + Context diff due to already backported commit: + 6c8991f41546 ("net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup") + +commit 5f9fc3325ef95398c363b9b7813a7e99d4d85d7d +Author: YueHaibing +Date: Wed Nov 27 21:27:00 2019 +0800 + + net/mlx5e: Fix build error without IPV6 + + If IPV6 is not set and CONFIG_MLX5_ESWITCH is y, + building fails: + + drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c:322:5: error: redefinition of mlx5e_tc_tun_create_header_ipv6 + int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + In file included from drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c:7:0: + drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h:67:1: note: previous definition of mlx5e_tc_tun_create_header_ipv6 was here + mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Use #ifdef to guard this, also move mlx5e_route_lookup_ipv6 + to cleanup unused warning. + + Reported-by: Hulk Robot + Fixes: e689e998e102 ("net/mlx5e: TC, Stub out ipv6 tun create header function") + Signed-off-by: YueHaibing + Acked-by: Saeed Mahameed + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 74 +++++++++++----------- + 1 file changed, 38 insertions(+), 36 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index edcbcc3d3223..b855933f6bec 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -130,42 +130,6 @@ static const char *mlx5e_netdev_kind(struct net_device *dev) + return "unknown"; + } + +-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, +- struct net_device *mirred_dev, +- struct net_device **out_dev, +- struct net_device **route_dev, +- struct flowi6 *fl6, +- struct neighbour **out_n, +- u8 *out_ttl) +-{ +- struct dst_entry *dst; +- struct neighbour *n; +- +- int ret; +- +- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, +- NULL); +- if (IS_ERR(dst)) +- return PTR_ERR(dst); +- +- if (!(*out_ttl)) +- *out_ttl = ip6_dst_hoplimit(dst); +- +- ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); +- if (ret < 0) { +- dst_release(dst); +- return ret; +- } +- +- n = dst_neigh_lookup(dst, &fl6->daddr); +- dst_release(dst); +- if (!n) +- return -ENOMEM; +- +- *out_n = n; +- return 0; +-} +- + static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto, + struct mlx5e_encap_entry *e) + { +@@ -319,6 +283,43 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + return err; + } + ++#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) ++static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, ++ struct net_device *mirred_dev, ++ struct net_device **out_dev, ++ struct net_device **route_dev, ++ struct flowi6 *fl6, ++ struct neighbour **out_n, ++ u8 *out_ttl) ++{ ++ struct dst_entry *dst; ++ struct neighbour *n; ++ ++ int ret; ++ ++ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6, ++ NULL); ++ if (IS_ERR(dst)) ++ return PTR_ERR(dst); ++ ++ if (!(*out_ttl)) ++ *out_ttl = ip6_dst_hoplimit(dst); ++ ++ ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); ++ if (ret < 0) { ++ dst_release(dst); ++ return ret; ++ } ++ ++ n = dst_neigh_lookup(dst, &fl6->daddr); ++ dst_release(dst); ++ if (!n) ++ return -ENOMEM; ++ ++ 
*out_n = n; ++ return 0; ++} ++ + int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +@@ -436,6 +437,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + neigh_release(n); + return err; + } ++#endif + + bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, + struct net_device *netdev) +-- +2.13.6 + diff --git a/SOURCES/0115-netdrv-net-mlx5e-E-switch-Fix-Ingress-ACL-groups-in-.patch b/SOURCES/0115-netdrv-net-mlx5e-E-switch-Fix-Ingress-ACL-groups-in-.patch new file mode 100644 index 0000000..3165256 --- /dev/null +++ b/SOURCES/0115-netdrv-net-mlx5e-E-switch-Fix-Ingress-ACL-groups-in-.patch @@ -0,0 +1,297 @@ +From ca0a47bf9d5acf50538cd7f805640169dc595044 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:39 -0400 +Subject: [PATCH 115/312] [netdrv] net/mlx5e: E-switch, Fix Ingress ACL groups + in switchdev mode for prio tag + +Message-id: <20200510150452.10307-75-ahleihel@redhat.com> +Patchwork-id: 306698 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 74/87] net/mlx5e: E-switch, Fix Ingress ACL groups in switchdev mode for prio tag +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc1 + +commit b7826076d7ae5928fdd2972a6c3e180148fb74c1 +Author: Parav Pandit +Date: Tue Nov 12 17:06:00 2019 -0600 + + net/mlx5e: E-switch, Fix Ingress ACL groups in switchdev mode for prio tag + + In cited commit, when prio tag mode is enabled, FTE creation fails + due to missing group with valid match criteria. + + Hence, + (a) create prio tag group metadata_prio_tag_grp when prio tag is + enabled with match criteria for vlan push FTE. + (b) Rename metadata_grp to metadata_allmatch_grp to reflect its purpose. + + Also when priority tag is enabled, delete metadata settings after + deleting ingress rules, which are using it. + + Tide up rest of the ingress config code for unnecessary labels. + + Fixes: 10652f39943e ("net/mlx5: Refactor ingress acl configuration") + Signed-off-by: Parav Pandit + Reviewed-by: Eli Britstein + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 9 +- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 122 ++++++++++++++------- + 2 files changed, 93 insertions(+), 38 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index d9c3b8767224..14814f41346e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -94,7 +94,14 @@ struct vport_ingress { + struct mlx5_fc *drop_counter; + } legacy; + struct { +- struct mlx5_flow_group *metadata_grp; ++ /* Optional group to add an FTE to do internal priority ++ * tagging on ingress packets. ++ */ ++ struct mlx5_flow_group *metadata_prio_tag_grp; ++ /* Group to add default match-all FTE entry to tag ingress ++ * packet with metadata. 
++ */ ++ struct mlx5_flow_group *metadata_allmatch_grp; + struct mlx5_modify_hdr *modify_metadata; + struct mlx5_flow_handle *modify_metadata_rule; + } offloads; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 0f0d8decb04c..121abcae993a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -88,6 +88,14 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) + return 1; + } + ++static bool ++esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, ++ const struct mlx5_vport *vport) ++{ ++ return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && ++ mlx5_eswitch_is_vf_vport(esw, vport->vport)); ++} ++ + static void + mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, +@@ -1763,12 +1771,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, + * required, allow + * Unmatched traffic is allowed by default + */ +- + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); +- if (!spec) { +- err = -ENOMEM; +- goto out_no_mem; +- } ++ if (!spec) ++ return -ENOMEM; + + /* Untagged packets - push prio tag VLAN, allow */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); +@@ -1794,14 +1799,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw, + "vport[%d] configure ingress untagged allow rule, err(%d)\n", + vport->vport, err); + vport->ingress.allow_rule = NULL; +- goto out; + } + +-out: + kvfree(spec); +-out_no_mem: +- if (err) +- esw_vport_cleanup_ingress_rules(esw, vport); + return err; + } + +@@ -1839,13 +1839,9 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, + esw_warn(esw->dev, + "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", + vport->vport, err); ++ mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); + vport->ingress.offloads.modify_metadata_rule = NULL; +- goto out; + } +- +-out: +- if (err) +- mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); + return err; + } + +@@ -1865,50 +1861,103 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw, + { + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *g; ++ void *match_criteria; + u32 *flow_group_in; ++ u32 flow_index = 0; + int ret = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + +- memset(flow_group_in, 0, inlen); +- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); +- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); ++ if (esw_check_ingress_prio_tag_enabled(esw, vport)) { ++ /* This group is to hold FTE to match untagged packets when prio_tag ++ * is enabled. 
++ */ ++ memset(flow_group_in, 0, inlen); + +- g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); +- if (IS_ERR(g)) { +- ret = PTR_ERR(g); +- esw_warn(esw->dev, +- "Failed to create vport[%d] ingress metadata group, err(%d)\n", +- vport->vport, ret); +- goto grp_err; ++ match_criteria = MLX5_ADDR_OF(create_flow_group_in, ++ flow_group_in, match_criteria); ++ MLX5_SET(create_flow_group_in, flow_group_in, ++ match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); ++ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); ++ ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); ++ if (IS_ERR(g)) { ++ ret = PTR_ERR(g); ++ esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n", ++ vport->vport, ret); ++ goto prio_tag_err; ++ } ++ vport->ingress.offloads.metadata_prio_tag_grp = g; ++ flow_index++; ++ } ++ ++ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { ++ /* This group holds an FTE with no matches for add metadata for ++ * tagged packets, if prio-tag is enabled (as a fallthrough), ++ * or all traffic in case prio-tag is disabled. ++ */ ++ memset(flow_group_in, 0, inlen); ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); ++ ++ g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); ++ if (IS_ERR(g)) { ++ ret = PTR_ERR(g); ++ esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", ++ vport->vport, ret); ++ goto metadata_err; ++ } ++ vport->ingress.offloads.metadata_allmatch_grp = g; ++ } ++ ++ kvfree(flow_group_in); ++ return 0; ++ ++metadata_err: ++ if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) { ++ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); ++ vport->ingress.offloads.metadata_prio_tag_grp = NULL; + } +- vport->ingress.offloads.metadata_grp = g; +-grp_err: ++prio_tag_err: + kvfree(flow_group_in); + return ret; + } + + static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport) + { +- if (vport->ingress.offloads.metadata_grp) { +- mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp); +- vport->ingress.offloads.metadata_grp = NULL; ++ if (vport->ingress.offloads.metadata_allmatch_grp) { ++ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp); ++ vport->ingress.offloads.metadata_allmatch_grp = NULL; ++ } ++ ++ if (vport->ingress.offloads.metadata_prio_tag_grp) { ++ mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); ++ vport->ingress.offloads.metadata_prio_tag_grp = NULL; + } + } + + static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { ++ int num_ftes = 0; + int err; + + if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && +- !MLX5_CAP_GEN(esw->dev, prio_tag_required)) ++ !esw_check_ingress_prio_tag_enabled(esw, vport)) + return 0; + + esw_vport_cleanup_ingress_rules(esw, vport); +- err = esw_vport_create_ingress_acl_table(esw, vport, 1); ++ ++ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) ++ num_ftes++; ++ if (esw_check_ingress_prio_tag_enabled(esw, vport)) ++ num_ftes++; ++ ++ err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes); + if (err) { + esw_warn(esw->dev, + "failed to enable ingress acl (%d) on vport[%d]\n", +@@ -1929,8 +1978,7 @@ static int 
esw_vport_ingress_config(struct mlx5_eswitch *esw, + goto metadata_err; + } + +- if (MLX5_CAP_GEN(esw->dev, prio_tag_required) && +- mlx5_eswitch_is_vf_vport(esw, vport->vport)) { ++ if (esw_check_ingress_prio_tag_enabled(esw, vport)) { + err = esw_vport_ingress_prio_tag_config(esw, vport); + if (err) + goto prio_tag_err; +@@ -1940,7 +1988,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + prio_tag_err: + esw_vport_del_ingress_acl_modify_metadata(esw, vport); + metadata_err: +- esw_vport_cleanup_ingress_rules(esw, vport); + esw_vport_destroy_ingress_acl_group(vport); + group_err: + esw_vport_destroy_ingress_acl_table(vport); +@@ -2023,8 +2070,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw, + if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) { + err = esw_vport_egress_config(esw, vport); + if (err) { +- esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_cleanup_ingress_rules(esw, vport); ++ esw_vport_del_ingress_acl_modify_metadata(esw, vport); ++ esw_vport_destroy_ingress_acl_group(vport); + esw_vport_destroy_ingress_acl_table(vport); + } + } +@@ -2036,8 +2084,8 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { + esw_vport_disable_egress_acl(esw, vport); +- esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_cleanup_ingress_rules(esw, vport); ++ esw_vport_del_ingress_acl_modify_metadata(esw, vport); + esw_vport_destroy_ingress_acl_group(vport); + esw_vport_destroy_ingress_acl_table(vport); + } +-- +2.13.6 + diff --git a/SOURCES/0116-netdrv-treewide-Use-sizeof_field-macro.patch b/SOURCES/0116-netdrv-treewide-Use-sizeof_field-macro.patch new file mode 100644 index 0000000..fc7df6b --- /dev/null +++ b/SOURCES/0116-netdrv-treewide-Use-sizeof_field-macro.patch @@ -0,0 +1,99 @@ +From 2b192742217de481dce69444e94fe1ec27ea8c4e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:40 -0400 +Subject: [PATCH 116/312] [netdrv] treewide: Use sizeof_field() macro + +Message-id: <20200510150452.10307-76-ahleihel@redhat.com> +Patchwork-id: 306699 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 75/87] treewide: Use sizeof_field() macro +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc2 +Conflicts: + - Take mlx5 changes only. + +commit c593642c8be046915ca3a4a300243a68077cd207 +Author: Pankaj Bharadiya +Date: Mon Dec 9 10:31:43 2019 -0800 + + treewide: Use sizeof_field() macro + + Replace all the occurrences of FIELD_SIZEOF() with sizeof_field() except + at places where these are defined. Later patches will remove the unused + definition of FIELD_SIZEOF(). 
+ + This patch is generated using following script: + + EXCLUDE_FILES="include/linux/stddef.h|include/linux/kernel.h" + + git grep -l -e "\bFIELD_SIZEOF\b" | while read file; + do + + if [[ "$file" =~ $EXCLUDE_FILES ]]; then + continue + fi + sed -i -e 's/\bFIELD_SIZEOF\b/sizeof_field/g' $file; + done + + Signed-off-by: Pankaj Bharadiya + Link: https://lore.kernel.org/r/20190924105839.110713-3-pankaj.laxminarayan.bharadiya@intel.com + Co-developed-by: Kees Cook + Signed-off-by: Kees Cook + Acked-by: David Miller # for net + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 6 +++--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 ++-- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +index c76da309506b..e4ec0e03c289 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +@@ -87,10 +87,10 @@ static const struct rhashtable_params rhash_sa = { + * value is not constant during the lifetime + * of the key object. + */ +- .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - +- FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), ++ .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - ++ sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), + .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + +- FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), ++ sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), + .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), + .automatic_shrinking = true, + .min_size = 1, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index bf0cea1e294b..7138dcf2e538 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -209,7 +209,7 @@ enum fs_i_lock_class { + }; + + static const struct rhashtable_params rhash_fte = { +- .key_len = FIELD_SIZEOF(struct fs_fte, val), ++ .key_len = sizeof_field(struct fs_fte, val), + .key_offset = offsetof(struct fs_fte, val), + .head_offset = offsetof(struct fs_fte, hash), + .automatic_shrinking = true, +@@ -217,7 +217,7 @@ static const struct rhashtable_params rhash_fte = { + }; + + static const struct rhashtable_params rhash_fg = { +- .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask), ++ .key_len = sizeof_field(struct mlx5_flow_group, mask), + .key_offset = offsetof(struct mlx5_flow_group, mask), + .head_offset = offsetof(struct mlx5_flow_group, hash), + .automatic_shrinking = true, +-- +2.13.6 + diff --git a/SOURCES/0117-netdrv-net-mlx5e-Avoid-duplicating-rule-destinations.patch b/SOURCES/0117-netdrv-net-mlx5e-Avoid-duplicating-rule-destinations.patch new file mode 100644 index 0000000..beee64d --- /dev/null +++ b/SOURCES/0117-netdrv-net-mlx5e-Avoid-duplicating-rule-destinations.patch @@ -0,0 +1,208 @@ +From d614648206c87c5d068f9dec596c9dfb152a5618 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:42 -0400 +Subject: [PATCH 117/312] [netdrv] net/mlx5e: Avoid duplicating rule + destinations + +Message-id: <20200510150452.10307-78-ahleihel@redhat.com> +Patchwork-id: 306701 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 77/87] net/mlx5e: Avoid duplicating rule destinations +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson 
+RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc6 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_tc.c + Context diff due to missing commit: + 84179981317f ("net/mlx5: TC: Offload flow table rules") + ---> Local bool ft_flow variable does not exist yet in function + parse_tc_fdb_actions + +commit 554fe75c1b3f679b1eebf193a4e56492837d3f5a +Author: Dmytro Linkin +Date: Thu Oct 31 18:15:51 2019 +0200 + + net/mlx5e: Avoid duplicating rule destinations + + Following scenario easily break driver logic and crash the kernel: + 1. Add rule with mirred actions to same device. + 2. Delete this rule. + In described scenario rule is not added to database and on deletion + driver access invalid entry. + Example: + + $ tc filter add dev ens1f0_0 ingress protocol ip prio 1 \ + flower skip_sw \ + action mirred egress mirror dev ens1f0_1 pipe \ + action mirred egress redirect dev ens1f0_1 + $ tc filter del dev ens1f0_0 ingress protocol ip prio 1 + + Dmesg output: + + [ 376.634396] mlx5_core 0000:82:00.0: mlx5_cmd_check:756:(pid 3439): DESTROY_FLOW_GROUP(0x934) op_mod(0x0) failed, status bad resource state(0x9), syndrome (0x563e2f) + [ 376.654983] mlx5_core 0000:82:00.0: del_hw_flow_group:567:(pid 3439): flow steering can't destroy fg 89 of ft 3145728 + [ 376.673433] kasan: CONFIG_KASAN_INLINE enabled + [ 376.683769] kasan: GPF could be caused by NULL-ptr deref or user memory access + [ 376.695229] general protection fault: 0000 [#1] PREEMPT SMP KASAN PTI + [ 376.705069] CPU: 7 PID: 3439 Comm: tc Not tainted 5.4.0-rc5+ #76 + [ 376.714959] Hardware name: Supermicro SYS-2028TP-DECTR/X10DRT-PT, BIOS 2.0a 08/12/2016 + [ 376.726371] RIP: 0010:mlx5_del_flow_rules+0x105/0x960 [mlx5_core] + [ 376.735817] Code: 01 00 00 00 48 83 eb 08 e8 28 d9 ff ff 4c 39 e3 75 d8 4c 8d bd c0 02 00 00 48 b8 00 00 00 00 00 fc ff df 4c 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e 84 04 00 00 48 8d 7d 28 8b 9 d + [ 376.761261] RSP: 0018:ffff888847c56db8 EFLAGS: 00010202 + [ 376.770054] RAX: dffffc0000000000 RBX: ffff8888582a6da0 RCX: ffff888847c56d60 + [ 376.780743] RDX: 0000000000000058 RSI: 0000000000000008 RDI: 0000000000000282 + [ 376.791328] RBP: 0000000000000000 R08: fffffbfff0c60ea6 R09: fffffbfff0c60ea6 + [ 376.802050] R10: fffffbfff0c60ea5 R11: ffffffff8630752f R12: ffff8888582a6da0 + [ 376.812798] R13: dffffc0000000000 R14: ffff8888582a6da0 R15: 00000000000002c0 + [ 376.823445] FS: 00007f675f9a8840(0000) GS:ffff88886d200000(0000) knlGS:0000000000000000 + [ 376.834971] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + [ 376.844179] CR2: 00000000007d9640 CR3: 00000007d3f26003 CR4: 00000000001606e0 + [ 376.854843] Call Trace: + [ 376.868542] __mlx5_eswitch_del_rule+0x49/0x300 [mlx5_core] + [ 376.877735] mlx5e_tc_del_fdb_flow+0x6ec/0x9e0 [mlx5_core] + [ 376.921549] mlx5e_flow_put+0x2b/0x50 [mlx5_core] + [ 376.929813] mlx5e_delete_flower+0x5b6/0xbd0 [mlx5_core] + [ 376.973030] tc_setup_cb_reoffload+0x29/0xc0 + [ 376.980619] fl_reoffload+0x50a/0x770 [cls_flower] + [ 377.015087] tcf_block_playback_offloads+0xbd/0x250 + [ 377.033400] tcf_block_setup+0x1b2/0xc60 + [ 377.057247] tcf_block_offload_cmd+0x195/0x240 + [ 377.098826] tcf_block_offload_unbind+0xe7/0x180 + [ 377.107056] __tcf_block_put+0xe5/0x400 + [ 377.114528] ingress_destroy+0x3d/0x60 [sch_ingress] + [ 377.122894] qdisc_destroy+0xf1/0x5a0 + [ 377.129993] qdisc_graft+0xa3d/0xe50 + [ 377.151227] tc_get_qdisc+0x48e/0xa20 + [ 377.165167] rtnetlink_rcv_msg+0x35d/0x8d0 + [ 377.199528] 
netlink_rcv_skb+0x11e/0x340 + [ 377.219638] netlink_unicast+0x408/0x5b0 + [ 377.239913] netlink_sendmsg+0x71b/0xb30 + [ 377.267505] sock_sendmsg+0xb1/0xf0 + [ 377.273801] ___sys_sendmsg+0x635/0x900 + [ 377.312784] __sys_sendmsg+0xd3/0x170 + [ 377.338693] do_syscall_64+0x95/0x460 + [ 377.344833] entry_SYSCALL_64_after_hwframe+0x49/0xbe + [ 377.352321] RIP: 0033:0x7f675e58e090 + + To avoid this, for every mirred action check if output device was + already processed. If so - drop rule with EOPNOTSUPP error. + + Signed-off-by: Dmytro Linkin + Reviewed-by: Roi Dayan + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 58 ++++++++++++++++++++++++- + 1 file changed, 57 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 8afafb7eeb55..0af1d5b1e438 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2988,6 +2988,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info + return kmemdup(tun_info, tun_size, GFP_KERNEL); + } + ++static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ int out_index, ++ struct mlx5e_encap_entry *e, ++ struct netlink_ext_ack *extack) ++{ ++ int i; ++ ++ for (i = 0; i < out_index; i++) { ++ if (flow->encaps[i].e != e) ++ continue; ++ NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action"); ++ netdev_err(priv->netdev, "can't duplicate encap action\n"); ++ return true; ++ } ++ ++ return false; ++} ++ + static int mlx5e_attach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, +@@ -3023,6 +3042,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, + + /* must verify if encap is valid or not */ + if (e) { ++ /* Check that entry was not already attached to this flow */ ++ if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { ++ err = -EOPNOTSUPP; ++ goto out_err; ++ } ++ + mutex_unlock(&esw->offloads.encap_tbl_lock); + wait_for_completion(&e->res_ready); + +@@ -3209,6 +3234,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, + same_hw_devs(priv, netdev_priv(out_dev)); + } + ++static bool is_duplicated_output_device(struct net_device *dev, ++ struct net_device *out_dev, ++ int *ifindexes, int if_count, ++ struct netlink_ext_ack *extack) ++{ ++ int i; ++ ++ for (i = 0; i < if_count; i++) { ++ if (ifindexes[i] == out_dev->ifindex) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "can't duplicate output to same device"); ++ netdev_err(dev, "can't duplicate output to same device: %s\n", ++ out_dev->name); ++ return true; ++ } ++ } ++ ++ return false; ++} ++ + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, +@@ -3220,10 +3265,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + const struct ip_tunnel_info *info = NULL; ++ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; + const struct flow_action_entry *act; ++ int err, i, if_count = 0; + bool encap = false; + u32 action = 0; +- int err, i; + + if (!flow_action_has_entries(flow_action)) + return -EINVAL; +@@ -3292,6 +3338,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct net_device *uplink_dev = 
mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + struct net_device *uplink_upper; + ++ if (is_duplicated_output_device(priv->netdev, ++ out_dev, ++ ifindexes, ++ if_count, ++ extack)) ++ return -EOPNOTSUPP; ++ ++ ifindexes[if_count] = out_dev->ifindex; ++ if_count++; ++ + rcu_read_lock(); + uplink_upper = + netdev_master_upper_dev_get_rcu(uplink_dev); +-- +2.13.6 + diff --git a/SOURCES/0118-netdrv-net-mlx5e-Always-print-health-reporter-messag.patch b/SOURCES/0118-netdrv-net-mlx5e-Always-print-health-reporter-messag.patch new file mode 100644 index 0000000..aa8a87c --- /dev/null +++ b/SOURCES/0118-netdrv-net-mlx5e-Always-print-health-reporter-messag.patch @@ -0,0 +1,64 @@ +From f20856e12e6ea7721607296c4e006b50332add56 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:43 -0400 +Subject: [PATCH 118/312] [netdrv] net/mlx5e: Always print health reporter + message to dmesg + +Message-id: <20200510150452.10307-79-ahleihel@redhat.com> +Patchwork-id: 306702 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 78/87] net/mlx5e: Always print health reporter message to dmesg +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc6 + +commit 99cda45426c9a2c59bb2f7cb886a405440282455 +Author: Eran Ben Elisha +Date: Wed Dec 4 14:34:18 2019 +0200 + + net/mlx5e: Always print health reporter message to dmesg + + In case a reporter exists, error message is logged only to the devlink + tracer. The devlink tracer is a visibility utility only, which user can + choose not to monitor. + After cited patch, 3rd party monitoring tools that tracks these error + message will no longer find them in dmesg, causing a regression. + + With this patch, error messages are also logged into the dmesg. 
+ + Fixes: c50de4af1d63 ("net/mlx5e: Generalize tx reporter's functionality") + Signed-off-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +index 1d6b58860da6..3a975641f902 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv, + struct devlink_health_reporter *reporter, char *err_str, + struct mlx5e_err_ctx *err_ctx) + { +- if (!reporter) { +- netdev_err(priv->netdev, err_str); ++ netdev_err(priv->netdev, err_str); ++ ++ if (!reporter) + return err_ctx->recover(&err_ctx->ctx); +- } ++ + return devlink_health_report(reporter, err_str, err_ctx); + } +-- +2.13.6 + diff --git a/SOURCES/0119-netdrv-net-mlx5-Move-devlink-registration-before-int.patch b/SOURCES/0119-netdrv-net-mlx5-Move-devlink-registration-before-int.patch new file mode 100644 index 0000000..a72fde6 --- /dev/null +++ b/SOURCES/0119-netdrv-net-mlx5-Move-devlink-registration-before-int.patch @@ -0,0 +1,90 @@ +From 2efdd509f52493cf944fe5bfb180843b77267924 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:44 -0400 +Subject: [PATCH 119/312] [netdrv] net/mlx5: Move devlink registration before + interfaces load + +Message-id: <20200510150452.10307-80-ahleihel@redhat.com> +Patchwork-id: 306703 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 79/87] net/mlx5: Move devlink registration before interfaces load +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc6 + +commit a6f3b62386a02c1e94bfa22c543f82d63f5e631b +Author: Michael Guralnik +Date: Wed Nov 20 11:43:49 2019 +0200 + + net/mlx5: Move devlink registration before interfaces load + + Register devlink before interfaces are added. + This will allow interfaces to use devlink while initalizing. For example, + call mlx5_is_roce_enabled. 
+ + Fixes: aba25279c100 ("net/mlx5e: Add TX reporter support") + Signed-off-by: Michael Guralnik + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 16 +++++++++------- + 1 file changed, 9 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 57e376e4e938..f34eeb5af1ef 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1221,6 +1221,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + if (err) + goto err_load; + ++ if (boot) { ++ err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); ++ if (err) ++ goto err_devlink_reg; ++ } ++ + if (mlx5_device_registered(dev)) { + mlx5_attach_device(dev); + } else { +@@ -1238,6 +1244,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + return err; + + err_reg_dev: ++ if (boot) ++ mlx5_devlink_unregister(priv_to_devlink(dev)); ++err_devlink_reg: + mlx5_unload(dev); + err_load: + if (boot) +@@ -1375,10 +1384,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) + + request_module_nowait(MLX5_IB_MOD); + +- err = mlx5_devlink_register(devlink, &pdev->dev); +- if (err) +- goto clean_load; +- + err = mlx5_crdump_enable(dev); + if (err) + dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); +@@ -1386,9 +1391,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) + pci_save_state(pdev); + return 0; + +-clean_load: +- mlx5_unload_one(dev, true); +- + err_load_one: + mlx5_pci_close(dev); + pci_init_err: +-- +2.13.6 + diff --git a/SOURCES/0120-netdrv-Revert-net-mlx5-Support-lockless-FTE-read-loo.patch b/SOURCES/0120-netdrv-Revert-net-mlx5-Support-lockless-FTE-read-loo.patch new file mode 100644 index 0000000..979862a --- /dev/null +++ b/SOURCES/0120-netdrv-Revert-net-mlx5-Support-lockless-FTE-read-loo.patch @@ -0,0 +1,274 @@ +From 5d039200e42b7e170778424514d9d02cbf9fc8e6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:45 -0400 +Subject: [PATCH 120/312] [netdrv] Revert "net/mlx5: Support lockless FTE read + lookups" + +Message-id: <20200510150452.10307-81-ahleihel@redhat.com> +Patchwork-id: 306704 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 80/87] Revert "net/mlx5: Support lockless FTE read lookups" +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc6 + +commit 1f0593e791ed3fb4074d4470a261cde62d806ed5 +Author: Parav Pandit +Date: Wed Dec 11 00:35:18 2019 -0600 + + Revert "net/mlx5: Support lockless FTE read lookups" + + This reverts commit 7dee607ed0e04500459db53001d8e02f8831f084. + + During cleanup path, FTE's parent node group is removed which is + referenced by the FTE while freeing the FTE. + Hence FTE's lockless read lookup optimization done in cited commit is + not possible at the moment. + + Hence, revert the commit. + + This avoid below KAZAN call trace. 
+ + [ 110.390896] BUG: KASAN: use-after-free in find_root.isra.14+0x56/0x60 + [mlx5_core] + [ 110.391048] Read of size 4 at addr ffff888c19e6d220 by task + swapper/12/0 + + [ 110.391219] CPU: 12 PID: 0 Comm: swapper/12 Not tainted 5.5.0-rc1+ + [ 110.391222] Hardware name: HP ProLiant DL380p Gen8, BIOS P70 + 08/02/2014 + [ 110.391225] Call Trace: + [ 110.391229] + [ 110.391246] dump_stack+0x95/0xd5 + [ 110.391307] ? find_root.isra.14+0x56/0x60 [mlx5_core] + [ 110.391320] print_address_description.constprop.5+0x20/0x320 + [ 110.391379] ? find_root.isra.14+0x56/0x60 [mlx5_core] + [ 110.391435] ? find_root.isra.14+0x56/0x60 [mlx5_core] + [ 110.391441] __kasan_report+0x149/0x18c + [ 110.391499] ? find_root.isra.14+0x56/0x60 [mlx5_core] + [ 110.391504] kasan_report+0x12/0x20 + [ 110.391511] __asan_report_load4_noabort+0x14/0x20 + [ 110.391567] find_root.isra.14+0x56/0x60 [mlx5_core] + [ 110.391625] del_sw_fte_rcu+0x4a/0x100 [mlx5_core] + [ 110.391633] rcu_core+0x404/0x1950 + [ 110.391640] ? rcu_accelerate_cbs_unlocked+0x100/0x100 + [ 110.391649] ? run_rebalance_domains+0x201/0x280 + [ 110.391654] rcu_core_si+0xe/0x10 + [ 110.391661] __do_softirq+0x181/0x66c + [ 110.391670] irq_exit+0x12c/0x150 + [ 110.391675] smp_apic_timer_interrupt+0xf0/0x370 + [ 110.391681] apic_timer_interrupt+0xf/0x20 + [ 110.391684] + [ 110.391695] RIP: 0010:cpuidle_enter_state+0xfa/0xba0 + [ 110.391703] Code: 3d c3 9b b5 50 e8 56 75 6e fe 48 89 45 c8 0f 1f 44 + 00 00 31 ff e8 a6 94 6e fe 45 84 ff 0f 85 f6 02 00 00 fb 66 0f 1f 44 00 + 00 <45> 85 f6 0f 88 db 06 00 00 4d 63 fe 4b 8d 04 7f 49 8d 04 87 49 8d + [ 110.391706] RSP: 0018:ffff888c23a6fce8 EFLAGS: 00000246 ORIG_RAX: + ffffffffffffff13 + [ 110.391712] RAX: dffffc0000000000 RBX: ffffe8ffff7002f8 RCX: + 000000000000001f + [ 110.391715] RDX: 1ffff11184ee6cb5 RSI: 0000000040277d83 RDI: + ffff888c277365a8 + [ 110.391718] RBP: ffff888c23a6fd40 R08: 0000000000000002 R09: + 0000000000035280 + [ 110.391721] R10: ffff888c23a6fc80 R11: ffffed11847485d0 R12: + ffffffffb1017740 + [ 110.391723] R13: 0000000000000003 R14: 0000000000000003 R15: + 0000000000000000 + [ 110.391732] ? cpuidle_enter_state+0xea/0xba0 + [ 110.391738] cpuidle_enter+0x4f/0xa0 + [ 110.391747] call_cpuidle+0x6d/0xc0 + [ 110.391752] do_idle+0x360/0x430 + [ 110.391758] ? arch_cpu_idle_exit+0x40/0x40 + [ 110.391765] ? complete+0x67/0x80 + [ 110.391771] cpu_startup_entry+0x1d/0x20 + [ 110.391779] start_secondary+0x2f3/0x3c0 + [ 110.391784] ? 
set_cpu_sibling_map+0x2500/0x2500 + [ 110.391795] secondary_startup_64+0xa4/0xb0 + + [ 110.391841] Allocated by task 290: + [ 110.391917] save_stack+0x21/0x90 + [ 110.391921] __kasan_kmalloc.constprop.8+0xa7/0xd0 + [ 110.391925] kasan_kmalloc+0x9/0x10 + [ 110.391929] kmem_cache_alloc_trace+0xf6/0x270 + [ 110.391987] create_root_ns.isra.36+0x58/0x260 [mlx5_core] + [ 110.392044] mlx5_init_fs+0x5fd/0x1ee0 [mlx5_core] + [ 110.392092] mlx5_load_one+0xc7a/0x3860 [mlx5_core] + [ 110.392139] init_one+0x6ff/0xf90 [mlx5_core] + [ 110.392145] local_pci_probe+0xde/0x190 + [ 110.392150] work_for_cpu_fn+0x56/0xa0 + [ 110.392153] process_one_work+0x678/0x1140 + [ 110.392157] worker_thread+0x573/0xba0 + [ 110.392162] kthread+0x341/0x400 + [ 110.392166] ret_from_fork+0x1f/0x40 + + [ 110.392218] Freed by task 2742: + [ 110.392288] save_stack+0x21/0x90 + [ 110.392292] __kasan_slab_free+0x137/0x190 + [ 110.392296] kasan_slab_free+0xe/0x10 + [ 110.392299] kfree+0x94/0x250 + [ 110.392357] tree_put_node+0x257/0x360 [mlx5_core] + [ 110.392413] tree_remove_node+0x63/0xb0 [mlx5_core] + [ 110.392469] clean_tree+0x199/0x240 [mlx5_core] + [ 110.392525] mlx5_cleanup_fs+0x76/0x580 [mlx5_core] + [ 110.392572] mlx5_unload+0x22/0xc0 [mlx5_core] + [ 110.392619] mlx5_unload_one+0x99/0x260 [mlx5_core] + [ 110.392666] remove_one+0x61/0x160 [mlx5_core] + [ 110.392671] pci_device_remove+0x10b/0x2c0 + [ 110.392677] device_release_driver_internal+0x1e4/0x490 + [ 110.392681] device_driver_detach+0x36/0x40 + [ 110.392685] unbind_store+0x147/0x200 + [ 110.392688] drv_attr_store+0x6f/0xb0 + [ 110.392693] sysfs_kf_write+0x127/0x1d0 + [ 110.392697] kernfs_fop_write+0x296/0x420 + [ 110.392702] __vfs_write+0x66/0x110 + [ 110.392707] vfs_write+0x1a0/0x500 + [ 110.392711] ksys_write+0x164/0x250 + [ 110.392715] __x64_sys_write+0x73/0xb0 + [ 110.392720] do_syscall_64+0x9f/0x3a0 + [ 110.392725] entry_SYSCALL_64_after_hwframe+0x44/0xa9 + + Fixes: 7dee607ed0e0 ("net/mlx5: Support lockless FTE read lookups") + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 70 +++++------------------ + drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 - + 2 files changed, 15 insertions(+), 56 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 7138dcf2e538..0e6a89b81972 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node) + } + } + +-static void del_sw_fte_rcu(struct rcu_head *head) +-{ +- struct fs_fte *fte = container_of(head, struct fs_fte, rcu); +- struct mlx5_flow_steering *steering = get_steering(&fte->node); +- +- kmem_cache_free(steering->ftes_cache, fte); +-} +- + static void del_sw_fte(struct fs_node *node) + { ++ struct mlx5_flow_steering *steering = get_steering(node); + struct mlx5_flow_group *fg; + struct fs_fte *fte; + int err; +@@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node) + rhash_fte); + WARN_ON(err); + ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); +- +- call_rcu(&fte->rcu, del_sw_fte_rcu); ++ kmem_cache_free(steering->ftes_cache, fte); + } + + static void del_hw_flow_group(struct fs_node *node) +@@ -1634,47 +1626,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head) + } + + static struct fs_fte * +-lookup_fte_for_write_locked(struct 
mlx5_flow_group *g, const u32 *match_value) ++lookup_fte_locked(struct mlx5_flow_group *g, ++ const u32 *match_value, ++ bool take_write) + { + struct fs_fte *fte_tmp; + +- nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); +- +- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte); +- if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { +- fte_tmp = NULL; +- goto out; +- } +- +- if (!fte_tmp->node.active) { +- tree_put_node(&fte_tmp->node, false); +- fte_tmp = NULL; +- goto out; +- } +- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); +- +-out: +- up_write_ref_node(&g->node, false); +- return fte_tmp; +-} +- +-static struct fs_fte * +-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) +-{ +- struct fs_fte *fte_tmp; +- +- if (!tree_get_node(&g->node)) +- return NULL; +- +- rcu_read_lock(); +- fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte); ++ if (take_write) ++ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); ++ else ++ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT); ++ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, ++ rhash_fte); + if (!fte_tmp || !tree_get_node(&fte_tmp->node)) { +- rcu_read_unlock(); + fte_tmp = NULL; + goto out; + } +- rcu_read_unlock(); +- + if (!fte_tmp->node.active) { + tree_put_node(&fte_tmp->node, false); + fte_tmp = NULL; +@@ -1682,19 +1649,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value) + } + + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); +- + out: +- tree_put_node(&g->node, false); +- return fte_tmp; +-} +- +-static struct fs_fte * +-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write) +-{ +- if (write) +- return lookup_fte_for_write_locked(g, match_value); ++ if (take_write) ++ up_write_ref_node(&g->node, false); + else +- return lookup_fte_for_read_locked(g, match_value); ++ up_read_ref_node(&g->node); ++ return fte_tmp; + } + + static struct mlx5_flow_handle * +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +index 8e4ca13f4d74..c6221ccbdddf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +@@ -205,7 +205,6 @@ struct fs_fte { + enum fs_fte_status status; + struct mlx5_fc *counter; + struct rhash_head hash; +- struct rcu_head rcu; + int modify_mask; + }; + +-- +2.13.6 + diff --git a/SOURCES/0121-netdrv-net-mlx5e-Fix-hairpin-RSS-table-size.patch b/SOURCES/0121-netdrv-net-mlx5e-Fix-hairpin-RSS-table-size.patch new file mode 100644 index 0000000..265b1ea --- /dev/null +++ b/SOURCES/0121-netdrv-net-mlx5e-Fix-hairpin-RSS-table-size.patch @@ -0,0 +1,115 @@ +From 14f853c86f2b801cbe7aa38ebc2fac7883122ec7 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:46 -0400 +Subject: [PATCH 121/312] [netdrv] net/mlx5e: Fix hairpin RSS table size + +Message-id: <20200510150452.10307-82-ahleihel@redhat.com> +Patchwork-id: 306705 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 81/87] net/mlx5e: Fix hairpin RSS table size +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5-rc6 + +commit 6412bb396a63f28de994b1480edf8e4caf4aa494 +Author: Eli Cohen +Date: Wed Dec 11 09:17:40 2019 +0200 + + net/mlx5e: Fix hairpin RSS table size + + Set hairpin table size to the corret size, based on the groups 
that + would be created in it. Groups are laid out on the table such that a + group occupies a range of entries in the table. This implies that the + group ranges should have correspondence to the table they are laid upon. + + The patch cited below made group 1's size to grow hence causing + overflow of group range laid on the table. + + Fixes: a795d8db2a6d ("net/mlx5e: Support RSS for IP-in-IP and IPv6 tunneled packets") + Signed-off-by: Eli Cohen + Signed-off-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 16 ++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 16 ---------------- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- + 3 files changed, 17 insertions(+), 17 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +index 68d593074f6c..d48292ccda29 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +@@ -122,6 +122,22 @@ enum { + #endif + }; + ++#define MLX5E_TTC_NUM_GROUPS 3 ++#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) ++#define MLX5E_TTC_GROUP2_SIZE BIT(1) ++#define MLX5E_TTC_GROUP3_SIZE BIT(0) ++#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ ++ MLX5E_TTC_GROUP2_SIZE +\ ++ MLX5E_TTC_GROUP3_SIZE) ++ ++#define MLX5E_INNER_TTC_NUM_GROUPS 3 ++#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) ++#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) ++#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) ++#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ ++ MLX5E_INNER_TTC_GROUP2_SIZE +\ ++ MLX5E_INNER_TTC_GROUP3_SIZE) ++ + #ifdef CONFIG_MLX5_EN_RXNFC + + struct mlx5e_ethtool_table { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index 15b7f0f1427c..73d3dc07331f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -904,22 +904,6 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, + return err; + } + +-#define MLX5E_TTC_NUM_GROUPS 3 +-#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) +-#define MLX5E_TTC_GROUP2_SIZE BIT(1) +-#define MLX5E_TTC_GROUP3_SIZE BIT(0) +-#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ +- MLX5E_TTC_GROUP2_SIZE +\ +- MLX5E_TTC_GROUP3_SIZE) +- +-#define MLX5E_INNER_TTC_NUM_GROUPS 3 +-#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) +-#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) +-#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) +-#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ +- MLX5E_INNER_TTC_GROUP2_SIZE +\ +- MLX5E_INNER_TTC_GROUP3_SIZE) +- + static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, + bool use_ipv) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 0af1d5b1e438..daef493b8e50 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -586,7 +586,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; + +- ft_attr->max_fte = MLX5E_NUM_TT; ++ ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; + ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; + ft_attr->prio = MLX5E_TC_PRIO; + } +-- +2.13.6 + diff --git 
a/SOURCES/0122-netdrv-net-mlx5-Fix-lowest-FDB-pool-size.patch b/SOURCES/0122-netdrv-net-mlx5-Fix-lowest-FDB-pool-size.patch new file mode 100644 index 0000000..1457b97 --- /dev/null +++ b/SOURCES/0122-netdrv-net-mlx5-Fix-lowest-FDB-pool-size.patch @@ -0,0 +1,59 @@ +From a88b00c14457d1a833ee903803b1c6e094fb4bd0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:47 -0400 +Subject: [PATCH 122/312] [netdrv] net/mlx5: Fix lowest FDB pool size + +Message-id: <20200510150452.10307-83-ahleihel@redhat.com> +Patchwork-id: 306706 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 82/87] net/mlx5: Fix lowest FDB pool size +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5 + +commit 93b8a7ecb7287cc9b0196f12a25b57c2462d11dc +Author: Paul Blakey +Date: Tue Dec 31 17:04:15 2019 +0200 + + net/mlx5: Fix lowest FDB pool size + + The pool sizes represent the pool sizes in the fw. when we request + a pool size from fw, it will return the next possible group. + We track how many pools the fw has left and start requesting groups + from the big to the small. + When we start request 4k group, which doesn't exists in fw, fw + wants to allocate the next possible size, 64k, but will fail since + its exhausted. The correct smallest pool size in fw is 128 and not 4k. + + Fixes: e52c28024008 ("net/mlx5: E-Switch, Add chains and priorities") + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 121abcae993a..9d2d850463bb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -866,7 +866,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) + */ + #define ESW_SIZE (16 * 1024 * 1024) + const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, +- 64 * 1024, 4 * 1024 }; ++ 64 * 1024, 128 }; + + static int + get_sz_from_pool(struct mlx5_eswitch *esw) +-- +2.13.6 + diff --git a/SOURCES/0123-netdrv-net-mlx5-Update-the-list-of-the-PCI-supported.patch b/SOURCES/0123-netdrv-net-mlx5-Update-the-list-of-the-PCI-supported.patch new file mode 100644 index 0000000..8bfd1f0 --- /dev/null +++ b/SOURCES/0123-netdrv-net-mlx5-Update-the-list-of-the-PCI-supported.patch @@ -0,0 +1,53 @@ +From 9f67af245d4302d403f376a8574965b1df5e8682 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:48 -0400 +Subject: [PATCH 123/312] [netdrv] net/mlx5: Update the list of the PCI + supported devices + +Message-id: <20200510150452.10307-84-ahleihel@redhat.com> +Patchwork-id: 306708 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 83/87] net/mlx5: Update the list of the PCI supported devices +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5 + +commit 505a7f5478062c6cd11e22022d9f1bf64cd8eab3 +Author: Meir Lichtinger +Date: Thu Dec 12 16:09:33 2019 +0200 + + net/mlx5: Update the list of the PCI supported devices + + Add the upcoming ConnectX-7 
device ID. + + Fixes: 85327a9c4150 ("net/mlx5: Update the list of the PCI supported devices") + Signed-off-by: Meir Lichtinger + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index f34eeb5af1ef..05d66dd6791b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1591,6 +1591,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ ++ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ +-- +2.13.6 + diff --git a/SOURCES/0124-netdrv-net-mlx5-E-Switch-Prevent-ingress-rate-config.patch b/SOURCES/0124-netdrv-net-mlx5-E-Switch-Prevent-ingress-rate-config.patch new file mode 100644 index 0000000..ba705e6 --- /dev/null +++ b/SOURCES/0124-netdrv-net-mlx5-E-Switch-Prevent-ingress-rate-config.patch @@ -0,0 +1,71 @@ +From 089fd5fe79675e3a41d6bb6dfce19745fee0fe66 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:49 -0400 +Subject: [PATCH 124/312] [netdrv] net/mlx5: E-Switch, Prevent ingress rate + configuration of uplink rep + +Message-id: <20200510150452.10307-85-ahleihel@redhat.com> +Patchwork-id: 306707 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 84/87] net/mlx5: E-Switch, Prevent ingress rate configuration of uplink rep +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5 + +commit e401a1848be87123a2b2049addbf21138cb47081 +Author: Eli Cohen +Date: Sun Jan 12 13:43:37 2020 +0200 + + net/mlx5: E-Switch, Prevent ingress rate configuration of uplink rep + + Since the implementation relies on limiting the VF transmit rate to + simulate ingress rate limiting, and since either uplink representor or + ecpf are not associated with a VF, we limit the rate limit configuration + for those ports. 
+ + Fixes: fcb64c0f5640 ("net/mlx5: E-Switch, add ingress rate support") + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index daef493b8e50..f701d62a86b1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -4010,6 +4010,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + u32 rate_mbps; + int err; + ++ vport_num = rpriv->rep->vport; ++ if (vport_num >= MLX5_VPORT_ECPF) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Ingress rate limit is supported only for Eswitch ports connected to VFs"); ++ return -EOPNOTSUPP; ++ } ++ + esw = priv->mdev->priv.eswitch; + /* rate is given in bytes/sec. + * First convert to bits/sec and then round to the nearest mbit/secs. +@@ -4018,8 +4025,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + * 1 mbit/sec. + */ + rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; +- vport_num = rpriv->rep->vport; +- + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + if (err) + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); +-- +2.13.6 + diff --git a/SOURCES/0125-netdrv-net-mlx5e-kTLS-Fix-corner-case-checks-in-TX-r.patch b/SOURCES/0125-netdrv-net-mlx5e-kTLS-Fix-corner-case-checks-in-TX-r.patch new file mode 100644 index 0000000..7ea4aee --- /dev/null +++ b/SOURCES/0125-netdrv-net-mlx5e-kTLS-Fix-corner-case-checks-in-TX-r.patch @@ -0,0 +1,136 @@ +From 0adbdaae599160549f4ed000cf10e0c68667be45 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:50 -0400 +Subject: [PATCH 125/312] [netdrv] net/mlx5e: kTLS, Fix corner-case checks in + TX resync flow + +Message-id: <20200510150452.10307-86-ahleihel@redhat.com> +Patchwork-id: 306709 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 85/87] net/mlx5e: kTLS, Fix corner-case checks in TX resync flow +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5 + +commit ffbd9ca94e2ebbfe802d4b28bab5ba19818de853 +Author: Tariq Toukan +Date: Sun Jan 12 16:22:14 2020 +0200 + + net/mlx5e: kTLS, Fix corner-case checks in TX resync flow + + There are the following cases: + + 1. Packet ends before start marker: bypass offload. + 2. Packet starts before start marker and ends after it: drop, + not supported, breaks contract with kernel. + 3. packet ends before tls record info starts: drop, + this packet was already acknowledged and its record info + was released. + + Add the above as comment in code. + + Mind possible wraparounds of the TCP seq, replace the simple comparison + with a call to the TCP before() method. + + In addition, remove logic that handles negative sync_len values, + as it became impossible. 
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Fixes: 46a3ea98074e ("net/mlx5e: kTLS, Enhance TX resync flow") + Signed-off-by: Tariq Toukan + Signed-off-by: Boris Pismenny + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 33 +++++++++++++--------- + 1 file changed, 19 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 778dab1af8fc..8dbb92176bd7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, + + struct tx_sync_info { + u64 rcd_sn; +- s32 sync_len; ++ u32 sync_len; + int nr_frags; + skb_frag_t frags[MAX_SKB_FRAGS]; + }; +@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval { + + static enum mlx5e_ktls_sync_retval + tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, +- u32 tcp_seq, struct tx_sync_info *info) ++ u32 tcp_seq, int datalen, struct tx_sync_info *info) + { + struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; + enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; + struct tls_record_info *record; + int remaining, i = 0; + unsigned long flags; ++ bool ends_before; + + spin_lock_irqsave(&tx_ctx->lock, flags); + record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn); +@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, + goto out; + } + +- if (unlikely(tcp_seq < tls_record_start_seq(record))) { +- ret = tls_record_is_start_marker(record) ? +- MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; ++ /* There are the following cases: ++ * 1. packet ends before start marker: bypass offload. ++ * 2. packet starts before start marker and ends after it: drop, ++ * not supported, breaks contract with kernel. ++ * 3. packet ends before tls record info starts: drop, ++ * this packet was already acknowledged and its record info ++ * was released. ++ */ ++ ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); ++ ++ if (unlikely(tls_record_is_start_marker(record))) { ++ ret = ends_before ? 
MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; ++ goto out; ++ } else if (ends_before) { ++ ret = MLX5E_KTLS_SYNC_FAIL; + goto out; + } + +@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + u8 num_wqebbs; + int i = 0; + +- ret = tx_sync_info_get(priv_tx, seq, &info); ++ ret = tx_sync_info_get(priv_tx, seq, datalen, &info); + if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { + if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { + stats->tls_skip_no_sync_data++; +@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + goto err_out; + } + +- if (unlikely(info.sync_len < 0)) { +- if (likely(datalen <= -info.sync_len)) +- return MLX5E_KTLS_SYNC_DONE; +- +- stats->tls_drop_bypass_req++; +- goto err_out; +- } +- + stats->tls_ooo++; + + tx_post_resync_params(sq, priv_tx, info.rcd_sn); +-- +2.13.6 + diff --git a/SOURCES/0126-netdrv-net-mlx5e-kTLS-Remove-redundant-posts-in-TX-r.patch b/SOURCES/0126-netdrv-net-mlx5e-kTLS-Remove-redundant-posts-in-TX-r.patch new file mode 100644 index 0000000..c7071cd --- /dev/null +++ b/SOURCES/0126-netdrv-net-mlx5e-kTLS-Remove-redundant-posts-in-TX-r.patch @@ -0,0 +1,56 @@ +From 6243ccbbf9514855d73dde9397cd1c256013279c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:51 -0400 +Subject: [PATCH 126/312] [netdrv] net/mlx5e: kTLS, Remove redundant posts in + TX resync flow + +Message-id: <20200510150452.10307-87-ahleihel@redhat.com> +Patchwork-id: 306710 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 86/87] net/mlx5e: kTLS, Remove redundant posts in TX resync flow +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5 + +commit 1e92899791358dba94a9db7cc3b6004636b5a2f6 +Author: Tariq Toukan +Date: Mon Jan 13 14:46:09 2020 +0200 + + net/mlx5e: kTLS, Remove redundant posts in TX resync flow + + The call to tx_post_resync_params() is done earlier in the flow, + the post of the control WQEs is unnecessarily repeated. Remove it. 
+ + Fixes: 700ec4974240 ("net/mlx5e: kTLS, Fix missing SQ edge fill") + Signed-off-by: Tariq Toukan + Signed-off-by: Boris Pismenny + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 8dbb92176bd7..592e921aa167 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -383,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, + if (unlikely(contig_wqebbs_room < num_wqebbs)) + mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); + +- tx_post_resync_params(sq, priv_tx, info.rcd_sn); +- + for (; i < info.nr_frags; i++) { + unsigned int orig_fsz, frag_offset = 0, n = 0; + skb_frag_t *f = &info.frags[i]; +-- +2.13.6 + diff --git a/SOURCES/0127-netdrv-net-mlx5e-kTLS-Do-not-send-decrypted-marked-S.patch b/SOURCES/0127-netdrv-net-mlx5e-kTLS-Do-not-send-decrypted-marked-S.patch new file mode 100644 index 0000000..f4988a8 --- /dev/null +++ b/SOURCES/0127-netdrv-net-mlx5e-kTLS-Do-not-send-decrypted-marked-S.patch @@ -0,0 +1,78 @@ +From 7bdb3136d262ec2afca5f131fe787ec5b7d23f4f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Sun, 10 May 2020 15:04:52 -0400 +Subject: [PATCH 127/312] [netdrv] net/mlx5e: kTLS, Do not send + decrypted-marked SKBs via non-accel path + +Message-id: <20200510150452.10307-88-ahleihel@redhat.com> +Patchwork-id: 306711 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789380 v2 87/87] net/mlx5e: kTLS, Do not send decrypted-marked SKBs via non-accel path +Bugzilla: 1789380 +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson +RH-Acked-by: Tony Camuso +RH-Acked-by: Jonathan Toppins + +Bugzilla: http://bugzilla.redhat.com/1789380 +Upstream: v5.5 + +commit 342508c1c7540e281fd36151c175ba5ff954a99f +Author: Tariq Toukan +Date: Mon Jan 20 13:42:00 2020 +0200 + + net/mlx5e: kTLS, Do not send decrypted-marked SKBs via non-accel path + + When TCP out-of-order is identified (unexpected tcp seq mismatch), driver + analyzes the packet and decides what handling should it get: + 1. go to accelerated path (to be encrypted in HW), + 2. go to regular xmit path (send w/o encryption), + 3. drop. + + Packets marked with skb->decrypted by the TLS stack in the TX flow skips + SW encryption, and rely on the HW offload. + Verify that such packets are never sent un-encrypted on the wire. + Add a WARN to catch such bugs, and prefer dropping the packet in these cases. 
+ + Fixes: 46a3ea98074e ("net/mlx5e: kTLS, Enhance TX resync flow") + Signed-off-by: Tariq Toukan + Signed-off-by: Boris Pismenny + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index 592e921aa167..f260dd96873b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -458,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, + enum mlx5e_ktls_sync_retval ret = + mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); + +- if (likely(ret == MLX5E_KTLS_SYNC_DONE)) ++ switch (ret) { ++ case MLX5E_KTLS_SYNC_DONE: + *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); +- else if (ret == MLX5E_KTLS_SYNC_FAIL) ++ break; ++ case MLX5E_KTLS_SYNC_SKIP_NO_DATA: ++ if (likely(!skb->decrypted)) ++ goto out; ++ WARN_ON_ONCE(1); ++ /* fall-through */ ++ default: /* MLX5E_KTLS_SYNC_FAIL */ + goto err_out; +- else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ +- goto out; ++ } + } + + priv_tx->expected_seq = seq + datalen; +-- +2.13.6 + diff --git a/SOURCES/0128-netdrv-net-mlx5-limit-the-function-in-local-scope.patch b/SOURCES/0128-netdrv-net-mlx5-limit-the-function-in-local-scope.patch new file mode 100644 index 0000000..0eebdff --- /dev/null +++ b/SOURCES/0128-netdrv-net-mlx5-limit-the-function-in-local-scope.patch @@ -0,0 +1,68 @@ +From 01232c328228cc2b6fe0aa051be8fdea47fb4d4b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:34 -0400 +Subject: [PATCH 128/312] [netdrv] net/mlx5: limit the function in local scope + +Message-id: <20200512105530.4207-9-ahleihel@redhat.com> +Patchwork-id: 306880 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 008/124] net/mlx5: limit the function in local scope +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 8007880a2ca97c34e7ccd1fcf12daf854b792544 +Author: Zhu Yanjun +Date: Sat Dec 14 10:51:17 2019 +0200 + + net/mlx5: limit the function in local scope + + The function mlx5_buf_alloc_node is only used by the function in the + local scope. So it is appropriate to limit this function in the local + scope. 
+ + Signed-off-by: Zhu Yanjun + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 4 ++-- + include/linux/mlx5/driver.h | 2 -- + 2 files changed, 2 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +index c4179dc8c335..f99593ef8605 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +@@ -71,8 +71,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, + return cpu_handle; + } + +-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, +- struct mlx5_frag_buf *buf, int node) ++static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, ++ struct mlx5_frag_buf *buf, int node) + { + dma_addr_t t; + +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index 904d864f7259..0d728007078c 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -935,8 +935,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev); + void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); + void mlx5_drain_health_wq(struct mlx5_core_dev *dev); + void mlx5_trigger_health_work(struct mlx5_core_dev *dev); +-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, +- struct mlx5_frag_buf *buf, int node); + int mlx5_buf_alloc(struct mlx5_core_dev *dev, + int size, struct mlx5_frag_buf *buf); + void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); +-- +2.13.6 + diff --git a/SOURCES/0129-netdrv-mlx5-work-around-high-stack-usage-with-gcc.patch b/SOURCES/0129-netdrv-mlx5-work-around-high-stack-usage-with-gcc.patch new file mode 100644 index 0000000..9eb31dd --- /dev/null +++ b/SOURCES/0129-netdrv-mlx5-work-around-high-stack-usage-with-gcc.patch @@ -0,0 +1,62 @@ +From 32bd5c04ec418a69b6e9701faa84ef0a7abb2745 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:35 -0400 +Subject: [PATCH 129/312] [netdrv] mlx5: work around high stack usage with gcc + +Message-id: <20200512105530.4207-10-ahleihel@redhat.com> +Patchwork-id: 306881 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 009/124] mlx5: work around high stack usage with gcc +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 42ae1a5c76691928ed217c7e40269db27f5225e9 +Author: Arnd Bergmann +Date: Sat Jan 4 22:51:44 2020 +0100 + + mlx5: work around high stack usage with gcc + + In some configurations, gcc tries too hard to optimize this code: + + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c: In function 'mlx5e_grp_sw_update_stats': + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c:302:1: error: the frame size of 1336 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] + + As was stated in the bug report, the reason is that gcc runs into a corner + case in the register allocator that is rather hard to fix in a good way. + + As there is an easy way to work around it, just add a comment and the + barrier that stops gcc from trying to overoptimize the function. 
+ + Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 + Cc: Adhemerval Zanella + Signed-off-by: Arnd Bergmann + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index 23587f55fad7..30b216d9284c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -349,6 +349,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) + s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; + #endif + s->tx_cqes += sq_stats->cqes; ++ ++ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ ++ barrier(); + } + } + } +-- +2.13.6 + diff --git a/SOURCES/0130-netdrv-net-mlx5e-Support-accept-action-on-nic-table.patch b/SOURCES/0130-netdrv-net-mlx5e-Support-accept-action-on-nic-table.patch new file mode 100644 index 0000000..52069b5 --- /dev/null +++ b/SOURCES/0130-netdrv-net-mlx5e-Support-accept-action-on-nic-table.patch @@ -0,0 +1,72 @@ +From a5c1c749cc7431312fdc02460312097395ffef29 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:36 -0400 +Subject: [PATCH 130/312] [netdrv] net/mlx5e: Support accept action on nic + table + +Message-id: <20200512105530.4207-11-ahleihel@redhat.com> +Patchwork-id: 306882 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 010/124] net/mlx5e: Support accept action on nic table +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 15fc92ec3ad4cfb34218e6b6c38c1355938fe49e +Author: Tonghao Zhang +Date: Tue Dec 10 22:49:42 2019 +0800 + + net/mlx5e: Support accept action on nic table + + In one case, we may forward packets from one vport + to others, but only one packets flow will be accepted, + which destination ip was assign to VF. 
+ + +-----+ +-----+ +-----+ + | VFn | | VF1 | | VF0 | accept + +--+--+ +--+--+ hairpin +--^--+ + | | <--------------- | + | | | + +--+-----------v-+ +--+-------------+ + | eswitch PF1 | | eswitch PF0 | + +----------------+ +----------------+ + + tc filter add dev $PF0 protocol all parent ffff: prio 1 handle 1 \ + flower skip_sw action mirred egress redirect dev $VF0_REP + tc filter add dev $VF0 protocol ip parent ffff: prio 1 handle 1 \ + flower skip_sw dst_ip $VF0_IP action pass + tc filter add dev $VF0 protocol all parent ffff: prio 2 handle 2 \ + flower skip_sw action mirred egress redirect dev $VF1 + + Signed-off-by: Tonghao Zhang + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index f701d62a86b1..93631f477140 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2831,6 +2831,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { ++ case FLOW_ACTION_ACCEPT: ++ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | ++ MLX5_FLOW_CONTEXT_ACTION_COUNT; ++ break; + case FLOW_ACTION_DROP: + action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + if (MLX5_CAP_FLOWTABLE(priv->mdev, +-- +2.13.6 + diff --git a/SOURCES/0131-netdrv-net-mlx5-Increase-the-max-number-of-channels-.patch b/SOURCES/0131-netdrv-net-mlx5-Increase-the-max-number-of-channels-.patch new file mode 100644 index 0000000..cdb2a06 --- /dev/null +++ b/SOURCES/0131-netdrv-net-mlx5-Increase-the-max-number-of-channels-.patch @@ -0,0 +1,128 @@ +From 5f7d29b04eb5cc609b4cae6aabd0e77353452f62 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:37 -0400 +Subject: [PATCH 131/312] [netdrv] net/mlx5: Increase the max number of + channels to 128 + +Message-id: <20200512105530.4207-12-ahleihel@redhat.com> +Patchwork-id: 306883 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 011/124] net/mlx5: Increase the max number of channels to 128 +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 57c7fce14b1ad512a42abe33cb721a2ea3520d4b +Author: Fan Li +Date: Mon Dec 16 14:46:15 2019 +0200 + + net/mlx5: Increase the max number of channels to 128 + + Currently the max number of channels is limited to 64, which is half of + the indirection table size to allow some flexibility. But on servers + with more than 64 cores, users may want to utilize more queues. + + This patch increases the advertised max number of channels to 128 by + changing the ratio between channels and indirection table slots to 1:1. + At the same time, the driver still enable no more than 64 channels at + loading. Users can change it by ethtool afterwards. 
+ + Signed-off-by: Fan Li + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 +++--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 +++++++----- + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4 ++-- + 3 files changed, 12 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 319797f42105..bd34b1851162 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -134,7 +134,7 @@ struct page_pool; + #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 + #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) + #define MLX5E_MIN_NUM_CHANNELS 0x1 +-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) ++#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE + #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) + #define MLX5E_TX_CQ_POLL_BUDGET 128 + #define MLX5E_TX_XSK_POLL_BUDGET 64 +@@ -1170,11 +1170,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv); + void mlx5e_detach_netdev(struct mlx5e_priv *priv); + void mlx5e_destroy_netdev(struct mlx5e_priv *priv); + void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); +-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, ++void mlx5e_build_nic_params(struct mlx5e_priv *priv, + struct mlx5e_xsk *xsk, + struct mlx5e_rss_params *rss_params, + struct mlx5e_params *params, +- u16 max_channels, u16 mtu); ++ u16 mtu); + void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); + void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 2f337a70e157..7815cae1af54 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4737,17 +4737,19 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, + tirc_default_config[tt].rx_hash_fields; + } + +-void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, ++void mlx5e_build_nic_params(struct mlx5e_priv *priv, + struct mlx5e_xsk *xsk, + struct mlx5e_rss_params *rss_params, + struct mlx5e_params *params, +- u16 max_channels, u16 mtu) ++ u16 mtu) + { ++ struct mlx5_core_dev *mdev = priv->mdev; + u8 rx_cq_period_mode; + + params->sw_mtu = mtu; + params->hard_mtu = MLX5E_ETH_HARD_MTU; +- params->num_channels = max_channels; ++ params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, ++ priv->max_nch); + params->num_tc = 1; + + /* SQ */ +@@ -4986,8 +4988,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, + if (err) + return err; + +- mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params, +- priv->max_nch, netdev->mtu); ++ mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params, ++ netdev->mtu); + + mlx5e_timestamp_init(priv); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +index c4323bd42132..56078b23f1a0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +@@ -87,8 +87,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, + mlx5e_set_netdev_mtu_boundaries(priv); + netdev->mtu = netdev->max_mtu; + +- mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params, 
+- priv->max_nch, netdev->mtu); ++ mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params, ++ netdev->mtu); + mlx5i_build_nic_params(mdev, &priv->channels.params); + + mlx5e_timestamp_init(priv); +-- +2.13.6 + diff --git a/SOURCES/0132-netdrv-net-mlx5-Reduce-No-CQ-found-log-level-from-wa.patch b/SOURCES/0132-netdrv-net-mlx5-Reduce-No-CQ-found-log-level-from-wa.patch new file mode 100644 index 0000000..02cf39b --- /dev/null +++ b/SOURCES/0132-netdrv-net-mlx5-Reduce-No-CQ-found-log-level-from-wa.patch @@ -0,0 +1,70 @@ +From d35bd7cf635913f6e4d8dc3ec8fdaf7ea166d313 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:38 -0400 +Subject: [PATCH 132/312] [netdrv] net/mlx5: Reduce No CQ found log level from + warn to debug + +Message-id: <20200512105530.4207-13-ahleihel@redhat.com> +Patchwork-id: 306884 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 012/124] net/mlx5: Reduce No CQ found log level from warn to debug +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 7396ae3d1cfe05be42e5f6b1883c9ed594cc42ba +Author: Parav Pandit +Date: Thu Dec 5 16:06:20 2019 -0600 + + net/mlx5: Reduce No CQ found log level from warn to debug + + In below sequence, a EQE entry arrives for a CQ which is on the path of + being destroyed. + + cpu-0 cpu-1 + ------ ----- + mlx5_core_destroy_cq() mlx5_eq_comp_int() + mlx5_eq_del_cq() [..] + radix_tree_delete() [..] + [..] mlx5_eq_cq_get() /* Didn't find CQ is + * a valid case. + */ + /* destroy CQ in hw */ + mlx5_cmd_exec() + + This is still a valid scenario and correct delete CQ sequence, as + mirror of the CQ create sequence. + Hence, suppress the non harmful debug message from warn to debug level. + Keep the debug log message rate limited because user application can + trigger it repeatedly. 
+ + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +index 580c71cb9dfa..2c716abc0f27 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +@@ -156,7 +156,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, + cq->comp(cq, eqe); + mlx5_cq_put(cq); + } else { +- mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); ++ dev_dbg_ratelimited(eq->dev->device, ++ "Completion event for bogus CQ 0x%x\n", cqn); + } + + ++eq->cons_index; +-- +2.13.6 + diff --git a/SOURCES/0133-netdrv-net-mlx5-Use-async-EQ-setup-cleanup-helpers-f.patch b/SOURCES/0133-netdrv-net-mlx5-Use-async-EQ-setup-cleanup-helpers-f.patch new file mode 100644 index 0000000..a0e49a3 --- /dev/null +++ b/SOURCES/0133-netdrv-net-mlx5-Use-async-EQ-setup-cleanup-helpers-f.patch @@ -0,0 +1,206 @@ +From 0f58f263b3f152104de15940709eca91b8a8f47a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:39 -0400 +Subject: [PATCH 133/312] [netdrv] net/mlx5: Use async EQ setup cleanup helpers + for multiple EQs + +Message-id: <20200512105530.4207-14-ahleihel@redhat.com> +Patchwork-id: 306885 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 013/124] net/mlx5: Use async EQ setup cleanup helpers for multiple EQs +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 3ed879965cc4ea13fe0908468b653c4ff2cb1309 +Author: Parav Pandit +Date: Fri Dec 6 15:13:41 2019 -0600 + + net/mlx5: Use async EQ setup cleanup helpers for multiple EQs + + Use helper routines to setup and teardown multiple EQs and reuse the + code in setup, cleanup and error unwinding flows. 
+ + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 114 ++++++++++++--------------- + 1 file changed, 49 insertions(+), 65 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +index 2c716abc0f27..cccea3a8eddd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +@@ -564,6 +564,39 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) + gather_user_async_events(dev, mask); + } + ++static int ++setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq, ++ struct mlx5_eq_param *param, const char *name) ++{ ++ int err; ++ ++ eq->irq_nb.notifier_call = mlx5_eq_async_int; ++ ++ err = create_async_eq(dev, &eq->core, param); ++ if (err) { ++ mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err); ++ return err; ++ } ++ err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb); ++ if (err) { ++ mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err); ++ destroy_async_eq(dev, &eq->core); ++ } ++ return err; ++} ++ ++static void cleanup_async_eq(struct mlx5_core_dev *dev, ++ struct mlx5_eq_async *eq, const char *name) ++{ ++ int err; ++ ++ mlx5_eq_disable(dev, &eq->core, &eq->irq_nb); ++ err = destroy_async_eq(dev, &eq->core); ++ if (err) ++ mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n", ++ name, err); ++} ++ + static int create_async_eqs(struct mlx5_core_dev *dev) + { + struct mlx5_eq_table *table = dev->priv.eq_table; +@@ -573,77 +606,45 @@ static int create_async_eqs(struct mlx5_core_dev *dev) + MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR); + mlx5_eq_notifier_register(dev, &table->cq_err_nb); + +- table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int; + param = (struct mlx5_eq_param) { + .irq_index = 0, + .nent = MLX5_NUM_CMD_EQE, ++ .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, + }; +- +- param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD; +- err = create_async_eq(dev, &table->cmd_eq.core, ¶m); +- if (err) { +- mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); +- goto err0; +- } +- err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); +- if (err) { +- mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err); ++ err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd"); ++ if (err) + goto err1; +- } ++ + mlx5_cmd_use_events(dev); + +- table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int; + param = (struct mlx5_eq_param) { + .irq_index = 0, + .nent = MLX5_NUM_ASYNC_EQE, + }; + + gather_async_events_mask(dev, param.mask); +- err = create_async_eq(dev, &table->async_eq.core, ¶m); +- if (err) { +- mlx5_core_warn(dev, "failed to create async EQ %d\n", err); ++ err = setup_async_eq(dev, &table->async_eq, ¶m, "async"); ++ if (err) + goto err2; +- } +- err = mlx5_eq_enable(dev, &table->async_eq.core, +- &table->async_eq.irq_nb); +- if (err) { +- mlx5_core_warn(dev, "failed to enable async EQ %d\n", err); +- goto err3; +- } + +- table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int; + param = (struct mlx5_eq_param) { + .irq_index = 0, + .nent = /* TODO: sriov max_vf + */ 1, ++ .mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST, + }; + +- param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST; +- err = create_async_eq(dev, &table->pages_eq.core, ¶m); +- if (err) { +- mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); +- goto err4; +- } +- err = mlx5_eq_enable(dev, 
&table->pages_eq.core, +- &table->pages_eq.irq_nb); +- if (err) { +- mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err); +- goto err5; +- } ++ err = setup_async_eq(dev, &table->pages_eq, ¶m, "pages"); ++ if (err) ++ goto err3; + +- return err; ++ return 0; + +-err5: +- destroy_async_eq(dev, &table->pages_eq.core); +-err4: +- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); + err3: +- destroy_async_eq(dev, &table->async_eq.core); ++ cleanup_async_eq(dev, &table->async_eq, "async"); + err2: + mlx5_cmd_use_polling(dev); +- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); ++ cleanup_async_eq(dev, &table->cmd_eq, "cmd"); + err1: +- destroy_async_eq(dev, &table->cmd_eq.core); +-err0: + mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); + return err; + } +@@ -651,28 +652,11 @@ static int create_async_eqs(struct mlx5_core_dev *dev) + static void destroy_async_eqs(struct mlx5_core_dev *dev) + { + struct mlx5_eq_table *table = dev->priv.eq_table; +- int err; +- +- mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb); +- err = destroy_async_eq(dev, &table->pages_eq.core); +- if (err) +- mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", +- err); +- +- mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb); +- err = destroy_async_eq(dev, &table->async_eq.core); +- if (err) +- mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", +- err); + ++ cleanup_async_eq(dev, &table->pages_eq, "pages"); ++ cleanup_async_eq(dev, &table->async_eq, "async"); + mlx5_cmd_use_polling(dev); +- +- mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb); +- err = destroy_async_eq(dev, &table->cmd_eq.core); +- if (err) +- mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", +- err); +- ++ cleanup_async_eq(dev, &table->cmd_eq, "cmd"); + mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); + } + +-- +2.13.6 + diff --git a/SOURCES/0134-include-net-mlx5-Add-Virtio-Emulation-related-device.patch b/SOURCES/0134-include-net-mlx5-Add-Virtio-Emulation-related-device.patch new file mode 100644 index 0000000..79d2242 --- /dev/null +++ b/SOURCES/0134-include-net-mlx5-Add-Virtio-Emulation-related-device.patch @@ -0,0 +1,82 @@ +From 3b0c6baddf528e8895d3a42617df25f80825b150 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:40 -0400 +Subject: [PATCH 134/312] [include] net/mlx5: Add Virtio Emulation related + device capabilities + +Message-id: <20200512105530.4207-15-ahleihel@redhat.com> +Patchwork-id: 306887 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 014/124] net/mlx5: Add Virtio Emulation related device capabilities +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 90fbca5952436e7817910b33eb4464ddd77a8964 +Author: Yishai Hadas +Date: Thu Dec 12 13:09:24 2019 +0200 + + net/mlx5: Add Virtio Emulation related device capabilities + + Add Virtio Emulation related fields to the device capabilities. + + It includes a general bit to indicate whether Virtio Emulation is + supported and the capabilities structure itself. 
+ + Signed-off-by: Yishai Hadas + Reviewed-by: Shahaf Shuler + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 4c703796c233..1285f31b5f5c 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -87,6 +87,7 @@ enum { + enum { + MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), + MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), ++ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13), + }; + + enum { +@@ -956,6 +957,19 @@ struct mlx5_ifc_device_event_cap_bits { + u8 user_unaffiliated_events[4][0x40]; + }; + ++struct mlx5_ifc_device_virtio_emulation_cap_bits { ++ u8 reserved_at_0[0x20]; ++ ++ u8 reserved_at_20[0x13]; ++ u8 log_doorbell_stride[0x5]; ++ u8 reserved_at_38[0x3]; ++ u8 log_doorbell_bar_size[0x5]; ++ ++ u8 doorbell_bar_offset[0x40]; ++ ++ u8 reserved_at_80[0x780]; ++}; ++ + enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, +@@ -2756,6 +2770,7 @@ union mlx5_ifc_hca_cap_union_bits { + struct mlx5_ifc_fpga_cap_bits fpga_cap; + struct mlx5_ifc_tls_cap_bits tls_cap; + struct mlx5_ifc_device_mem_cap_bits device_mem_cap; ++ struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap; + u8 reserved_at_0[0x8000]; + }; + +-- +2.13.6 + diff --git a/SOURCES/0135-netdrv-net-mlx5-Expose-vDPA-emulation-device-capabil.patch b/SOURCES/0135-netdrv-net-mlx5-Expose-vDPA-emulation-device-capabil.patch new file mode 100644 index 0000000..577f64f --- /dev/null +++ b/SOURCES/0135-netdrv-net-mlx5-Expose-vDPA-emulation-device-capabil.patch @@ -0,0 +1,87 @@ +From 91b74fb713b93ef7a747fbf65251972735b8ab78 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:41 -0400 +Subject: [PATCH 135/312] [netdrv] net/mlx5: Expose vDPA emulation device + capabilities + +Message-id: <20200512105530.4207-16-ahleihel@redhat.com> +Patchwork-id: 306886 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 015/124] net/mlx5: Expose vDPA emulation device capabilities +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit ca1992c62cadb6c8e1e1b47e197b550f3cd89b76 +Author: Yishai Hadas +Date: Thu Dec 12 13:09:25 2019 +0200 + + net/mlx5: Expose vDPA emulation device capabilities + + Expose vDPA emulation device capabilities from the core layer. + It includes reading the capabilities from the firmware and exposing + helper functions to access the data. 
+ + Signed-off-by: Yishai Hadas + Reviewed-by: Shahaf Shuler + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fw.c | 7 +++++++ + include/linux/mlx5/device.h | 9 +++++++++ + 2 files changed, 16 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c +index a19790dee7b2..c375edfe528c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c +@@ -245,6 +245,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) + return err; + } + ++ if (MLX5_CAP_GEN_64(dev, general_obj_types) & ++ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) { ++ err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION); ++ if (err) ++ return err; ++ } ++ + return 0; + } + +diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h +index f3773e8536bb..62c9c47affb3 100644 +--- a/include/linux/mlx5/device.h ++++ b/include/linux/mlx5/device.h +@@ -1096,6 +1096,7 @@ enum mlx5_cap_type { + MLX5_CAP_DEV_MEM, + MLX5_CAP_RESERVED_16, + MLX5_CAP_TLS, ++ MLX5_CAP_VDPA_EMULATION = 0x13, + MLX5_CAP_DEV_EVENT = 0x14, + /* NUM OF CAP Types */ + MLX5_CAP_NUM +@@ -1288,6 +1289,14 @@ enum mlx5_qcam_feature_groups { + #define MLX5_CAP_DEV_EVENT(mdev, cap)\ + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) + ++#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ ++ MLX5_GET(device_virtio_emulation_cap, \ ++ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) ++ ++#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ ++ MLX5_GET64(device_virtio_emulation_cap, \ ++ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) ++ + enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, +-- +2.13.6 + diff --git a/SOURCES/0136-include-net-mlx5-Add-RoCE-accelerator-counters.patch b/SOURCES/0136-include-net-mlx5-Add-RoCE-accelerator-counters.patch new file mode 100644 index 0000000..e49786b --- /dev/null +++ b/SOURCES/0136-include-net-mlx5-Add-RoCE-accelerator-counters.patch @@ -0,0 +1,71 @@ +From b35a2cafae3805917b9ad9064e9410f0524ac5e9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:50 -0400 +Subject: [PATCH 136/312] [include] net/mlx5: Add RoCE accelerator counters + +Message-id: <20200512105530.4207-25-ahleihel@redhat.com> +Patchwork-id: 306896 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 024/124] net/mlx5: Add RoCE accelerator counters +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 8fd5b75d979bd8483b7e015c85834fbc82d499ac +Author: Leon Romanovsky +Date: Wed Jan 15 16:54:58 2020 +0200 + + net/mlx5: Add RoCE accelerator counters + + Add RoCE accelerator definitions. 
+ + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 17 +++++++++++++++-- + 1 file changed, 15 insertions(+), 2 deletions(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 1285f31b5f5c..e7f293ce80cf 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1200,7 +1200,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { + u8 reserved_at_130[0xa]; + u8 log_max_ra_res_dc[0x6]; + +- u8 reserved_at_140[0xa]; ++ u8 reserved_at_140[0x9]; ++ u8 roce_accl[0x1]; + u8 log_max_ra_req_qp[0x6]; + u8 reserved_at_150[0xa]; + u8 log_max_ra_res_qp[0x6]; +@@ -4751,7 +4752,19 @@ struct mlx5_ifc_query_q_counter_out_bits { + + u8 req_cqe_flush_error[0x20]; + +- u8 reserved_at_620[0x1e0]; ++ u8 reserved_at_620[0x20]; ++ ++ u8 roce_adp_retrans[0x20]; ++ ++ u8 roce_adp_retrans_to[0x20]; ++ ++ u8 roce_slow_restart[0x20]; ++ ++ u8 roce_slow_restart_cnps[0x20]; ++ ++ u8 roce_slow_restart_trans[0x20]; ++ ++ u8 reserved_at_6e0[0x120]; + }; + + struct mlx5_ifc_query_q_counter_in_bits { +-- +2.13.6 + diff --git a/SOURCES/0137-include-net-mlx5-Expose-relaxed-ordering-bits.patch b/SOURCES/0137-include-net-mlx5-Expose-relaxed-ordering-bits.patch new file mode 100644 index 0000000..57cc620 --- /dev/null +++ b/SOURCES/0137-include-net-mlx5-Expose-relaxed-ordering-bits.patch @@ -0,0 +1,63 @@ +From 7d40102a473952195a7d0499d5084e85444dd2ff Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:51 -0400 +Subject: [PATCH 137/312] [include] net/mlx5: Expose relaxed ordering bits + +Message-id: <20200512105530.4207-26-ahleihel@redhat.com> +Patchwork-id: 306897 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 025/124] net/mlx5: Expose relaxed ordering bits +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit a880a6dd853713e02a2804dd7ecaf39c7d3d9b18 +Author: Michael Guralnik +Date: Wed Jan 8 20:05:31 2020 +0200 + + net/mlx5: Expose relaxed ordering bits + + Expose relaxed ordering bits in HCA capability and mkey context structs. 
+ + Signed-off-by: Yishai Hadas + Signed-off-by: Michael Guralnik + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index e7f293ce80cf..5afcda75c7d6 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1177,7 +1177,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { + u8 log_max_cq[0x5]; + + u8 log_max_eq_sz[0x8]; +- u8 reserved_at_e8[0x2]; ++ u8 relaxed_ordering_write[0x1]; ++ u8 relaxed_ordering_read[0x1]; + u8 log_max_mkey[0x6]; + u8 reserved_at_f0[0x8]; + u8 dump_fill_mkey[0x1]; +@@ -3292,7 +3293,9 @@ struct mlx5_ifc_mkc_bits { + + u8 translations_octword_size[0x20]; + +- u8 reserved_at_1c0[0x1b]; ++ u8 reserved_at_1c0[0x19]; ++ u8 relaxed_ordering_read[0x1]; ++ u8 reserved_at_1d9[0x1]; + u8 log_page_size[0x5]; + + u8 reserved_at_1e0[0x20]; +-- +2.13.6 + diff --git a/SOURCES/0138-include-net-mlx5-Add-copy-header-action-struct-layou.patch b/SOURCES/0138-include-net-mlx5-Add-copy-header-action-struct-layou.patch new file mode 100644 index 0000000..0ec3ee4 --- /dev/null +++ b/SOURCES/0138-include-net-mlx5-Add-copy-header-action-struct-layou.patch @@ -0,0 +1,75 @@ +From 63f0cedaf8b1f570ce456a4049cc378e2da429a9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:58 -0400 +Subject: [PATCH 138/312] [include] net/mlx5: Add copy header action struct + layout + +Message-id: <20200512105530.4207-33-ahleihel@redhat.com> +Patchwork-id: 306905 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 032/124] net/mlx5: Add copy header action struct layout +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 31d8bde1c8812c9b44065dcd98e554488c6a98d2 +Author: Hamdan Igbaria +Date: Thu Jan 9 13:26:53 2020 +0200 + + net/mlx5: Add copy header action struct layout + + Add definition for copy header action, copy action is used + to copy header fields from source to destination. 
+ + Signed-off-by: Hamdan Igbaria + Signed-off-by: Alex Vesker + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 16 ++++++++++++++++ + 1 file changed, 16 insertions(+) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 5afcda75c7d6..ef0ed32db813 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -5502,6 +5502,21 @@ struct mlx5_ifc_add_action_in_bits { + u8 data[0x20]; + }; + ++struct mlx5_ifc_copy_action_in_bits { ++ u8 action_type[0x4]; ++ u8 src_field[0xc]; ++ u8 reserved_at_10[0x3]; ++ u8 src_offset[0x5]; ++ u8 reserved_at_18[0x3]; ++ u8 length[0x5]; ++ ++ u8 reserved_at_20[0x4]; ++ u8 dst_field[0xc]; ++ u8 reserved_at_30[0x3]; ++ u8 dst_offset[0x5]; ++ u8 reserved_at_38[0x8]; ++}; ++ + union mlx5_ifc_set_action_in_add_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; +@@ -5511,6 +5526,7 @@ union mlx5_ifc_set_action_in_add_action_in_auto_bits { + enum { + MLX5_ACTION_TYPE_SET = 0x1, + MLX5_ACTION_TYPE_ADD = 0x2, ++ MLX5_ACTION_TYPE_COPY = 0x3, + }; + + enum { +-- +2.13.6 + diff --git a/SOURCES/0139-include-net-mlx5-Add-mlx5_ifc-definitions-for-connec.patch b/SOURCES/0139-include-net-mlx5-Add-mlx5_ifc-definitions-for-connec.patch new file mode 100644 index 0000000..bdade6f --- /dev/null +++ b/SOURCES/0139-include-net-mlx5-Add-mlx5_ifc-definitions-for-connec.patch @@ -0,0 +1,113 @@ +From de02021ce3ace07649d7864cfa2521cd565a7d19 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:53:59 -0400 +Subject: [PATCH 139/312] [include] net/mlx5: Add mlx5_ifc definitions for + connection tracking support + +Message-id: <20200512105530.4207-34-ahleihel@redhat.com> +Patchwork-id: 306904 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 033/124] net/mlx5: Add mlx5_ifc definitions for connection tracking support +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 822e114b50641d3b57d2eb30939e60d8b4758288 +Author: Paul Blakey +Date: Mon Apr 1 13:31:32 2019 +0300 + + net/mlx5: Add mlx5_ifc definitions for connection tracking support + + Add the required hardware definitions to mlx5_ifc: + ignore_flow_level, registers, copy_header, and fwd_and_modify cap. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Sholomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 24 ++++++++++++++++++++---- + 1 file changed, 20 insertions(+), 4 deletions(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index ef0ed32db813..5b9d2eb36ad4 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -375,8 +375,17 @@ struct mlx5_ifc_flow_table_fields_supported_bits { + u8 outer_esp_spi[0x1]; + u8 reserved_at_58[0x2]; + u8 bth_dst_qp[0x1]; ++ u8 reserved_at_5b[0x5]; + +- u8 reserved_at_5b[0x25]; ++ u8 reserved_at_60[0x18]; ++ u8 metadata_reg_c_7[0x1]; ++ u8 metadata_reg_c_6[0x1]; ++ u8 metadata_reg_c_5[0x1]; ++ u8 metadata_reg_c_4[0x1]; ++ u8 metadata_reg_c_3[0x1]; ++ u8 metadata_reg_c_2[0x1]; ++ u8 metadata_reg_c_1[0x1]; ++ u8 metadata_reg_c_0[0x1]; + }; + + struct mlx5_ifc_flow_table_prop_layout_bits { +@@ -401,7 +410,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { + u8 reformat_l3_tunnel_to_l2[0x1]; + u8 reformat_l2_to_l3_tunnel[0x1]; + u8 reformat_and_modify_action[0x1]; +- u8 reserved_at_15[0x2]; ++ u8 ignore_flow_level[0x1]; ++ u8 reserved_at_16[0x1]; + u8 table_miss_action_domain[0x1]; + u8 termination_table[0x1]; + u8 reserved_at_19[0x7]; +@@ -725,7 +735,9 @@ enum { + + struct mlx5_ifc_flow_table_eswitch_cap_bits { + u8 fdb_to_vport_reg_c_id[0x8]; +- u8 reserved_at_8[0xf]; ++ u8 reserved_at_8[0xd]; ++ u8 fdb_modify_header_fwd_to_table[0x1]; ++ u8 reserved_at_16[0x1]; + u8 flow_source[0x1]; + u8 reserved_at_18[0x2]; + u8 multi_fdb_encap[0x1]; +@@ -4022,7 +4034,8 @@ struct mlx5_ifc_set_fte_in_bits { + u8 reserved_at_a0[0x8]; + u8 table_id[0x18]; + +- u8 reserved_at_c0[0x18]; ++ u8 ignore_flow_level[0x1]; ++ u8 reserved_at_c1[0x17]; + u8 modify_enable_mask[0x8]; + + u8 reserved_at_e0[0x20]; +@@ -5520,6 +5533,7 @@ struct mlx5_ifc_copy_action_in_bits { + union mlx5_ifc_set_action_in_add_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; ++ struct mlx5_ifc_copy_action_in_bits copy_action_in; + u8 reserved_at_0[0x40]; + }; + +@@ -5562,6 +5576,8 @@ enum { + MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55, + MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56, ++ MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57, ++ MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58, + MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, + MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, + }; +-- +2.13.6 + diff --git a/SOURCES/0140-include-net-mlx5e-Expose-FEC-feilds-and-related-capa.patch b/SOURCES/0140-include-net-mlx5e-Expose-FEC-feilds-and-related-capa.patch new file mode 100644 index 0000000..bd71635 --- /dev/null +++ b/SOURCES/0140-include-net-mlx5e-Expose-FEC-feilds-and-related-capa.patch @@ -0,0 +1,74 @@ +From dd07c451afec494fe2a72c343b730c3a00563374 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:00 -0400 +Subject: [PATCH 140/312] [include] net/mlx5e: Expose FEC feilds and related + capability bit + +Message-id: <20200512105530.4207-35-ahleihel@redhat.com> +Patchwork-id: 306906 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 034/124] net/mlx5e: Expose FEC feilds and related capability bit +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + 
+commit a58837f52d432f32995b1c00e803cc4db18762d3 +Author: Aya Levin +Date: Mon Dec 30 14:22:57 2019 +0200 + + net/mlx5e: Expose FEC feilds and related capability bit + + Introduce 50G per lane FEC modes capability bit and newly supported + fields in PPLM register which allow this configuration. + + Signed-off-by: Aya Levin + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 16 +++++++++++++++- + 1 file changed, 15 insertions(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 5b9d2eb36ad4..60d1b97197ac 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -8474,6 +8474,18 @@ struct mlx5_ifc_pplm_reg_bits { + u8 fec_override_admin_50g[0x4]; + u8 fec_override_admin_25g[0x4]; + u8 fec_override_admin_10g_40g[0x4]; ++ ++ u8 fec_override_cap_400g_8x[0x10]; ++ u8 fec_override_cap_200g_4x[0x10]; ++ ++ u8 fec_override_cap_100g_2x[0x10]; ++ u8 fec_override_cap_50g_1x[0x10]; ++ ++ u8 fec_override_admin_400g_8x[0x10]; ++ u8 fec_override_admin_200g_4x[0x10]; ++ ++ u8 fec_override_admin_100g_2x[0x10]; ++ u8 fec_override_admin_50g_1x[0x10]; + }; + + struct mlx5_ifc_ppcnt_reg_bits { +@@ -8800,7 +8812,9 @@ struct mlx5_ifc_mpegc_reg_bits { + }; + + struct mlx5_ifc_pcam_enhanced_features_bits { +- u8 reserved_at_0[0x6d]; ++ u8 reserved_at_0[0x68]; ++ u8 fec_50G_per_lane_in_pplm[0x1]; ++ u8 reserved_at_69[0x4]; + u8 rx_icrc_encapsulated_counter[0x1]; + u8 reserved_at_6e[0x4]; + u8 ptys_extended_ethernet[0x1]; +-- +2.13.6 + diff --git a/SOURCES/0141-netdrv-net-mlx5-Refactor-mlx5_create_auto_grouped_fl.patch b/SOURCES/0141-netdrv-net-mlx5-Refactor-mlx5_create_auto_grouped_fl.patch new file mode 100644 index 0000000..d88dd0b --- /dev/null +++ b/SOURCES/0141-netdrv-net-mlx5-Refactor-mlx5_create_auto_grouped_fl.patch @@ -0,0 +1,291 @@ +From d85ea593966ba92587dcce23a1ebfbd8b8539a95 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:01 -0400 +Subject: [PATCH 141/312] [netdrv] net/mlx5: Refactor + mlx5_create_auto_grouped_flow_table + +Message-id: <20200512105530.4207-36-ahleihel@redhat.com> +Patchwork-id: 306907 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 035/124] net/mlx5: Refactor mlx5_create_auto_grouped_flow_table +Bugzilla: 1789383 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789383 +Upstream: v5.6-rc1 + +commit 61dc7b0141c51f5fa4aed97e49f9cf102ec51479 +Author: Paul Blakey +Date: Thu Nov 14 16:59:58 2019 +0200 + + net/mlx5: Refactor mlx5_create_auto_grouped_flow_table + + Refactor mlx5_create_auto_grouped_flow_table() to use ft_attr param + which already carries the max_fte, prio and flags memebers, and is + used the same in similar mlx5_create_flow_table() function. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata + +[ The refactored function renamed to + mlx5_create_auto_grouped_flow_table_attr_; + mlx5_create_auto_grouped_flow_table with the old calling convention + is added as a wrapper for it ] +--- + .../ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 9 +++-- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15 +++++---- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 7 ++-- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13 ++++---- + .../mellanox/mlx5/core/eswitch_offloads_termtbl.c | 11 ++++--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 38 ++++++++++++++-------- + include/linux/mlx5/fs.h | 9 +++++ + 7 files changed, 66 insertions(+), 36 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +index acd946f2ddbe..e002175f19e9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +@@ -58,6 +58,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs, + int num_tuples) + { ++ struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5e_ethtool_table *eth_ft; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; +@@ -102,9 +103,11 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, + table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, + flow_table_properties_nic_receive.log_max_ft_size)), + MLX5E_ETHTOOL_NUM_ENTRIES); +- ft = mlx5_create_auto_grouped_flow_table(ns, prio, +- table_size, +- MLX5E_ETHTOOL_NUM_GROUPS, 0, 0); ++ ++ ft_attr.prio = prio; ++ ft_attr.max_fte = table_size; ++ ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS; ++ ft = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); + if (IS_ERR(ft)) + return (void *)ft; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 93631f477140..916a49b916c9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -954,7 +954,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, + + mutex_lock(&priv->fs.tc.t_lock); + if (IS_ERR_OR_NULL(priv->fs.tc.t)) { +- int tc_grp_size, tc_tbl_size; ++ struct mlx5_flow_table_attr ft_attr = {}; ++ int tc_grp_size, tc_tbl_size, tc_num_grps; + u32 max_flow_counter; + + max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | +@@ -964,13 +965,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, + + tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS, + BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size))); ++ tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS; + ++ ft_attr.prio = MLX5E_TC_PRIO; ++ ft_attr.max_fte = tc_tbl_size; ++ ft_attr.level = MLX5E_TC_FT_LEVEL; ++ ft_attr.autogroup.max_num_groups = tc_num_grps; + priv->fs.tc.t = +- mlx5_create_auto_grouped_flow_table(priv->fs.ns, +- MLX5E_TC_PRIO, +- tc_tbl_size, +- MLX5E_TC_TABLE_NUM_GROUPS, +- MLX5E_TC_FT_LEVEL, 0); ++ mlx5_create_auto_grouped_flow_table_attr_(priv->fs.ns, ++ &ft_attr); + if (IS_ERR(priv->fs.tc.t)) { + mutex_unlock(&priv->fs.tc.t_lock); + NL_SET_ERR_MSG_MOD(extack, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 
93cf6eb77163..8e53bc0a6b6e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -277,6 +277,7 @@ enum { + + static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) + { ++ struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; +@@ -289,8 +290,10 @@ static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) + } + + /* num FTE 2, num FG 2 */ +- fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO, +- 2, 2, 0, 0); ++ ft_attr.prio = LEGACY_VEPA_PRIO; ++ ft_attr.max_fte = 2; ++ ft_attr.autogroup.max_num_groups = 2; ++ fdb = mlx5_create_auto_grouped_flow_table_attr_(root_ns, &ft_attr); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 9d2d850463bb..9837baba3b8b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -904,6 +904,7 @@ create_next_size_table(struct mlx5_eswitch *esw, + int level, + u32 flags) + { ++ struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *fdb; + int sz; + +@@ -911,12 +912,12 @@ create_next_size_table(struct mlx5_eswitch *esw, + if (!sz) + return ERR_PTR(-ENOSPC); + +- fdb = mlx5_create_auto_grouped_flow_table(ns, +- table_prio, +- sz, +- ESW_OFFLOADS_NUM_GROUPS, +- level, +- flags); ++ ft_attr.max_fte = sz; ++ ft_attr.prio = table_prio; ++ ft_attr.level = level; ++ ft_attr.flags = flags; ++ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; ++ fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); + if (IS_ERR(fdb)) { + esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n", + (int)PTR_ERR(fdb), table_prio, level, sz); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +index 366bda1bb1c3..d2f6af3a8a28 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +@@ -50,8 +50,8 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev, + struct mlx5_flow_act *flow_act) + { + static const struct mlx5_flow_spec spec = {}; ++ struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *root_ns; +- int prio, flags; + int err; + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); +@@ -63,10 +63,11 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev, + /* As this is the terminating action then the termination table is the + * same prio as the slow path + */ +- prio = FDB_SLOW_PATH; +- flags = MLX5_FLOW_TABLE_TERMINATION; +- tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, prio, 1, 1, +- 0, flags); ++ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION; ++ ft_attr.prio = FDB_SLOW_PATH; ++ ft_attr.max_fte = 1; ++ ft_attr.autogroup.max_num_groups = 1; ++ tt->termtbl = mlx5_create_auto_grouped_flow_table_attr_(root_ns, &ft_attr); + if (IS_ERR(tt->termtbl)) { + esw_warn(dev, "Failed to create termination table\n"); + return -EOPNOTSUPP; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 0e6a89b81972..cb5f2643fecb 100644 +--- 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1102,6 +1102,28 @@ mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns, + EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table); + + struct mlx5_flow_table* ++mlx5_create_auto_grouped_flow_table_attr_(struct mlx5_flow_namespace *ns, ++ struct mlx5_flow_table_attr *ft_attr) ++{ ++ struct mlx5_flow_table *ft; ++ ++ if (ft_attr->autogroup.max_num_groups > ft_attr->max_fte) ++ return ERR_PTR(-EINVAL); ++ ++ ft = mlx5_create_flow_table(ns, ft_attr); ++ if (IS_ERR(ft)) ++ return ft; ++ ++ ft->autogroup.active = true; ++ ft->autogroup.required_groups = ft_attr->autogroup.max_num_groups; ++ /* We save place for flow groups in addition to max types */ ++ ft->autogroup.group_size = ft->max_fte / ++ (ft->autogroup.required_groups + 1); ++ ++ return ft; ++} ++ ++struct mlx5_flow_table* + mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, +@@ -1110,26 +1132,14 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + u32 flags) + { + struct mlx5_flow_table_attr ft_attr = {}; +- struct mlx5_flow_table *ft; +- +- if (max_num_groups > num_flow_table_entries) +- return ERR_PTR(-EINVAL); + + ft_attr.max_fte = num_flow_table_entries; + ft_attr.prio = prio; + ft_attr.level = level; + ft_attr.flags = flags; ++ ft_attr.autogroup.max_num_groups = max_num_groups; + +- ft = mlx5_create_flow_table(ns, &ft_attr); +- if (IS_ERR(ft)) +- return ft; +- +- ft->autogroup.active = true; +- ft->autogroup.required_groups = max_num_groups; +- /* We save place for flow groups in addition to max types */ +- ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1); +- +- return ft; ++ return mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); + } + EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); + +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index bb66d1da0ef3..a9de8e28ae6f 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -160,12 +160,21 @@ struct mlx5_flow_table_attr { + int max_fte; + u32 level; + u32 flags; ++ ++ struct { ++ int max_num_groups; ++ } autogroup; + }; + + struct mlx5_flow_table * + mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + struct mlx5_flow_table_attr *ft_attr); + ++/* Renamed from mlx5_create_auto_grouped_flow_table for usage inside mlx5 DUP */ ++struct mlx5_flow_table * ++mlx5_create_auto_grouped_flow_table_attr_(struct mlx5_flow_namespace *ns, ++ struct mlx5_flow_table_attr *ft_attr); ++ + struct mlx5_flow_table * + mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, +-- +2.13.6 + diff --git a/SOURCES/0142-netdrv-net-mlx5-fs_core-Introduce-unmanaged-flow-tab.patch b/SOURCES/0142-netdrv-net-mlx5-fs_core-Introduce-unmanaged-flow-tab.patch new file mode 100644 index 0000000..6cc831e --- /dev/null +++ b/SOURCES/0142-netdrv-net-mlx5-fs_core-Introduce-unmanaged-flow-tab.patch @@ -0,0 +1,158 @@ +From 033fa865cbce7eef7ccf369fa254ab03c8b6a2fc Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:02 -0400 +Subject: [PATCH 142/312] [netdrv] net/mlx5: fs_core: Introduce unmanaged flow + tables + +Message-id: <20200512105530.4207-37-ahleihel@redhat.com> +Patchwork-id: 306908 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 036/124] net/mlx5: fs_core: Introduce unmanaged flow tables +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: 
http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 5281a0c909194c477656e89401ac11dd7b29ad2d +Author: Paul Blakey +Date: Tue Jul 23 11:43:57 2019 +0300 + + net/mlx5: fs_core: Introduce unmanaged flow tables + + Currently, Most of the steering tree is statically declared ahead of time, + with steering prios instances allocated for each fdb chain to assign max + number of levels for each of them. This allows fs_core to manage the + connections and levels of the flow tables hierarcy to prevent loops, but + restricts us with the number of supported chains and priorities. + + Introduce unmananged flow tables, allowing the user to manage the flow + table connections. A unamanged table is detached from the fs_core flow + table hierarcy, and is only connected back to the hierarchy by explicit + FTEs forward actions. + + This will be used together with firmware that supports ignoring the flow + table levels to increase the number of supported chains and prios. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 41 ++++++++++++++++------- + include/linux/mlx5/fs.h | 2 ++ + 2 files changed, 31 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index cb5f2643fecb..703799975f22 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1006,7 +1006,8 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa + u16 vport) + { + struct mlx5_flow_root_namespace *root = find_root(&ns->node); +- struct mlx5_flow_table *next_ft = NULL; ++ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED; ++ struct mlx5_flow_table *next_ft; + struct fs_prio *fs_prio = NULL; + struct mlx5_flow_table *ft; + int log_table_sz; +@@ -1023,14 +1024,21 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa + err = -EINVAL; + goto unlock_root; + } +- if (ft_attr->level >= fs_prio->num_levels) { +- err = -ENOSPC; +- goto unlock_root; ++ if (!unmanaged) { ++ /* The level is related to the ++ * priority level range. ++ */ ++ if (ft_attr->level >= fs_prio->num_levels) { ++ err = -ENOSPC; ++ goto unlock_root; ++ } ++ ++ ft_attr->level += fs_prio->start_level; + } ++ + /* The level is related to the + * priority level range. + */ +- ft_attr->level += fs_prio->start_level; + ft = alloc_flow_table(ft_attr->level, + vport, + ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0, +@@ -1043,19 +1051,27 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa + + tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); + log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; +- next_ft = find_next_chained_ft(fs_prio); ++ next_ft = unmanaged ? 
ft_attr->next_ft : ++ find_next_chained_ft(fs_prio); + ft->def_miss_action = ns->def_miss_action; + err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft); + if (err) + goto free_ft; + +- err = connect_flow_table(root->dev, ft, fs_prio); +- if (err) +- goto destroy_ft; ++ if (!unmanaged) { ++ err = connect_flow_table(root->dev, ft, fs_prio); ++ if (err) ++ goto destroy_ft; ++ } ++ + ft->node.active = true; + down_write_ref_node(&fs_prio->node, false); +- tree_add_node(&ft->node, &fs_prio->node); +- list_add_flow_table(ft, fs_prio); ++ if (!unmanaged) { ++ tree_add_node(&ft->node, &fs_prio->node); ++ list_add_flow_table(ft, fs_prio); ++ } else { ++ ft->node.root = fs_prio->node.root; ++ } + fs_prio->num_ft++; + up_write_ref_node(&fs_prio->node, false); + mutex_unlock(&root->chain_lock); +@@ -2044,7 +2060,8 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) + int err = 0; + + mutex_lock(&root->chain_lock); +- err = disconnect_flow_table(ft); ++ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED)) ++ err = disconnect_flow_table(ft); + if (err) { + mutex_unlock(&root->chain_lock); + return err; +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index a9de8e28ae6f..72bc6585f732 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -48,6 +48,7 @@ enum { + MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0), + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), + MLX5_FLOW_TABLE_TERMINATION = BIT(2), ++ MLX5_FLOW_TABLE_UNMANAGED = BIT(3), + }; + + #define LEFTOVERS_RULE_NUM 2 +@@ -160,6 +161,7 @@ struct mlx5_flow_table_attr { + int max_fte; + u32 level; + u32 flags; ++ struct mlx5_flow_table *next_ft; + + struct { + int max_num_groups; +-- +2.13.6 + diff --git a/SOURCES/0143-netdrv-net-mlx5-Add-ignore-level-support-fwd-to-tabl.patch b/SOURCES/0143-netdrv-net-mlx5-Add-ignore-level-support-fwd-to-tabl.patch new file mode 100644 index 0000000..240e13d --- /dev/null +++ b/SOURCES/0143-netdrv-net-mlx5-Add-ignore-level-support-fwd-to-tabl.patch @@ -0,0 +1,118 @@ +From a395ed0bff5abd7782871413c7e9ffb8f3fca589 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:03 -0400 +Subject: [PATCH 143/312] [netdrv] net/mlx5: Add ignore level support fwd to + table rules + +Message-id: <20200512105530.4207-38-ahleihel@redhat.com> +Patchwork-id: 306910 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 037/124] net/mlx5: Add ignore level support fwd to table rules +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit ff189b43568216c6211e9e7ddd9026cb8295e744 +Author: Paul Blakey +Date: Sun Jan 5 15:15:54 2020 +0200 + + net/mlx5: Add ignore level support fwd to table rules + + If user sets ignore flow level flag on a rule, that rule can point to + a flow table of any level, including those with levels equal or less + than the level of the flow table it is added on. + + This with unamanged tables will be used to create a FDB chain/prio + hierarchy much larger than currently supported level range. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3 +++ + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 18 +++++++++++++++--- + include/linux/mlx5/fs.h | 1 + + 3 files changed, 19 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +index 3c816e81f8d9..b25465d9e030 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +@@ -432,6 +432,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, fte->index); ++ MLX5_SET(set_fte_in, in, ignore_flow_level, ++ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); ++ + if (ft->vport) { + MLX5_SET(set_fte_in, in, vport_number, ft->vport); + MLX5_SET(set_fte_in, in, other_vport, 1); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 703799975f22..2f6c8890f25e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1555,18 +1555,30 @@ static bool counter_is_valid(u32 action) + } + + static bool dest_is_valid(struct mlx5_flow_destination *dest, +- u32 action, ++ struct mlx5_flow_act *flow_act, + struct mlx5_flow_table *ft) + { ++ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL; ++ u32 action = flow_act->action; ++ + if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)) + return counter_is_valid(action); + + if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + return true; + ++ if (ignore_level) { ++ if (ft->type != FS_FT_FDB) ++ return false; ++ ++ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && ++ dest->ft->type != FS_FT_FDB) ++ return false; ++ } ++ + if (!dest || ((dest->type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) && +- (dest->ft->level <= ft->level))) ++ (dest->ft->level <= ft->level && !ignore_level))) + return false; + return true; + } +@@ -1797,7 +1809,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, + return ERR_PTR(-EINVAL); + + for (i = 0; i < dest_num; i++) { +- if (!dest_is_valid(&dest[i], flow_act->action, ft)) ++ if (!dest_is_valid(&dest[i], flow_act, ft)) + return ERR_PTR(-EINVAL); + } + nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT); +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index 72bc6585f732..5649a8a70c2e 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -207,6 +207,7 @@ struct mlx5_fs_vlan { + + enum { + FLOW_ACT_NO_APPEND = BIT(0), ++ FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1), + }; + + struct mlx5_flow_act { +-- +2.13.6 + diff --git a/SOURCES/0144-netdrv-net-mlx5-Allow-creating-autogroups-with-reser.patch b/SOURCES/0144-netdrv-net-mlx5-Allow-creating-autogroups-with-reser.patch new file mode 100644 index 0000000..e7c0d8a --- /dev/null +++ b/SOURCES/0144-netdrv-net-mlx5-Allow-creating-autogroups-with-reser.patch @@ -0,0 +1,159 @@ +From be97eb6496043414729200bb083deda814c7d33d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:04 -0400 +Subject: [PATCH 144/312] [netdrv] net/mlx5: Allow creating autogroups with + reserved entries + +Message-id: <20200512105530.4207-39-ahleihel@redhat.com> +Patchwork-id: 306911 
+Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 038/124] net/mlx5: Allow creating autogroups with reserved entries +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 79cdb0aaea8b5478db34afa1d4d5ecc808689a67 +Author: Paul Blakey +Date: Thu Nov 14 17:02:59 2019 +0200 + + net/mlx5: Allow creating autogroups with reserved entries + + Exclude the last n entries for an autogrouped flow table. + + Reserving entries at the end of the FT will ensure that this FG will be + the last to be evaluated. This will be used in the next patch to create + a miss group enabling custom actions on FT miss. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 26 +++++++++++++++-------- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3 +++ + include/linux/mlx5/fs.h | 3 +++ + 3 files changed, 23 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 2f6c8890f25e..4c7c707f9e2d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -579,7 +579,9 @@ static void del_sw_flow_group(struct fs_node *node) + + rhashtable_destroy(&fg->ftes_hash); + ida_destroy(&fg->fte_allocator); +- if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size) ++ if (ft->autogroup.active && ++ fg->max_ftes == ft->autogroup.group_size && ++ fg->start_index < ft->autogroup.max_fte) + ft->autogroup.num_groups--; + err = rhltable_remove(&ft->fgs_hash, + &fg->hash, +@@ -1121,9 +1123,14 @@ struct mlx5_flow_table* + mlx5_create_auto_grouped_flow_table_attr_(struct mlx5_flow_namespace *ns, + struct mlx5_flow_table_attr *ft_attr) + { ++ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries; ++ int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries; ++ int max_num_groups = ft_attr->autogroup.max_num_groups; + struct mlx5_flow_table *ft; + +- if (ft_attr->autogroup.max_num_groups > ft_attr->max_fte) ++ if (max_num_groups > autogroups_max_fte) ++ return ERR_PTR(-EINVAL); ++ if (num_reserved_entries > ft_attr->max_fte) + return ERR_PTR(-EINVAL); + + ft = mlx5_create_flow_table(ns, ft_attr); +@@ -1131,10 +1138,10 @@ mlx5_create_auto_grouped_flow_table_attr_(struct mlx5_flow_namespace *ns, + return ft; + + ft->autogroup.active = true; +- ft->autogroup.required_groups = ft_attr->autogroup.max_num_groups; ++ ft->autogroup.required_groups = max_num_groups; ++ ft->autogroup.max_fte = autogroups_max_fte; + /* We save place for flow groups in addition to max types */ +- ft->autogroup.group_size = ft->max_fte / +- (ft->autogroup.required_groups + 1); ++ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1); + + return ft; + } +@@ -1175,7 +1182,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg; + int err; + +- if (ft->autogroup.active) ++ if (ft->autogroup.active && start_index < ft->autogroup.max_fte) + return ERR_PTR(-EPERM); + + down_write_ref_node(&ft->node, false); +@@ -1348,9 +1355,10 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft + const struct mlx5_flow_spec *spec) + { + struct list_head *prev = 
&ft->node.children; +- struct mlx5_flow_group *fg; ++ u32 max_fte = ft->autogroup.max_fte; + unsigned int candidate_index = 0; + unsigned int group_size = 0; ++ struct mlx5_flow_group *fg; + + if (!ft->autogroup.active) + return ERR_PTR(-ENOENT); +@@ -1358,7 +1366,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft + if (ft->autogroup.num_groups < ft->autogroup.required_groups) + group_size = ft->autogroup.group_size; + +- /* ft->max_fte == ft->autogroup.max_types */ ++ /* max_fte == ft->autogroup.max_types */ + if (group_size == 0) + group_size = 1; + +@@ -1371,7 +1379,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft + prev = &fg->node.list; + } + +- if (candidate_index + group_size > ft->max_fte) ++ if (candidate_index + group_size > max_fte) + return ERR_PTR(-ENOSPC); + + fg = alloc_insert_flow_group(ft, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +index c6221ccbdddf..20f54e53dd01 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +@@ -164,6 +164,9 @@ struct mlx5_flow_table { + unsigned int required_groups; + unsigned int group_size; + unsigned int num_groups; ++#ifndef __GENKSYMS__ ++ unsigned int max_fte; ++#endif + } autogroup; + /* Protect fwd_rules */ + struct mutex lock; +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index 5649a8a70c2e..b918d9724fc2 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -165,6 +165,9 @@ struct mlx5_flow_table_attr { + + struct { + int max_num_groups; ++#ifndef __GENKSYMS__ ++ int num_reserved_entries; ++#endif + } autogroup; + }; + +-- +2.13.6 + diff --git a/SOURCES/0145-netdrv-net-mlx5e-Fix-printk-format-warning.patch b/SOURCES/0145-netdrv-net-mlx5e-Fix-printk-format-warning.patch new file mode 100644 index 0000000..b50be3d --- /dev/null +++ b/SOURCES/0145-netdrv-net-mlx5e-Fix-printk-format-warning.patch @@ -0,0 +1,54 @@ +From baaa60a0e32476c4c9c2875a31698a13525fb22f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:05 -0400 +Subject: [PATCH 145/312] [netdrv] net/mlx5e: Fix printk format warning + +Message-id: <20200512105530.4207-40-ahleihel@redhat.com> +Patchwork-id: 306909 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 039/124] net/mlx5e: Fix printk format warning +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit ca9c74ae9be5e78541c2058db9a754947a7d4a9b +Author: Olof Johansson +Date: Thu Dec 19 16:15:17 2019 -0800 + + net/mlx5e: Fix printk format warning + + Use "%zu" for size_t. 
Seen on ARM allmodconfig: + + drivers/net/ethernet/mellanox/mlx5/core/wq.c: In function 'mlx5_wq_cyc_wqe_dump': + include/linux/kern_levels.h:5:18: warning: format '%ld' expects argument of type 'long int', but argument 5 has type 'size_t' {aka 'unsigned int'} [-Wformat=] + + Fixes: 130c7b46c93d ("net/mlx5e: TX, Dump WQs wqe descriptors on CQE with error events") + Signed-off-by: Olof Johansson + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/wq.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +index f2a0e72285ba..02f7e4a39578 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +@@ -89,7 +89,7 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides) + len = nstrides << wq->fbc.log_stride; + wqe = mlx5_wq_cyc_get_wqe(wq, ix); + +- pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %ld\n", ++ pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n", + mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false); + } +-- +2.13.6 + diff --git a/SOURCES/0146-netdrv-net-mlx5e-Add-mlx5e_flower_parse_meta-support.patch b/SOURCES/0146-netdrv-net-mlx5e-Add-mlx5e_flower_parse_meta-support.patch new file mode 100644 index 0000000..a801db3 --- /dev/null +++ b/SOURCES/0146-netdrv-net-mlx5e-Add-mlx5e_flower_parse_meta-support.patch @@ -0,0 +1,126 @@ +From ce3e28c0d41fdac1ea0dc6ab747811303f791a16 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:06 -0400 +Subject: [PATCH 146/312] [netdrv] net/mlx5e: Add mlx5e_flower_parse_meta + support + +Message-id: <20200512105530.4207-41-ahleihel@redhat.com> +Patchwork-id: 306913 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 040/124] net/mlx5e: Add mlx5e_flower_parse_meta support +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 6d65bc64e232896251daba7c43933f0f35859bc3 +Author: wenxu +Date: Tue Jan 7 17:16:06 2020 +0800 + + net/mlx5e: Add mlx5e_flower_parse_meta support + + In the flowtables offload all the devices in the flowtables + share the same flow_block. An offload rule will be installed on + all the devices. This scenario is not correct. + + It is no problem if there are only two devices in the flowtable, + The rule with ingress and egress on the same device can be reject + by driver. + + But more than two devices in the flowtable will install the wrong + rules on hardware. + + For example: + Three devices in a offload flowtables: dev_a, dev_b, dev_c + + A rule ingress from dev_a and egress to dev_b: + The rule will install on device dev_a. + The rule will try to install on dev_b but failed for ingress + and egress on the same device. + The rule will install on dev_c. This is not correct. + + The flowtables offload avoid this case through restricting the ingress dev + with FLOW_DISSECTOR_KEY_META. + + So the mlx5e driver also should support the FLOW_DISSECTOR_KEY_META parse. 
+ + Signed-off-by: wenxu + Acked-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 39 +++++++++++++++++++++++++ + 1 file changed, 39 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 916a49b916c9..5f56830ab709 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1797,6 +1797,40 @@ static void *get_match_headers_value(u32 flags, + outer_headers); + } + ++static int mlx5e_flower_parse_meta(struct net_device *filter_dev, ++ struct flow_cls_offload *f) ++{ ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ struct netlink_ext_ack *extack = f->common.extack; ++ struct net_device *ingress_dev; ++ struct flow_match_meta match; ++ ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) ++ return 0; ++ ++ flow_rule_match_meta(rule, &match); ++ if (match.mask->ingress_ifindex != 0xFFFFFFFF) { ++ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); ++ return -EINVAL; ++ } ++ ++ ingress_dev = __dev_get_by_index(dev_net(filter_dev), ++ match.key->ingress_ifindex); ++ if (!ingress_dev) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Can't find the ingress port to match on"); ++ return -EINVAL; ++ } ++ ++ if (ingress_dev != filter_dev) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Can't match on the ingress filter port"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ + static int __parse_cls_flower(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, +@@ -1817,6 +1851,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + u16 addr_type = 0; + u8 ip_proto = 0; + u8 *match_level; ++ int err; + + match_level = outer_match_level; + +@@ -1860,6 +1895,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + spec); + } + ++ err = mlx5e_flower_parse_meta(filter_dev, f); ++ if (err) ++ return err; ++ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + +-- +2.13.6 + diff --git a/SOURCES/0147-netdrv-net-mlx5-DR-Modify-set-action-limitation-exte.patch b/SOURCES/0147-netdrv-net-mlx5-DR-Modify-set-action-limitation-exte.patch new file mode 100644 index 0000000..dbee8e5 --- /dev/null +++ b/SOURCES/0147-netdrv-net-mlx5-DR-Modify-set-action-limitation-exte.patch @@ -0,0 +1,354 @@ +From ff78c16e4544e8a6e60b243d7ddd8d5c8d677fc4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:07 -0400 +Subject: [PATCH 147/312] [netdrv] net/mlx5: DR, Modify set action limitation + extension + +Message-id: <20200512105530.4207-42-ahleihel@redhat.com> +Patchwork-id: 306912 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 041/124] net/mlx5: DR, Modify set action limitation extension +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.6-rc1 + +commit a51dcc10a04ac476240befb875b0a21388a429cd +Author: Hamdan Igbaria +Date: Tue Dec 24 18:07:41 2019 +0200 + + net/mlx5: DR, Modify set action limitation extension + + Modify set actions are not supported on both tx + and rx, added a check for that. + Also refactored the code in a way that every modify + action has his own functions, this needed so in the + future we could add copy action more smoothly. 
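For reference, a minimal sketch of one 64-bit SW "set" action in the layout these helpers parse; the buffer and values are illustrative only, in the driver this would be one entry of the caller-supplied modify-header action array:

	u8 sw_action[8] = {};	/* one set_action_in entry is 64 bits */

	MLX5_SET(set_action_in, sw_action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, sw_action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
	MLX5_SET(set_action_in, sw_action, offset, 0);
	MLX5_SET(set_action_in, sw_action, length, 0);	/* 0 means the full 32 bits */
	MLX5_SET(set_action_in, sw_action, data, 0x1234);

	/* With the checks added here, a SET on metadata REG_A is rejected outside
	 * a TX domain and a SET on REG_B outside an RX domain, and the action
	 * tracks (allow_rx/allow_tx) the sides on which it remains usable.
	 */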
+ + Signed-off-by: Hamdan Igbaria + Signed-off-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../mellanox/mlx5/core/steering/dr_action.c | 226 +++++++++++++++------ + 1 file changed, 165 insertions(+), 61 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +index 9359eed10889..ad32b88a83dc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +@@ -1314,58 +1314,85 @@ dr_action_modify_get_hw_info(u16 sw_field) + } + + static int +-dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, +- __be64 *sw_action, +- __be64 *hw_action, +- const struct dr_action_modify_field_conv **ret_hw_info) ++dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn, ++ __be64 *sw_action, ++ __be64 *hw_action, ++ const struct dr_action_modify_field_conv **ret_hw_info) + { + const struct dr_action_modify_field_conv *hw_action_info; +- u8 offset, length, max_length, action; ++ u8 max_length; + u16 sw_field; +- u8 hw_opcode; + u32 data; + + /* Get SW modify action data */ +- action = MLX5_GET(set_action_in, sw_action, action_type); +- length = MLX5_GET(set_action_in, sw_action, length); +- offset = MLX5_GET(set_action_in, sw_action, offset); + sw_field = MLX5_GET(set_action_in, sw_action, field); + data = MLX5_GET(set_action_in, sw_action, data); + + /* Convert SW data to HW modify action format */ + hw_action_info = dr_action_modify_get_hw_info(sw_field); + if (!hw_action_info) { +- mlx5dr_dbg(dmn, "Modify action invalid field given\n"); ++ mlx5dr_dbg(dmn, "Modify add action invalid field given\n"); + return -EINVAL; + } + + max_length = hw_action_info->end - hw_action_info->start + 1; + +- switch (action) { +- case MLX5_ACTION_TYPE_SET: +- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_SET; +- /* PRM defines that length zero specific length of 32bits */ +- if (!length) +- length = 32; ++ MLX5_SET(dr_action_hw_set, hw_action, ++ opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD); + +- if (length + offset > max_length) { +- mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n"); +- return -EINVAL; +- } +- break; ++ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, ++ hw_action_info->hw_field); + +- case MLX5_ACTION_TYPE_ADD: +- hw_opcode = MLX5DR_ACTION_MDFY_HW_OP_ADD; +- offset = 0; +- length = max_length; +- break; ++ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, ++ hw_action_info->start); + +- default: +- mlx5dr_info(dmn, "Unsupported action_type for modify action\n"); +- return -EOPNOTSUPP; ++ /* PRM defines that length zero specific length of 32bits */ ++ MLX5_SET(dr_action_hw_set, hw_action, destination_length, ++ max_length == 32 ? 
0 : max_length); ++ ++ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data); ++ ++ *ret_hw_info = hw_action_info; ++ ++ return 0; ++} ++ ++static int ++dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn, ++ __be64 *sw_action, ++ __be64 *hw_action, ++ const struct dr_action_modify_field_conv **ret_hw_info) ++{ ++ const struct dr_action_modify_field_conv *hw_action_info; ++ u8 offset, length, max_length; ++ u16 sw_field; ++ u32 data; ++ ++ /* Get SW modify action data */ ++ length = MLX5_GET(set_action_in, sw_action, length); ++ offset = MLX5_GET(set_action_in, sw_action, offset); ++ sw_field = MLX5_GET(set_action_in, sw_action, field); ++ data = MLX5_GET(set_action_in, sw_action, data); ++ ++ /* Convert SW data to HW modify action format */ ++ hw_action_info = dr_action_modify_get_hw_info(sw_field); ++ if (!hw_action_info) { ++ mlx5dr_dbg(dmn, "Modify set action invalid field given\n"); ++ return -EINVAL; + } + +- MLX5_SET(dr_action_hw_set, hw_action, opcode, hw_opcode); ++ /* PRM defines that length zero specific length of 32bits */ ++ length = length ? length : 32; ++ ++ max_length = hw_action_info->end - hw_action_info->start + 1; ++ ++ if (length + offset > max_length) { ++ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n"); ++ return -EINVAL; ++ } ++ ++ MLX5_SET(dr_action_hw_set, hw_action, ++ opcode, MLX5DR_ACTION_MDFY_HW_OP_SET); + + MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, + hw_action_info->hw_field); +@@ -1384,48 +1411,120 @@ dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, + } + + static int +-dr_action_modify_check_field_limitation(struct mlx5dr_domain *dmn, +- const __be64 *sw_action) ++dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, ++ __be64 *sw_action, ++ __be64 *hw_action, ++ const struct dr_action_modify_field_conv **ret_hw_info) + { +- u16 sw_field; + u8 action; ++ int ret; + +- sw_field = MLX5_GET(set_action_in, sw_action, field); ++ *hw_action = 0; ++ ++ /* Get SW modify action type */ + action = MLX5_GET(set_action_in, sw_action, action_type); + +- /* Check if SW field is supported in current domain (RX/TX) */ +- if (action == MLX5_ACTION_TYPE_SET) { +- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { +- if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { +- mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", +- sw_field); +- return -EINVAL; +- } +- } ++ switch (action) { ++ case MLX5_ACTION_TYPE_SET: ++ ret = dr_action_modify_sw_to_hw_set(dmn, sw_action, ++ hw_action, ++ ret_hw_info); ++ break; + +- if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { +- if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { +- mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", +- sw_field); +- return -EINVAL; +- } ++ case MLX5_ACTION_TYPE_ADD: ++ ret = dr_action_modify_sw_to_hw_add(dmn, sw_action, ++ hw_action, ++ ret_hw_info); ++ break; ++ ++ default: ++ mlx5dr_info(dmn, "Unsupported action_type for modify action\n"); ++ ret = -EOPNOTSUPP; ++ } ++ ++ return ret; ++} ++ ++static int ++dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action, ++ const __be64 *sw_action) ++{ ++ u16 sw_field = MLX5_GET(set_action_in, sw_action, field); ++ struct mlx5dr_domain *dmn = action->rewrite.dmn; ++ ++ if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { ++ action->rewrite.allow_rx = 0; ++ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { ++ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", ++ sw_field); ++ return -EINVAL; + } +- } else if (action == MLX5_ACTION_TYPE_ADD) { +- if (sw_field != 
MLX5_ACTION_IN_FIELD_OUT_IP_TTL && +- sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT && +- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM && +- sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) { +- mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", sw_field); ++ } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { ++ action->rewrite.allow_tx = 0; ++ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { ++ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", ++ sw_field); + return -EINVAL; + } +- } else { +- mlx5dr_info(dmn, "Unsupported action %d modify action\n", action); +- return -EOPNOTSUPP; ++ } ++ ++ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { ++ mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n"); ++ return -EINVAL; + } + + return 0; + } + ++static int ++dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action, ++ const __be64 *sw_action) ++{ ++ u16 sw_field = MLX5_GET(set_action_in, sw_action, field); ++ struct mlx5dr_domain *dmn = action->rewrite.dmn; ++ ++ if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL && ++ sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT && ++ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM && ++ sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) { ++ mlx5dr_dbg(dmn, "Unsupported field %d for add action\n", ++ sw_field); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++dr_action_modify_check_field_limitation(struct mlx5dr_action *action, ++ const __be64 *sw_action) ++{ ++ struct mlx5dr_domain *dmn = action->rewrite.dmn; ++ u8 action_type; ++ int ret; ++ ++ action_type = MLX5_GET(set_action_in, sw_action, action_type); ++ ++ switch (action_type) { ++ case MLX5_ACTION_TYPE_SET: ++ ret = dr_action_modify_check_set_field_limitation(action, ++ sw_action); ++ break; ++ ++ case MLX5_ACTION_TYPE_ADD: ++ ret = dr_action_modify_check_add_field_limitation(action, ++ sw_action); ++ break; ++ ++ default: ++ mlx5dr_info(dmn, "Unsupported action %d modify action\n", ++ action_type); ++ ret = -EOPNOTSUPP; ++ } ++ ++ return ret; ++} ++ + static bool + dr_action_modify_check_is_ttl_modify(const u64 *sw_action) + { +@@ -1434,7 +1533,7 @@ dr_action_modify_check_is_ttl_modify(const u64 *sw_action) + return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL; + } + +-static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn, ++static int dr_actions_convert_modify_header(struct mlx5dr_action *action, + u32 max_hw_actions, + u32 num_sw_actions, + __be64 sw_actions[], +@@ -1446,16 +1545,21 @@ static int dr_actions_convert_modify_header(struct mlx5dr_domain *dmn, + u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED; + u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE; + u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE; ++ struct mlx5dr_domain *dmn = action->rewrite.dmn; + int ret, i, hw_idx = 0; + __be64 *sw_action; + __be64 hw_action; + + *modify_ttl = false; + ++ action->rewrite.allow_rx = 1; ++ action->rewrite.allow_tx = 1; ++ + for (i = 0; i < num_sw_actions; i++) { + sw_action = &sw_actions[i]; + +- ret = dr_action_modify_check_field_limitation(dmn, sw_action); ++ ret = dr_action_modify_check_field_limitation(action, ++ sw_action); + if (ret) + return ret; + +@@ -1544,7 +1648,7 @@ static int dr_action_create_modify_action(struct mlx5dr_domain *dmn, + goto free_chunk; + } + +- ret = dr_actions_convert_modify_header(dmn, ++ ret = dr_actions_convert_modify_header(action, + max_hw_actions, + num_sw_actions, + actions, +-- +2.13.6 + diff --git 
a/SOURCES/0148-netdrv-net-mlx5-DR-Modify-header-copy-support.patch b/SOURCES/0148-netdrv-net-mlx5-DR-Modify-header-copy-support.patch new file mode 100644 index 0000000..c6e38c9 --- /dev/null +++ b/SOURCES/0148-netdrv-net-mlx5-DR-Modify-header-copy-support.patch @@ -0,0 +1,306 @@ +From 2b68292318b04855edb2ad274d8acc3d34a066aa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:08 -0400 +Subject: [PATCH 148/312] [netdrv] net/mlx5: DR, Modify header copy support + +Message-id: <20200512105530.4207-43-ahleihel@redhat.com> +Patchwork-id: 306914 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 042/124] net/mlx5: DR, Modify header copy support +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.6-rc1 + +commit c21a49b360e16657f42f1a9269c3aa714738fec7 +Author: Hamdan Igbaria +Date: Thu Jan 9 13:27:16 2020 +0200 + + net/mlx5: DR, Modify header copy support + + Modify header supports ADD/SET and from this patch + also COPY. Copy allows to copy header fields and + metadata. + + Signed-off-by: Hamdan Igbaria + Signed-off-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../mellanox/mlx5/core/steering/dr_action.c | 150 ++++++++++++++++++--- + .../mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 16 +++ + 2 files changed, 151 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +index ad32b88a83dc..286fcec5eff2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +@@ -1411,15 +1411,82 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn, + } + + static int ++dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn, ++ __be64 *sw_action, ++ __be64 *hw_action, ++ const struct dr_action_modify_field_conv **ret_dst_hw_info, ++ const struct dr_action_modify_field_conv **ret_src_hw_info) ++{ ++ u8 src_offset, dst_offset, src_max_length, dst_max_length, length; ++ const struct dr_action_modify_field_conv *hw_dst_action_info; ++ const struct dr_action_modify_field_conv *hw_src_action_info; ++ u16 src_field, dst_field; ++ ++ /* Get SW modify action data */ ++ src_field = MLX5_GET(copy_action_in, sw_action, src_field); ++ dst_field = MLX5_GET(copy_action_in, sw_action, dst_field); ++ src_offset = MLX5_GET(copy_action_in, sw_action, src_offset); ++ dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset); ++ length = MLX5_GET(copy_action_in, sw_action, length); ++ ++ /* Convert SW data to HW modify action format */ ++ hw_src_action_info = dr_action_modify_get_hw_info(src_field); ++ hw_dst_action_info = dr_action_modify_get_hw_info(dst_field); ++ if (!hw_src_action_info || !hw_dst_action_info) { ++ mlx5dr_dbg(dmn, "Modify copy action invalid field given\n"); ++ return -EINVAL; ++ } ++ ++ /* PRM defines that length zero specific length of 32bits */ ++ length = length ? 
length : 32; ++ ++ src_max_length = hw_src_action_info->end - ++ hw_src_action_info->start + 1; ++ dst_max_length = hw_dst_action_info->end - ++ hw_dst_action_info->start + 1; ++ ++ if (length + src_offset > src_max_length || ++ length + dst_offset > dst_max_length) { ++ mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n"); ++ return -EINVAL; ++ } ++ ++ MLX5_SET(dr_action_hw_copy, hw_action, ++ opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY); ++ ++ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, ++ hw_dst_action_info->hw_field); ++ ++ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, ++ hw_dst_action_info->start + dst_offset); ++ ++ MLX5_SET(dr_action_hw_copy, hw_action, destination_length, ++ length == 32 ? 0 : length); ++ ++ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, ++ hw_src_action_info->hw_field); ++ ++ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, ++ hw_src_action_info->start + dst_offset); ++ ++ *ret_dst_hw_info = hw_dst_action_info; ++ *ret_src_hw_info = hw_src_action_info; ++ ++ return 0; ++} ++ ++static int + dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, + __be64 *sw_action, + __be64 *hw_action, +- const struct dr_action_modify_field_conv **ret_hw_info) ++ const struct dr_action_modify_field_conv **ret_dst_hw_info, ++ const struct dr_action_modify_field_conv **ret_src_hw_info) + { + u8 action; + int ret; + + *hw_action = 0; ++ *ret_src_hw_info = NULL; + + /* Get SW modify action type */ + action = MLX5_GET(set_action_in, sw_action, action_type); +@@ -1428,13 +1495,20 @@ dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn, + case MLX5_ACTION_TYPE_SET: + ret = dr_action_modify_sw_to_hw_set(dmn, sw_action, + hw_action, +- ret_hw_info); ++ ret_dst_hw_info); + break; + + case MLX5_ACTION_TYPE_ADD: + ret = dr_action_modify_sw_to_hw_add(dmn, sw_action, + hw_action, +- ret_hw_info); ++ ret_dst_hw_info); ++ break; ++ ++ case MLX5_ACTION_TYPE_COPY: ++ ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action, ++ hw_action, ++ ret_dst_hw_info, ++ ret_src_hw_info); + break; + + default: +@@ -1496,6 +1570,43 @@ dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action, + } + + static int ++dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action, ++ const __be64 *sw_action) ++{ ++ struct mlx5dr_domain *dmn = action->rewrite.dmn; ++ u16 sw_fields[2]; ++ int i; ++ ++ sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field); ++ sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field); ++ ++ for (i = 0; i < 2; i++) { ++ if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) { ++ action->rewrite.allow_rx = 0; ++ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) { ++ mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n", ++ sw_fields[i]); ++ return -EINVAL; ++ } ++ } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) { ++ action->rewrite.allow_tx = 0; ++ if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) { ++ mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n", ++ sw_fields[i]); ++ return -EINVAL; ++ } ++ } ++ } ++ ++ if (!action->rewrite.allow_rx && !action->rewrite.allow_tx) { ++ mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int + dr_action_modify_check_field_limitation(struct mlx5dr_action *action, + const __be64 *sw_action) + { +@@ -1516,6 +1627,11 @@ dr_action_modify_check_field_limitation(struct mlx5dr_action *action, + sw_action); + break; + ++ case MLX5_ACTION_TYPE_COPY: ++ ret = 
dr_action_modify_check_copy_field_limitation(action, ++ sw_action); ++ break; ++ + default: + mlx5dr_info(dmn, "Unsupported action %d modify action\n", + action_type); +@@ -1541,7 +1657,8 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, + u32 *num_hw_actions, + bool *modify_ttl) + { +- const struct dr_action_modify_field_conv *hw_action_info; ++ const struct dr_action_modify_field_conv *hw_dst_action_info; ++ const struct dr_action_modify_field_conv *hw_src_action_info; + u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED; + u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE; + u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE; +@@ -1570,32 +1687,35 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, + ret = dr_action_modify_sw_to_hw(dmn, + sw_action, + &hw_action, +- &hw_action_info); ++ &hw_dst_action_info, ++ &hw_src_action_info); + if (ret) + return ret; + + /* Due to a HW limitation we cannot modify 2 different L3 types */ +- if (l3_type && hw_action_info->l3_type && +- hw_action_info->l3_type != l3_type) { ++ if (l3_type && hw_dst_action_info->l3_type && ++ hw_dst_action_info->l3_type != l3_type) { + mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n"); + return -EINVAL; + } +- if (hw_action_info->l3_type) +- l3_type = hw_action_info->l3_type; ++ if (hw_dst_action_info->l3_type) ++ l3_type = hw_dst_action_info->l3_type; + + /* Due to a HW limitation we cannot modify two different L4 types */ +- if (l4_type && hw_action_info->l4_type && +- hw_action_info->l4_type != l4_type) { ++ if (l4_type && hw_dst_action_info->l4_type && ++ hw_dst_action_info->l4_type != l4_type) { + mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n"); + return -EINVAL; + } +- if (hw_action_info->l4_type) +- l4_type = hw_action_info->l4_type; ++ if (hw_dst_action_info->l4_type) ++ l4_type = hw_dst_action_info->l4_type; + + /* HW reads and executes two actions at once this means we + * need to create a gap if two actions access the same field + */ +- if ((hw_idx % 2) && hw_field == hw_action_info->hw_field) { ++ if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field || ++ (hw_src_action_info && ++ hw_field == hw_src_action_info->hw_field))) { + /* Check if after gap insertion the total number of HW + * modify actions doesn't exceeds the limit + */ +@@ -1605,7 +1725,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action, + return -EINVAL; + } + } +- hw_field = hw_action_info->hw_field; ++ hw_field = hw_dst_action_info->hw_field; + + hw_actions[hw_idx] = hw_action; + hw_idx++; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +index 1722f4668269..e01c3766c7de 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +@@ -32,6 +32,7 @@ enum { + }; + + enum { ++ MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1, + MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2, + MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3, + }; +@@ -625,4 +626,19 @@ struct mlx5_ifc_dr_action_hw_set_bits { + u8 inline_data[0x20]; + }; + ++struct mlx5_ifc_dr_action_hw_copy_bits { ++ u8 opcode[0x8]; ++ u8 destination_field_code[0x8]; ++ u8 reserved_at_10[0x2]; ++ u8 destination_left_shifter[0x6]; ++ u8 reserved_at_18[0x2]; ++ u8 destination_length[0x6]; ++ ++ u8 reserved_at_20[0x8]; ++ u8 source_field_code[0x8]; ++ u8 reserved_at_30[0x2]; ++ u8 source_left_shifter[0x6]; ++ u8 reserved_at_38[0x8]; ++}; 
++ + #endif /* MLX5_IFC_DR_H */ +-- +2.13.6 + diff --git a/SOURCES/0149-netdrv-net-mlx5-DR-Allow-connecting-flow-table-to-a-.patch b/SOURCES/0149-netdrv-net-mlx5-DR-Allow-connecting-flow-table-to-a-.patch new file mode 100644 index 0000000..ba9b80c --- /dev/null +++ b/SOURCES/0149-netdrv-net-mlx5-DR-Allow-connecting-flow-table-to-a-.patch @@ -0,0 +1,60 @@ +From d47363e4069aca93d2aad93d44daeaf8f187cbac Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:09 -0400 +Subject: [PATCH 149/312] [netdrv] net/mlx5: DR, Allow connecting flow table to + a lower/same level table + +Message-id: <20200512105530.4207-44-ahleihel@redhat.com> +Patchwork-id: 306915 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 043/124] net/mlx5: DR, Allow connecting flow table to a lower/same level table +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.6-rc1 + +commit 4c7cea2f31f1dde9c578e4729e0d34f340052037 +Author: Yevgeny Kliteynik +Date: Mon Jan 20 11:51:36 2020 +0200 + + net/mlx5: DR, Allow connecting flow table to a lower/same level table + + Allow connecting SW steering source table to a lower/same level + destination table. + Lifting this limitation is required to support Connection Tracking. + + Signed-off-by: Yevgeny Kliteynik + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +index 286fcec5eff2..6dec2a550a10 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +@@ -677,9 +677,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + goto out_invalid_arg; + } + if (action->dest_tbl.tbl->level <= matcher->tbl->level) { ++ mlx5_core_warn_once(dmn->mdev, ++ "Connecting table to a lower/same level destination table\n"); + mlx5dr_dbg(dmn, +- "Destination table level should be higher than source table\n"); +- goto out_invalid_arg; ++ "Connecting table at level %d to a destination table at level %d\n", ++ matcher->tbl->level, ++ action->dest_tbl.tbl->level); + } + attr.final_icm_addr = rx_rule ? 
+ action->dest_tbl.tbl->rx.s_anchor->chunk->icm_addr : +-- +2.13.6 + diff --git a/SOURCES/0150-netdrv-net-mlx5-IPsec-Fix-esp-modify-function-attrib.patch b/SOURCES/0150-netdrv-net-mlx5-IPsec-Fix-esp-modify-function-attrib.patch new file mode 100644 index 0000000..484d9cd --- /dev/null +++ b/SOURCES/0150-netdrv-net-mlx5-IPsec-Fix-esp-modify-function-attrib.patch @@ -0,0 +1,57 @@ +From 061a619728519c89cd3009d87289e129f4382c3e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:13 -0400 +Subject: [PATCH 150/312] [netdrv] net/mlx5: IPsec, Fix esp modify function + attribute + +Message-id: <20200512105530.4207-48-ahleihel@redhat.com> +Patchwork-id: 306919 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 047/124] net/mlx5: IPsec, Fix esp modify function attribute +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 0dc2c534f17c05bed0622b37a744bc38b48ca88a +Author: Raed Salem +Date: Tue Dec 24 09:54:45 2019 +0200 + + net/mlx5: IPsec, Fix esp modify function attribute + + The function mlx5_fpga_esp_validate_xfrm_attrs is wrongly used + with negative negation as zero value indicates success but it + used as failure return value instead. + + Fix by remove the unary not negation operator. + + Fixes: 05564d0ae075 ("net/mlx5: Add flow-steering commands for FPGA IPSec implementation") + Signed-off-by: Raed Salem + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +index e4ec0e03c289..4ed4d4d8e073 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +@@ -1478,7 +1478,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) + return 0; + +- if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { ++ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { + mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); + return -EOPNOTSUPP; + } +-- +2.13.6 + diff --git a/SOURCES/0151-netdrv-net-mlx5-IPsec-fix-memory-leak-at-mlx5_fpga_i.patch b/SOURCES/0151-netdrv-net-mlx5-IPsec-fix-memory-leak-at-mlx5_fpga_i.patch new file mode 100644 index 0000000..33c6346 --- /dev/null +++ b/SOURCES/0151-netdrv-net-mlx5-IPsec-fix-memory-leak-at-mlx5_fpga_i.patch @@ -0,0 +1,57 @@ +From b11609d6b8e59125faa68ca14e005ab09b8cebde Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:14 -0400 +Subject: [PATCH 151/312] [netdrv] net/mlx5: IPsec, fix memory leak at + mlx5_fpga_ipsec_delete_sa_ctx + +Message-id: <20200512105530.4207-49-ahleihel@redhat.com> +Patchwork-id: 306920 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 048/124] net/mlx5: IPsec, fix memory leak at mlx5_fpga_ipsec_delete_sa_ctx +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 08db2cf577487f5123aebcc2f913e0b8a2c14b43 +Author: Raed Salem +Date: Wed Oct 23 16:41:21 2019 +0300 + + net/mlx5: IPsec, fix memory leak at mlx5_fpga_ipsec_delete_sa_ctx + + SA context is allocated at 
mlx5_fpga_ipsec_create_sa_ctx, + however the counterpart mlx5_fpga_ipsec_delete_sa_ctx function + nullifies sa_ctx pointer without freeing the memory allocated, + hence the memory leak. + + Fix by free SA context when the SA is released. + + Fixes: d6c4f0298cec ("net/mlx5: Refactor accel IPSec code") + Signed-off-by: Raed Salem + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +index 4ed4d4d8e073..4c61d25d2e88 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +@@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context) + mutex_lock(&fpga_xfrm->lock); + if (!--fpga_xfrm->num_rules) { + mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); ++ kfree(fpga_xfrm->sa_ctx); + fpga_xfrm->sa_ctx = NULL; + } + mutex_unlock(&fpga_xfrm->lock); +-- +2.13.6 + diff --git a/SOURCES/0152-netdrv-net-mlx5e-TX-Error-completion-is-for-last-WQE.patch b/SOURCES/0152-netdrv-net-mlx5e-TX-Error-completion-is-for-last-WQE.patch new file mode 100644 index 0000000..aa289d1 --- /dev/null +++ b/SOURCES/0152-netdrv-net-mlx5e-TX-Error-completion-is-for-last-WQE.patch @@ -0,0 +1,143 @@ +From ca8424eebb93cf0c2b311f0f12ea911361b4833f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:15 -0400 +Subject: [PATCH 152/312] [netdrv] net/mlx5e: TX, Error completion is for last + WQE in batch + +Message-id: <20200512105530.4207-50-ahleihel@redhat.com> +Patchwork-id: 306921 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 049/124] net/mlx5e: TX, Error completion is for last WQE in batch +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit b57e66ad42d051ed31319c28ed1b62b191299a29 +Author: Tariq Toukan +Date: Thu Jan 9 15:53:37 2020 +0200 + + net/mlx5e: TX, Error completion is for last WQE in batch + + For a cyclic work queue, when not requesting a completion per WQE, + a single CQE might indicate the completion of several WQEs. + However, in case some WQE in the batch causes an error, then an error + completion is issued, breaking the batch, and pointing to the offending + WQE in the wqe_counter field. + + Hence, WQE-specific error CQE handling (like printing, breaking, etc...) + should be performed only for the last WQE in batch. 
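Condensed into a sketch (per-WQE details elided), the completion-loop shape described above and implemented in the hunk below is roughly:

	u16 wqe_counter = be16_to_cpu(cqe->wqe_counter);
	bool last_wqe;

	do {
		last_wqe = (sqcc == wqe_counter); /* sqcc: SW consumer counter */
		/* per-WQE work: unmap DMA, free the skb, advance sqcc... */
	} while (!last_wqe);

	/* Only here, after the whole batch has been consumed, is it valid to
	 * treat an error CQE as describing the last (offending) WQE.
	 */
	if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
		/* dump the offending WQE, schedule recovery, count the error */
	}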
+ + Fixes: 130c7b46c93d ("net/mlx5e: TX, Dump WQs wqe descriptors on CQE with error events") + Fixes: fd9b4be8002c ("net/mlx5e: RX, Support multiple outstanding UMR posts") + Signed-off-by: Tariq Toukan + Reviewed-by: Aya Levin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16 ++++++------ + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 33 +++++++++++-------------- + 2 files changed, 23 insertions(+), 26 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 64d6ecbece80..f0170fb2edbc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -614,13 +614,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + +- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { +- netdev_WARN_ONCE(cq->channel->netdev, +- "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); +- if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) +- queue_work(cq->channel->priv->wq, &sq->recover_work); +- break; +- } + do { + struct mlx5e_sq_wqe_info *wi; + u16 ci; +@@ -630,6 +623,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.ico_wqe[ci]; + ++ if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { ++ netdev_WARN_ONCE(cq->channel->netdev, ++ "Bad OP in ICOSQ CQE: 0x%x\n", ++ get_cqe_opcode(cqe)); ++ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) ++ queue_work(cq->channel->priv->wq, &sq->recover_work); ++ break; ++ } ++ + if (likely(wi->opcode == MLX5_OPCODE_UMR)) { + sqcc += MLX5E_UMR_WQEBBS; + wi->umr.rq->mpwqe.umr_completed++; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 3ce27194ee7e..3bfeb7c06b25 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -452,34 +452,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + + i = 0; + do { ++ struct mlx5e_tx_wqe_info *wi; + u16 wqe_counter; + bool last_wqe; ++ u16 ci; + + mlx5_cqwq_pop(&cq->wq); + + wqe_counter = be16_to_cpu(cqe->wqe_counter); + +- if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { +- if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, +- &sq->state)) { +- struct mlx5e_tx_wqe_info *wi; +- u16 ci; +- +- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); +- wi = &sq->db.wqe_info[ci]; +- mlx5e_dump_error_cqe(sq, +- (struct mlx5_err_cqe *)cqe); +- mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); +- queue_work(cq->channel->priv->wq, +- &sq->recover_work); +- } +- stats->cqe_err++; +- } +- + do { +- struct mlx5e_tx_wqe_info *wi; + struct sk_buff *skb; +- u16 ci; + int j; + + last_wqe = (sqcc == wqe_counter); +@@ -517,6 +500,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + napi_consume_skb(skb, napi_budget); + } while (!last_wqe); + ++ if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { ++ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, ++ &sq->state)) { ++ mlx5e_dump_error_cqe(sq, ++ (struct mlx5_err_cqe *)cqe); ++ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); ++ queue_work(cq->channel->priv->wq, ++ &sq->recover_work); ++ } ++ stats->cqe_err++; ++ } ++ + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + + stats->cqes += i; +-- +2.13.6 + diff --git 
a/SOURCES/0153-netdrv-net-mlx5-Deprecate-usage-of-generic-TLS-HW-ca.patch b/SOURCES/0153-netdrv-net-mlx5-Deprecate-usage-of-generic-TLS-HW-ca.patch new file mode 100644 index 0000000..db2ac14 --- /dev/null +++ b/SOURCES/0153-netdrv-net-mlx5-Deprecate-usage-of-generic-TLS-HW-ca.patch @@ -0,0 +1,106 @@ +From f7b14b69355d929a435c95b1d3821ea74dc91919 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:16 -0400 +Subject: [PATCH 153/312] [netdrv] net/mlx5: Deprecate usage of generic TLS HW + capability bit + +Message-id: <20200512105530.4207-51-ahleihel@redhat.com> +Patchwork-id: 306923 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 050/124] net/mlx5: Deprecate usage of generic TLS HW capability bit +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc1 + +commit 61c00cca41aeeaa8e5263c2f81f28534bc1efafb +Author: Tariq Toukan +Date: Mon Jan 27 14:18:14 2020 +0200 + + net/mlx5: Deprecate usage of generic TLS HW capability bit + + Deprecate the generic TLS cap bit, use the new TX-specific + TLS cap bit instead. + + Fixes: a12ff35e0fb7 ("net/mlx5: Introduce TLS TX offload hardware bits and structures") + Signed-off-by: Tariq Toukan + Reviewed-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2 +- + include/linux/mlx5/mlx5_ifc.h | 7 ++++--- + 4 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +index d787bc0a4155..e09bc3858d57 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); + + static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) + { +- if (!MLX5_CAP_GEN(mdev, tls)) ++ if (!MLX5_CAP_GEN(mdev, tls_tx)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +index 71384ad1a443..ef1ed15a53b4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, + int datalen; + u32 skb_seq; + +- if (MLX5_CAP_GEN(sq->channel->mdev, tls)) { ++ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) { + skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi); + goto out; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c +index c375edfe528c..67c7c07d471a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c +@@ -239,7 +239,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) + return err; + } + +- if (MLX5_CAP_GEN(dev, tls)) { ++ if (MLX5_CAP_GEN(dev, tls_tx)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); + if (err) + return err; +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 60d1b97197ac..cfe89228ca78 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h 
+@@ -1449,14 +1449,15 @@ struct mlx5_ifc_cmd_hca_cap_bits { + + u8 reserved_at_440[0x20]; + +- u8 tls[0x1]; +- u8 reserved_at_461[0x2]; ++ u8 reserved_at_460[0x3]; + u8 log_max_uctx[0x5]; + u8 reserved_at_468[0x3]; + u8 log_max_umem[0x5]; + u8 max_num_eqs[0x10]; + +- u8 reserved_at_480[0x3]; ++ u8 reserved_at_480[0x1]; ++ u8 tls_tx[0x1]; ++ u8 reserved_at_482[0x1]; + u8 log_max_l2_table[0x5]; + u8 reserved_at_488[0x8]; + u8 log_uar_page_sz[0x10]; +-- +2.13.6 + diff --git a/SOURCES/0154-netdrv-net-mlx5-Fix-sleep-while-atomic-in-mlx5_eswit.patch b/SOURCES/0154-netdrv-net-mlx5-Fix-sleep-while-atomic-in-mlx5_eswit.patch new file mode 100644 index 0000000..76994e0 --- /dev/null +++ b/SOURCES/0154-netdrv-net-mlx5-Fix-sleep-while-atomic-in-mlx5_eswit.patch @@ -0,0 +1,82 @@ +From 5c6201319ca7491d4315c36b9d15d399c5158c6c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:21 -0400 +Subject: [PATCH 154/312] [netdrv] net/mlx5: Fix sleep while atomic in + mlx5_eswitch_get_vepa + +Message-id: <20200512105530.4207-56-ahleihel@redhat.com> +Patchwork-id: 306927 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 055/124] net/mlx5: Fix sleep while atomic in mlx5_eswitch_get_vepa +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc3 + +commit 3d9c5e023a0dbf3e117bb416cfefd9405bf5af0c +Author: Huy Nguyen +Date: Mon Feb 3 16:32:18 2020 -0600 + + net/mlx5: Fix sleep while atomic in mlx5_eswitch_get_vepa + + rtnl_bridge_getlink is protected by rcu lock, so mlx5_eswitch_get_vepa + cannot take mutex lock. Two possible issues can happen: + 1. User at the same time change vepa mode via RTM_SETLINK command. + 2. User at the same time change the switchdev mode via devlink netlink + interface. + + Case 1 cannot happen because rtnl executes one message in order. + Case 2 can happen but we do not expect user to change the switchdev mode + when changing vepa. Even if a user does it, so he will read a value + which is no longer valid. + + Fixes: 8da202b24913 ("net/mlx5: E-Switch, Add support for VEPA in legacy mode.") + Signed-off-by: Huy Nguyen + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14 +++----------- + 1 file changed, 3 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 8e53bc0a6b6e..2151787235e0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -2456,25 +2456,17 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) + + int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) + { +- int err = 0; +- + if (!esw) + return -EOPNOTSUPP; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + +- mutex_lock(&esw->state_lock); +- if (esw->mode != MLX5_ESWITCH_LEGACY) { +- err = -EOPNOTSUPP; +- goto out; +- } ++ if (esw->mode != MLX5_ESWITCH_LEGACY) ++ return -EOPNOTSUPP; + + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 
1 : 0; +- +-out: +- mutex_unlock(&esw->state_lock); +- return err; ++ return 0; + } + + int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, +-- +2.13.6 + diff --git a/SOURCES/0155-netdrv-net-mlx5e-Reset-RQ-doorbell-counter-before-mo.patch b/SOURCES/0155-netdrv-net-mlx5e-Reset-RQ-doorbell-counter-before-mo.patch new file mode 100644 index 0000000..38d1e78 --- /dev/null +++ b/SOURCES/0155-netdrv-net-mlx5e-Reset-RQ-doorbell-counter-before-mo.patch @@ -0,0 +1,178 @@ +From 4e8d6fa65af502f450f0b460aa34934727bed4e9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:22 -0400 +Subject: [PATCH 155/312] [netdrv] net/mlx5e: Reset RQ doorbell counter before + moving RQ state from RST to RDY + +Message-id: <20200512105530.4207-57-ahleihel@redhat.com> +Patchwork-id: 306929 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 056/124] net/mlx5e: Reset RQ doorbell counter before moving RQ state from RST to RDY +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc3 + +commit 5ee090ed0da649b1febae2b7c285ac77d1e55a0c +Author: Aya Levin +Date: Mon Dec 9 14:08:18 2019 +0200 + + net/mlx5e: Reset RQ doorbell counter before moving RQ state from RST to RDY + + Initialize RQ doorbell counters to zero prior to moving an RQ from RST + to RDY state. Per HW spec, when RQ is back to RDY state, the descriptor + ID on the completion is reset. The doorbell record must comply. + + Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ") + Signed-off-by: Aya Levin + Reported-by: Tariq Toukan + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 8 +++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 ++ + drivers/net/ethernet/mellanox/mlx5/core/wq.c | 39 +++++++++++++++++------ + drivers/net/ethernet/mellanox/mlx5/core/wq.h | 2 ++ + 4 files changed, 43 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index 7c8796d9743f..a226277b0980 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) + } + } + ++static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) ++{ ++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) ++ mlx5_wq_ll_reset(&rq->mpwqe.wq); ++ else ++ mlx5_wq_cyc_reset(&rq->wqe.wq); ++} ++ + /* SW parser related functions */ + + struct mlx5e_swp_spec { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 7815cae1af54..c9b9c6cb1677 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -713,6 +713,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) + if (!in) + return -ENOMEM; + ++ if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY) ++ mlx5e_rqwq_reset(rq); ++ + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + MLX5_SET(modify_rq_in, in, rq_state, curr_state); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +index 02f7e4a39578..01f075fac276 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c ++++ 
b/drivers/net/ethernet/mellanox/mlx5/core/wq.c +@@ -94,6 +94,13 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides) + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false); + } + ++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq) ++{ ++ wq->wqe_ctr = 0; ++ wq->cur_sz = 0; ++ mlx5_wq_cyc_update_db_record(wq); ++} ++ + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *qpc, struct mlx5_wq_qp *wq, + struct mlx5_wq_ctrl *wq_ctrl) +@@ -192,6 +199,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + return err; + } + ++static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq) ++{ ++ struct mlx5_wqe_srq_next_seg *next_seg; ++ int i; ++ ++ for (i = 0; i < wq->fbc.sz_m1; i++) { ++ next_seg = mlx5_wq_ll_get_wqe(wq, i); ++ next_seg->next_wqe_index = cpu_to_be16(i + 1); ++ } ++ next_seg = mlx5_wq_ll_get_wqe(wq, i); ++ wq->tail_next = &next_seg->next_wqe_index; ++} ++ + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl) +@@ -199,9 +219,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); + u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; +- struct mlx5_wqe_srq_next_seg *next_seg; + int err; +- int i; + + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); + if (err) { +@@ -220,13 +238,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + + mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); + +- for (i = 0; i < fbc->sz_m1; i++) { +- next_seg = mlx5_wq_ll_get_wqe(wq, i); +- next_seg->next_wqe_index = cpu_to_be16(i + 1); +- } +- next_seg = mlx5_wq_ll_get_wqe(wq, i); +- wq->tail_next = &next_seg->next_wqe_index; +- ++ mlx5_wq_ll_init_list(wq); + wq_ctrl->mdev = mdev; + + return 0; +@@ -237,6 +249,15 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + return err; + } + ++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq) ++{ ++ wq->head = 0; ++ wq->wqe_ctr = 0; ++ wq->cur_sz = 0; ++ mlx5_wq_ll_init_list(wq); ++ mlx5_wq_ll_update_db_record(wq); ++} ++ + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) + { + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +index d9a94bc223c0..4cadc336593f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h +@@ -80,6 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl); + void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides); ++void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq); + + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *qpc, struct mlx5_wq_qp *wq, +@@ -92,6 +93,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl); ++void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq); + + void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); + +-- +2.13.6 + diff --git a/SOURCES/0156-netdrv-net-mlx5e-Fix-crash-in-recovery-flow-without-.patch 
b/SOURCES/0156-netdrv-net-mlx5e-Fix-crash-in-recovery-flow-without-.patch new file mode 100644 index 0000000..416869d --- /dev/null +++ b/SOURCES/0156-netdrv-net-mlx5e-Fix-crash-in-recovery-flow-without-.patch @@ -0,0 +1,72 @@ +From f25db32978b9c901d1e89325b0fcf76e663a8baa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:23 -0400 +Subject: [PATCH 156/312] [netdrv] net/mlx5e: Fix crash in recovery flow + without devlink reporter + +Message-id: <20200512105530.4207-58-ahleihel@redhat.com> +Patchwork-id: 306928 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 057/124] net/mlx5e: Fix crash in recovery flow without devlink reporter +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6-rc3 + +commit 1ad6c43c6a7b8627240c6cc19c69e31fedc596a7 +Author: Aya Levin +Date: Wed Feb 12 15:17:25 2020 +0200 + + net/mlx5e: Fix crash in recovery flow without devlink reporter + + When health reporters are not supported, recovery function is invoked + directly, not via devlink health reporters. + + In this direct flow, the recover function input parameter was passed + incorrectly and is causing a kernel oops. This patch is fixing the input + parameter. + + Following call trace is observed on rx error health reporting. + + Internal error: Oops: 96000007 [#1] PREEMPT SMP + Process kworker/u16:4 (pid: 4584, stack limit = 0x00000000c9e45703) + Call trace: + mlx5e_rx_reporter_err_rq_cqe_recover+0x30/0x164 [mlx5_core] + mlx5e_health_report+0x60/0x6c [mlx5_core] + mlx5e_reporter_rq_cqe_err+0x6c/0x90 [mlx5_core] + mlx5e_rq_err_cqe_work+0x20/0x2c [mlx5_core] + process_one_work+0x168/0x3d0 + worker_thread+0x58/0x3d0 + kthread+0x108/0x134 + + Fixes: c50de4af1d63 ("net/mlx5e: Generalize tx reporter's functionality") + Signed-off-by: Aya Levin + Signed-off-by: Parav Pandit + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +index 3a975641f902..20b907dc1e29 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +@@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv, + netdev_err(priv->netdev, err_str); + + if (!reporter) +- return err_ctx->recover(&err_ctx->ctx); ++ return err_ctx->recover(err_ctx->ctx); + + return devlink_health_report(reporter, err_str, err_ctx); + } +-- +2.13.6 + diff --git a/SOURCES/0157-netdrv-net-mlx5-DR-Fix-postsend-actions-write-length.patch b/SOURCES/0157-netdrv-net-mlx5-DR-Fix-postsend-actions-write-length.patch new file mode 100644 index 0000000..89d8ec7 --- /dev/null +++ b/SOURCES/0157-netdrv-net-mlx5-DR-Fix-postsend-actions-write-length.patch @@ -0,0 +1,68 @@ +From b6b835feb5f12a271d398d07385a6a7a71f32550 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:26 -0400 +Subject: [PATCH 157/312] [netdrv] net/mlx5: DR, Fix postsend actions write + length + +Message-id: <20200512105530.4207-61-ahleihel@redhat.com> +Patchwork-id: 306933 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 060/124] net/mlx5: DR, Fix postsend actions write length +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod 
Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.6 + +commit 692b0399a22530b2de8490bea75a7d20d59391d0 +Author: Hamdan Igbaria +Date: Mon Feb 24 14:41:29 2020 +0200 + + net/mlx5: DR, Fix postsend actions write length + + Fix the send info write length to be (actions x action) size in bytes. + + Fixes: 297cccebdc5a ("net/mlx5: DR, Expose an internal API to issue RDMA operations") + Signed-off-by: Hamdan Igbaria + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 1 - + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 3 ++- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +index 6dec2a550a10..2d93228ff633 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +@@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn, + + action->rewrite.data = (void *)ops; + action->rewrite.num_of_actions = i; +- action->rewrite.chunk->byte_size = i * sizeof(*ops); + + ret = mlx5dr_send_postsend_action(dmn, action); + if (ret) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index c7f10d4f8f8d..095ec7b1399d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn, + int ret; + + send_info.write.addr = (uintptr_t)action->rewrite.data; +- send_info.write.length = action->rewrite.chunk->byte_size; ++ send_info.write.length = action->rewrite.num_of_actions * ++ DR_MODIFY_ACTION_SIZE; + send_info.write.lkey = 0; + send_info.remote_addr = action->rewrite.chunk->mr_addr; + send_info.rkey = action->rewrite.chunk->rkey; +-- +2.13.6 + diff --git a/SOURCES/0158-netdrv-net-mlx5e-kTLS-Fix-TCP-seq-off-by-1-issue-in-.patch b/SOURCES/0158-netdrv-net-mlx5e-kTLS-Fix-TCP-seq-off-by-1-issue-in-.patch new file mode 100644 index 0000000..eb76b0e --- /dev/null +++ b/SOURCES/0158-netdrv-net-mlx5e-kTLS-Fix-TCP-seq-off-by-1-issue-in-.patch @@ -0,0 +1,59 @@ +From 0d3c68c01dee6407d29e6c93bf5ecefef6ba1922 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:27 -0400 +Subject: [PATCH 158/312] [netdrv] net/mlx5e: kTLS, Fix TCP seq off-by-1 issue + in TX resync flow + +Message-id: <20200512105530.4207-62-ahleihel@redhat.com> +Patchwork-id: 306931 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 061/124] net/mlx5e: kTLS, Fix TCP seq off-by-1 issue in TX resync flow +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 56917766def72f5afdf4235adb91b6897ff26d9d +Author: Tariq Toukan +Date: Thu Feb 20 13:40:24 2020 +0200 + + net/mlx5e: kTLS, Fix TCP seq off-by-1 issue in TX resync flow + + We have an off-by-1 issue in the TCP seq comparison. + The last sequence number that belongs to the TCP packet's payload + is not "start_seq + len", but one byte before it. + Fix it so the 'ends_before' is evaluated properly. 
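As an illustrative aside (standalone C, not part of this patch; seq_before() is a local stand-in for the kernel's wrap-safe before() helper and the sequence numbers are made up), the off-by-one is easy to reproduce:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's wrap-safe before() comparison. */
static int seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    uint32_t record_start = 1000; /* hypothetical TLS record start seq   */
    uint32_t tcp_seq = 900;       /* packet payload covers seqs 900..999 */
    uint32_t datalen = 100;

    /* Old check: 900 + 100 == 1000 is not before 1000, so a packet whose
     * payload ends just below the record start was not flagged. */
    printf("old ends_before = %d\n", seq_before(tcp_seq + datalen, record_start));

    /* Fixed check: the last payload byte is seq 999, which is before 1000. */
    printf("new ends_before = %d\n", seq_before(tcp_seq + datalen - 1, record_start));
    return 0;
}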
+ + This fixes a bug that results in error completions in the + kTLS HW offload flows. + + Fixes: ffbd9ca94e2e ("net/mlx5e: kTLS, Fix corner-case checks in TX resync flow") + Signed-off-by: Tariq Toukan + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index f260dd96873b..52a56622034a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, + * this packet was already acknowledged and its record info + * was released. + */ +- ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); ++ ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record)); + + if (unlikely(tls_record_is_start_marker(record))) { + ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; +-- +2.13.6 + diff --git a/SOURCES/0159-netdrv-net-mlx5e-kTLS-Fix-wrong-value-in-record-trac.patch b/SOURCES/0159-netdrv-net-mlx5e-kTLS-Fix-wrong-value-in-record-trac.patch new file mode 100644 index 0000000..1fc5827 --- /dev/null +++ b/SOURCES/0159-netdrv-net-mlx5e-kTLS-Fix-wrong-value-in-record-trac.patch @@ -0,0 +1,56 @@ +From e5f6c16cccd60aaed2a00bf07ae5a142816c05cf Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:28 -0400 +Subject: [PATCH 159/312] [netdrv] net/mlx5e: kTLS, Fix wrong value in record + tracker enum + +Message-id: <20200512105530.4207-63-ahleihel@redhat.com> +Patchwork-id: 306934 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 062/124] net/mlx5e: kTLS, Fix wrong value in record tracker enum +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit f28ca65efa87b3fb8da3d69ca7cb1ebc0448de66 +Author: Tariq Toukan +Date: Mon Feb 24 13:56:53 2020 +0200 + + net/mlx5e: kTLS, Fix wrong value in record tracker enum + + Fix to match the HW spec: TRACKING state is 1, SEARCHING is 2. + No real issue for now, as these values are not currently used. 
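A small standalone sketch (a local copy of the constants, not the driver header) shows how the corrected ordering can be pinned at compile time:

#include <assert.h>

/* Local copy of the corrected constants, in HW-spec order. */
enum {
    RECORD_TRACKER_STATE_START     = 0,
    RECORD_TRACKER_STATE_TRACKING  = 1,
    RECORD_TRACKER_STATE_SEARCHING = 2,
};

/* Compile-time guards against reintroducing the swapped values. */
static_assert(RECORD_TRACKER_STATE_TRACKING == 1, "TRACKING must be 1");
static_assert(RECORD_TRACKER_STATE_SEARCHING == 2, "SEARCHING must be 2");

int main(void)
{
    return 0;
}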
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +index a3efa29a4629..63116be6b1d6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +@@ -38,8 +38,8 @@ enum { + + enum { + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0, +- MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1, +- MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2, ++ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1, ++ MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2, + }; + + struct mlx5e_ktls_offload_context_tx { +-- +2.13.6 + diff --git a/SOURCES/0160-netdrv-net-mlx5e-Fix-endianness-handling-in-pedit-ma.patch b/SOURCES/0160-netdrv-net-mlx5e-Fix-endianness-handling-in-pedit-ma.patch new file mode 100644 index 0000000..36cf299 --- /dev/null +++ b/SOURCES/0160-netdrv-net-mlx5e-Fix-endianness-handling-in-pedit-ma.patch @@ -0,0 +1,60 @@ +From e1ab0b9cf6e66899078f6c7b44a32c3322f0b4b4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:29 -0400 +Subject: [PATCH 160/312] [netdrv] net/mlx5e: Fix endianness handling in pedit + mask + +Message-id: <20200512105530.4207-64-ahleihel@redhat.com> +Patchwork-id: 306935 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 063/124] net/mlx5e: Fix endianness handling in pedit mask +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 404402abd5f90aa90a134eb9604b1750c1941529 +Author: Sebastian Hense +Date: Thu Feb 20 08:11:36 2020 +0100 + + net/mlx5e: Fix endianness handling in pedit mask + + The mask value is provided as 64 bit and has to be casted in + either 32 or 16 bit. On big endian systems the wrong half was + casted which resulted in an all zero mask. 
+ + Fixes: 2b64beba0251 ("net/mlx5e: Support header re-write of partial fields in TC pedit offload") + Signed-off-by: Sebastian Hense + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 5f56830ab709..2c89f1251354 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2463,10 +2463,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + continue; + + if (f->field_bsize == 32) { +- mask_be32 = *(__be32 *)&mask; ++ mask_be32 = (__be32)mask; + mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); + } else if (f->field_bsize == 16) { +- mask_be16 = *(__be16 *)&mask; ++ mask_be32 = (__be32)mask; ++ mask_be16 = *(__be16 *)&mask_be32; + mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); + } + +-- +2.13.6 + diff --git a/SOURCES/0161-netdrv-net-mlx5-Clear-LAG-notifier-pointer-after-unr.patch b/SOURCES/0161-netdrv-net-mlx5-Clear-LAG-notifier-pointer-after-unr.patch new file mode 100644 index 0000000..9cf1e84 --- /dev/null +++ b/SOURCES/0161-netdrv-net-mlx5-Clear-LAG-notifier-pointer-after-unr.patch @@ -0,0 +1,59 @@ +From c8e8db53ff86aa1a728b4e39136722e54acd86d0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:30 -0400 +Subject: [PATCH 161/312] [netdrv] net/mlx5: Clear LAG notifier pointer after + unregister + +Message-id: <20200512105530.4207-65-ahleihel@redhat.com> +Patchwork-id: 306936 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 064/124] net/mlx5: Clear LAG notifier pointer after unregister +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 0b136454741b2f6cb18d55e355d396db9248b2ab +Author: Eli Cohen +Date: Wed Feb 19 09:03:28 2020 +0200 + + net/mlx5: Clear LAG notifier pointer after unregister + + After returning from unregister_netdevice_notifier_dev_net(), set the + notifier_call field to NULL so successive call to mlx5_lag_add() will + function as expected. 
+ + Fixes: 7907f23adc18 ("net/mlx5: Implement RoCE LAG feature") + Signed-off-by: Eli Cohen + Reviewed-by: Vlad Buslov + Reviewed-by: Raed Salem + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lag.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c +index fc0d9583475d..79b1bfd2b592 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c +@@ -618,8 +618,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev) + break; + + if (i == MLX5_MAX_PORTS) { +- if (ldev->nb.notifier_call) ++ if (ldev->nb.notifier_call) { + unregister_netdevice_notifier(&ldev->nb); ++ ldev->nb.notifier_call = NULL; ++ } + mlx5_lag_mp_cleanup(ldev); + cancel_delayed_work_sync(&ldev->bond_work); + mlx5_lag_dev_free(ldev); +-- +2.13.6 + diff --git a/SOURCES/0162-netdrv-net-mlx5_core-Set-IB-capability-mask1-to-fix-.patch b/SOURCES/0162-netdrv-net-mlx5_core-Set-IB-capability-mask1-to-fix-.patch new file mode 100644 index 0000000..2e145e1 --- /dev/null +++ b/SOURCES/0162-netdrv-net-mlx5_core-Set-IB-capability-mask1-to-fix-.patch @@ -0,0 +1,56 @@ +From 923f2933d2da0e93d9a92633806fd953b19992d3 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:32 -0400 +Subject: [PATCH 162/312] [netdrv] net/mlx5_core: Set IB capability mask1 to + fix ib_srpt connection failure + +Message-id: <20200512105530.4207-67-ahleihel@redhat.com> +Patchwork-id: 306938 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 066/124] net/mlx5_core: Set IB capability mask1 to fix ib_srpt connection failure +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 306f354c67397b3138300cde875c5cab45b857f7 +Author: Leon Romanovsky +Date: Mon Mar 16 09:31:03 2020 +0200 + + net/mlx5_core: Set IB capability mask1 to fix ib_srpt connection failure + + The cap_mask1 isn't protected by field_select and not listed among RW + fields, but it is required to be written to properly initialize ports + in IB virtualization mode. 
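Conceptually, the context mixes fields gated by a field_select bitmap with fields that must always be programmed. The sketch below is a simplified standalone stand-in with invented struct and bit names, not the real ifc layout:

#include <stdint.h>
#include <stdio.h>

#define SEL_PORT_GUID (1u << 0)
#define SEL_NODE_GUID (1u << 1)

struct fake_vport_ctx {
    uint64_t port_guid;
    uint64_t node_guid;
    uint32_t cap_mask1;
    uint32_t cap_mask1_field_select;
};

static void fill_ctx(struct fake_vport_ctx *ctx, uint32_t field_select,
                     uint64_t port_guid, uint64_t node_guid,
                     uint32_t cap_mask1, uint32_t cap_mask1_perm)
{
    /* Gated fields: written only when their select bit is set. */
    if (field_select & SEL_PORT_GUID)
        ctx->port_guid = port_guid;
    if (field_select & SEL_NODE_GUID)
        ctx->node_guid = node_guid;

    /* Always programmed, so the port comes up initialized in IB
     * virtualization mode even when no select bit asked for it. */
    ctx->cap_mask1 = cap_mask1;
    ctx->cap_mask1_field_select = cap_mask1_perm;
}

int main(void)
{
    struct fake_vport_ctx ctx = {0};

    fill_ctx(&ctx, SEL_NODE_GUID, 0x1111, 0x2222, 0x80, 0x1);
    printf("cap_mask1=0x%x select=0x%x\n",
           (unsigned int)ctx.cap_mask1,
           (unsigned int)ctx.cap_mask1_field_select);
    return 0;
}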
+ + Link: https://lore.kernel.org/linux-rdma/88bab94d2fd72f3145835b4518bc63dda587add6.camel@redhat.com + Fixes: ab118da4c10a ("net/mlx5: Don't write read-only fields in MODIFY_HCA_VPORT_CONTEXT command") + Signed-off-by: Leon Romanovsky + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/vport.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +index 1faac31f74d0..23f879da9104 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +@@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, + MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid); + if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID) + MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid); ++ MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1); ++ MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, ++ req->cap_mask1_perm); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + ex: + kfree(in); +-- +2.13.6 + diff --git a/SOURCES/0163-netdrv-net-mlx5e-Enhance-ICOSQ-WQE-info-fields.patch b/SOURCES/0163-netdrv-net-mlx5e-Enhance-ICOSQ-WQE-info-fields.patch new file mode 100644 index 0000000..b1529f0 --- /dev/null +++ b/SOURCES/0163-netdrv-net-mlx5e-Enhance-ICOSQ-WQE-info-fields.patch @@ -0,0 +1,119 @@ +From 140d271b80ed5361f6acf5cdbe265f215156868d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:33 -0400 +Subject: [PATCH 163/312] [netdrv] net/mlx5e: Enhance ICOSQ WQE info fields + +Message-id: <20200512105530.4207-68-ahleihel@redhat.com> +Patchwork-id: 306939 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 067/124] net/mlx5e: Enhance ICOSQ WQE info fields +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 1de0306c3a05d305e45b1f1fabe2f4e94222eb6b +Author: Aya Levin +Date: Mon Mar 9 09:44:18 2020 +0200 + + net/mlx5e: Enhance ICOSQ WQE info fields + + Add number of WQEBBs (WQE's Basic Block) to WQE info struct. Set the + number of WQEBBs on WQE post, and increment the consumer counter (cc) + on completion. + + In case of error completions, the cc was mistakenly not incremented, + keeping a gap between cc and pc (producer counter). This failed the + recovery flow on the ICOSQ from a CQE error which timed-out waiting for + the cc and pc to meet. + + Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ") + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 +++++------ + drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 1 + + 3 files changed, 7 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index bd34b1851162..44f35adbf775 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -370,6 +370,7 @@ enum { + + struct mlx5e_sq_wqe_info { + u8 opcode; ++ u8 num_wqebbs; + + /* Auxiliary data for different opcodes. 
*/ + union { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index f0170fb2edbc..1f42e88f4ec4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -478,6 +478,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->opcode = MLX5_OPCODE_NOP; ++ wi->num_wqebbs = 1; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + } +@@ -526,6 +527,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) + umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); + + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; ++ sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS; + sq->db.ico_wqe[pi].umr.rq = rq; + sq->pc += MLX5E_UMR_WQEBBS; + +@@ -622,6 +624,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.ico_wqe[ci]; ++ sqcc += wi->num_wqebbs; + + if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { + netdev_WARN_ONCE(cq->channel->netdev, +@@ -632,16 +635,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + break; + } + +- if (likely(wi->opcode == MLX5_OPCODE_UMR)) { +- sqcc += MLX5E_UMR_WQEBBS; ++ if (likely(wi->opcode == MLX5_OPCODE_UMR)) + wi->umr.rq->mpwqe.umr_completed++; +- } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) { +- sqcc++; +- } else { ++ else if (unlikely(wi->opcode != MLX5_OPCODE_NOP)) + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OPCODE in ICOSQ WQE info: 0x%x\n", + wi->opcode); +- } + + } while (!last_wqe); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +index 6d16dee38ede..a2daa3dfe15a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +@@ -77,6 +77,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq) + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; ++ sq->db.ico_wqe[pi].num_wqebbs = 1; + nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); + } +-- +2.13.6 + diff --git a/SOURCES/0164-netdrv-net-mlx5e-Fix-missing-reset-of-SW-metadata-in.patch b/SOURCES/0164-netdrv-net-mlx5e-Fix-missing-reset-of-SW-metadata-in.patch new file mode 100644 index 0000000..07dbd0f --- /dev/null +++ b/SOURCES/0164-netdrv-net-mlx5e-Fix-missing-reset-of-SW-metadata-in.patch @@ -0,0 +1,63 @@ +From b994a9349762bcaad79091144ec96ce7a4b8d5a7 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:34 -0400 +Subject: [PATCH 164/312] [netdrv] net/mlx5e: Fix missing reset of SW metadata + in Striding RQ reset + +Message-id: <20200512105530.4207-69-ahleihel@redhat.com> +Patchwork-id: 306940 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 068/124] net/mlx5e: Fix missing reset of SW metadata in Striding RQ reset +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 39369fd536d485a99a59d8e357c0d4d3ce19a3b8 +Author: Aya Levin +Date: Thu Mar 12 12:35:32 2020 +0200 + + net/mlx5e: Fix missing reset of SW metadata in Striding RQ reset + + When resetting the RQ (moving RQ state from RST to RDY), the driver + resets the WQ's SW metadata. 
+ In striding RQ mode, we maintain a field that reflects the actual + expected WQ head (including in progress WQEs posted to the ICOSQ). + It was mistakenly not reset together with the WQ. Fix this here. + + Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ") + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index a226277b0980..f07b1399744e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) + + static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) + { +- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) ++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + mlx5_wq_ll_reset(&rq->mpwqe.wq); +- else ++ rq->mpwqe.actual_wq_head = 0; ++ } else { + mlx5_wq_cyc_reset(&rq->wqe.wq); ++ } + } + + /* SW parser related functions */ +-- +2.13.6 + diff --git a/SOURCES/0165-netdrv-net-mlx5e-Fix-ICOSQ-recovery-flow-with-Stridi.patch b/SOURCES/0165-netdrv-net-mlx5e-Fix-ICOSQ-recovery-flow-with-Stridi.patch new file mode 100644 index 0000000..b836ad6 --- /dev/null +++ b/SOURCES/0165-netdrv-net-mlx5e-Fix-ICOSQ-recovery-flow-with-Stridi.patch @@ -0,0 +1,131 @@ +From 61d364158c814d57a209665b8dcabf3e0babee89 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:35 -0400 +Subject: [PATCH 165/312] [netdrv] net/mlx5e: Fix ICOSQ recovery flow with + Striding RQ + +Message-id: <20200512105530.4207-70-ahleihel@redhat.com> +Patchwork-id: 306941 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 069/124] net/mlx5e: Fix ICOSQ recovery flow with Striding RQ +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit e239c6d686e1c37fb2ab143162dfb57471a8643f +Author: Aya Levin +Date: Mon Mar 16 16:53:10 2020 +0200 + + net/mlx5e: Fix ICOSQ recovery flow with Striding RQ + + In striding RQ mode, the buffers of an RX WQE are first + prepared and posted to the HW using a UMR WQEs via the ICOSQ. + We maintain the state of these in-progress WQEs in the RQ + SW struct. + + In the flow of ICOSQ recovery, the corresponding RQ is not + in error state, hence: + + - The buffers of the in-progress WQEs must be released + and the RQ metadata should reflect it. + - Existing RX WQEs in the RQ should not be affected. + + For this, wrap the dealloc of the in-progress WQEs in + a function, and use it in the ICOSQ recovery flow + instead of mlx5e_free_rx_descs(). 
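The walk over only the in-progress entries can be sketched in standalone C (a toy ring with a next-index array replaces the real linked-list WQ; names are invented):

#include <stdio.h>

#define WQ_SIZE 8

/* Toy linked-list WQ: next[i] is the index that follows entry i. */
struct toy_wq {
    int next[WQ_SIZE];
    int head;
    int umr_in_progress;
};

static void dealloc_wqe(int ix)
{
    printf("releasing in-progress WQE %d\n", ix);
}

/* Release only the outstanding entries, starting at head and following the
 * list, so already-posted RX WQEs are left untouched. */
static void free_in_progress(struct toy_wq *wq)
{
    int head = wq->head;
    int i;

    for (i = 0; i < wq->umr_in_progress; i++) {
        dealloc_wqe(head);
        head = wq->next[head];
    }
    wq->umr_in_progress = 0;
}

int main(void)
{
    struct toy_wq wq = { .head = 5, .umr_in_progress = 2 };
    int i;

    for (i = 0; i < WQ_SIZE; i++)
        wq.next[i] = (i + 1) % WQ_SIZE; /* simple ring for the demo */

    free_in_progress(&wq); /* frees entries 5 and 6 only */
    return 0;
}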
+ + Fixes: be5323c8379f ("net/mlx5e: Report and recover from CQE error on ICOSQ") + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + + .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 31 +++++++++++++++++----- + 3 files changed, 26 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 44f35adbf775..f0f33971be6c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1048,6 +1048,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); + void mlx5e_activate_rq(struct mlx5e_rq *rq); + void mlx5e_deactivate_rq(struct mlx5e_rq *rq); + void mlx5e_free_rx_descs(struct mlx5e_rq *rq); ++void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); + void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); + void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +index 6c72b592315b..a01e2de2488f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) + goto out; + + mlx5e_reset_icosq_cc_pc(icosq); +- mlx5e_free_rx_descs(rq); ++ mlx5e_free_rx_in_progress_descs(rq); + clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state); + mlx5e_activate_icosq(icosq); + mlx5e_activate_rq(rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index c9b9c6cb1677..2f87b0b4660f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -814,6 +814,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) + return -ETIMEDOUT; + } + ++void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq) ++{ ++ struct mlx5_wq_ll *wq; ++ u16 head; ++ int i; ++ ++ if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) ++ return; ++ ++ wq = &rq->mpwqe.wq; ++ head = wq->head; ++ ++ /* Outstanding UMR WQEs (in progress) start at wq->head */ ++ for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { ++ rq->dealloc_wqe(rq, head); ++ head = mlx5_wq_ll_get_wqe_next_ix(wq, head); ++ } ++ ++ rq->mpwqe.actual_wq_head = wq->head; ++ rq->mpwqe.umr_in_progress = 0; ++ rq->mpwqe.umr_completed = 0; ++} ++ + void mlx5e_free_rx_descs(struct mlx5e_rq *rq) + { + __be16 wqe_ix_be; +@@ -821,14 +844,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) + + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; +- u16 head = wq->head; +- int i; + +- /* Outstanding UMR WQEs (in progress) start at wq->head */ +- for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { +- rq->dealloc_wqe(rq, head); +- head = mlx5_wq_ll_get_wqe_next_ix(wq, head); +- } ++ mlx5e_free_rx_in_progress_descs(rq); + + while (!mlx5_wq_ll_is_empty(wq)) { + struct mlx5e_rx_wqe_ll *wqe; +-- +2.13.6 + diff --git a/SOURCES/0166-netdrv-net-mlx5e-Do-not-recover-from-a-non-fatal-syn.patch b/SOURCES/0166-netdrv-net-mlx5e-Do-not-recover-from-a-non-fatal-syn.patch new file mode 100644 index 0000000..019da51 --- /dev/null +++ 
b/SOURCES/0166-netdrv-net-mlx5e-Do-not-recover-from-a-non-fatal-syn.patch @@ -0,0 +1,58 @@ +From fb32c890e6cf38a3f6f2b848154c88049e42f051 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:36 -0400 +Subject: [PATCH 166/312] [netdrv] net/mlx5e: Do not recover from a non-fatal + syndrome + +Message-id: <20200512105530.4207-71-ahleihel@redhat.com> +Patchwork-id: 306943 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 070/124] net/mlx5e: Do not recover from a non-fatal syndrome +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.6 + +commit 187a9830c921d92c4a9a8e2921ecc4b35a97532c +Author: Aya Levin +Date: Thu Mar 19 13:25:17 2020 +0200 + + net/mlx5e: Do not recover from a non-fatal syndrome + + For non-fatal syndromes like LOCAL_LENGTH_ERR, recovery shouldn't be + triggered. In these scenarios, the RQ is not actually in ERR state. + This misleads the recovery flow which assumes that the RQ is really in + error state and no more completions arrive, causing crashes on bad page + state. + + Fixes: 8276ea1353a4 ("net/mlx5e: Report and recover from CQE with error on RQ") + Signed-off-by: Aya Levin + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +index d3693fa547ac..e54f70d9af22 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +@@ -10,8 +10,7 @@ + + static inline bool cqe_syndrome_needs_recover(u8 syndrome) + { +- return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR || +- syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || ++ return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || + syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR || + syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR; + } +-- +2.13.6 + diff --git a/SOURCES/0167-netdrv-net-mlx5e-Define-one-flow-for-TXQ-selection-w.patch b/SOURCES/0167-netdrv-net-mlx5e-Define-one-flow-for-TXQ-selection-w.patch new file mode 100644 index 0000000..aee28c8 --- /dev/null +++ b/SOURCES/0167-netdrv-net-mlx5e-Define-one-flow-for-TXQ-selection-w.patch @@ -0,0 +1,84 @@ +From 094a7b6bbcdb5b5ca180f1e70e7375a37d66cdb5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:39 -0400 +Subject: [PATCH 167/312] [netdrv] net/mlx5e: Define one flow for TXQ selection + when TCs are configured + +Message-id: <20200512105530.4207-74-ahleihel@redhat.com> +Patchwork-id: 306946 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 073/124] net/mlx5e: Define one flow for TXQ selection when TCs are configured +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_tx.c + Context diff due to missing commit: + a350eccee583 ("net: remove 'fallback' argument from dev->ndo_select_queue()") + ---> We still use fallback instead of netdev_pick_tx. 
+ +commit 4229e0ea2c9936b3093990353b211bcd7802a2d5 +Author: Eran Ben Elisha +Date: Sun Dec 8 14:29:45 2019 +0200 + + net/mlx5e: Define one flow for TXQ selection when TCs are configured + + We shall always extract channel index out of the txq, regardless + of the relation between txq_ix and num channels. The extraction is + always valid, as if txq is smaller than number of channels, + txq_ix == priv->txq2sq[txq_ix]->ch_ix. + + By doing so, we can remove an if clause from the select queue method, + and have one flow for all packets. + + Signed-off-by: Eran Ben Elisha + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 13 ++++++------- + 1 file changed, 6 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index 3bfeb7c06b25..bb73d9ea131e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -73,8 +73,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + { + struct mlx5e_priv *priv = netdev_priv(dev); + int txq_ix = fallback(dev, skb, NULL); +- u16 num_channels; + int up = 0; ++ int ch_ix; + + if (!netdev_get_num_tc(dev)) + return txq_ix; +@@ -87,14 +87,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + if (skb_vlan_tag_present(skb)) + up = skb_vlan_tag_get_prio(skb); + +- /* txq_ix can be larger than num_channels since +- * dev->num_real_tx_queues = num_channels * num_tc ++ /* Normalize any picked txq_ix to [0, num_channels), ++ * So we can return a txq_ix that matches the channel and ++ * packet UP. + */ +- num_channels = priv->channels.params.num_channels; +- if (txq_ix >= num_channels) +- txq_ix = priv->txq2sq[txq_ix]->ch_ix; ++ ch_ix = priv->txq2sq[txq_ix]->ch_ix; + +- return priv->channel_tc2realtxq[txq_ix][up]; ++ return priv->channel_tc2realtxq[ch_ix][up]; + } + + static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) +-- +2.13.6 + diff --git a/SOURCES/0168-netdrv-net-mlx5e-Add-missing-LRO-cap-check.patch b/SOURCES/0168-netdrv-net-mlx5e-Add-missing-LRO-cap-check.patch new file mode 100644 index 0000000..bb4be84 --- /dev/null +++ b/SOURCES/0168-netdrv-net-mlx5e-Add-missing-LRO-cap-check.patch @@ -0,0 +1,55 @@ +From 43b18ac8b9d4ab89a3e278f05f0a36a13f2582d8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:40 -0400 +Subject: [PATCH 168/312] [netdrv] net/mlx5e: Add missing LRO cap check + +Message-id: <20200512105530.4207-75-ahleihel@redhat.com> +Patchwork-id: 306945 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 074/124] net/mlx5e: Add missing LRO cap check +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 02377e6edf135289ebdf6ff4b40a422db4b780ff +Author: Tariq Toukan +Date: Thu Jan 2 16:17:41 2020 +0200 + + net/mlx5e: Add missing LRO cap check + + The LRO boolean state in params->lro_en must not be set in case + the NIC is not capable. + Enforce this check and remove the TODO comment. 
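The resulting gating can be restated as a small standalone predicate (plain booleans stand in for the capability and striding-RQ checks, and the extra linear-SKB condition is omitted):

#include <stdbool.h>
#include <stdio.h>

/* HW LRO may default on only if the device reports the capability and the
 * RQ uses striding (MPWQE) mode; a slow PCI link then keeps it off. */
static bool lro_default_on(bool hw_lro_cap, bool striding_rq, bool slow_pci)
{
    if (!hw_lro_cap || !striding_rq)
        return false;
    return !slow_pci;
}

int main(void)
{
    printf("cap + striding, fast PCI: %d\n", lro_default_on(true, true, false));
    printf("no cap, striding        : %d\n", lro_default_on(false, true, false));
    return 0;
}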
+ + Signed-off-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 2f87b0b4660f..be3b5f911358 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4794,9 +4794,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, + mlx5e_build_rq_params(mdev, params); + + /* HW LRO */ +- +- /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */ +- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { ++ if (MLX5_CAP_ETH(mdev, lro_cap) && ++ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + /* No XSK params: checking the availability of striding RQ in general. */ + if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) + params->lro_en = !slow_pci_heuristic(mdev); +-- +2.13.6 + diff --git a/SOURCES/0169-netdrv-net-mlx5e-Encapsulate-updating-netdev-queues-.patch b/SOURCES/0169-netdrv-net-mlx5e-Encapsulate-updating-netdev-queues-.patch new file mode 100644 index 0000000..c263e3f --- /dev/null +++ b/SOURCES/0169-netdrv-net-mlx5e-Encapsulate-updating-netdev-queues-.patch @@ -0,0 +1,79 @@ +From 76873cb29b40ccf7c938297b7f007f609dfea0a6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:41 -0400 +Subject: [PATCH 169/312] [netdrv] net/mlx5e: Encapsulate updating netdev + queues into a function + +Message-id: <20200512105530.4207-76-ahleihel@redhat.com> +Patchwork-id: 306947 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 075/124] net/mlx5e: Encapsulate updating netdev queues into a function +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit c2c95271f9f39ea9b34db2301b3b6c5105cdb447 +Author: Maxim Mikityanskiy +Date: Tue Sep 3 17:38:43 2019 +0300 + + net/mlx5e: Encapsulate updating netdev queues into a function + + As a preparation for one of the following commits, create a function to + encapsulate the code that notifies the kernel about the new amount of + RX and TX queues. The code will be called multiple times in the next + commit. 
+ + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index be3b5f911358..eed6e024675e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2887,6 +2887,17 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) + netdev_set_tc_queue(netdev, tc, nch, 0); + } + ++static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv) ++{ ++ int num_txqs = priv->channels.num * priv->channels.params.num_tc; ++ int num_rxqs = priv->channels.num * priv->profile->rq_groups; ++ struct net_device *netdev = priv->netdev; ++ ++ mlx5e_netdev_set_tcs(netdev); ++ netif_set_real_num_tx_queues(netdev, num_txqs); ++ netif_set_real_num_rx_queues(netdev, num_rxqs); ++} ++ + static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) + { + int i, ch; +@@ -2908,13 +2919,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) + + void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) + { +- int num_txqs = priv->channels.num * priv->channels.params.num_tc; +- int num_rxqs = priv->channels.num * priv->profile->rq_groups; +- struct net_device *netdev = priv->netdev; +- +- mlx5e_netdev_set_tcs(netdev); +- netif_set_real_num_tx_queues(netdev, num_txqs); +- netif_set_real_num_rx_queues(netdev, num_rxqs); ++ mlx5e_update_netdev_queues(priv); + + mlx5e_build_txq_maps(priv); + mlx5e_activate_channels(&priv->channels); +-- +2.13.6 + diff --git a/SOURCES/0170-netdrv-net-mlx5e-Rename-hw_modify-to-preactivate.patch b/SOURCES/0170-netdrv-net-mlx5e-Rename-hw_modify-to-preactivate.patch new file mode 100644 index 0000000..c2f4cc8 --- /dev/null +++ b/SOURCES/0170-netdrv-net-mlx5e-Rename-hw_modify-to-preactivate.patch @@ -0,0 +1,116 @@ +From b4469ee9c5ec4d217e295427ccda298052e20526 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:42 -0400 +Subject: [PATCH 170/312] [netdrv] net/mlx5e: Rename hw_modify to preactivate + +Message-id: <20200512105530.4207-77-ahleihel@redhat.com> +Patchwork-id: 306948 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 076/124] net/mlx5e: Rename hw_modify to preactivate +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit dca147b3dce5abb5284ff747211960fd2db5ec2e +Author: Maxim Mikityanskiy +Date: Thu Oct 31 09:39:34 2019 +0200 + + net/mlx5e: Rename hw_modify to preactivate + + mlx5e_safe_switch_channels accepts a callback to be called before + activating new channels. It is intended to configure some hardware + parameters in cases where channels are recreated because some + configuration has changed. + + Recently, this callback has started being used to update the driver's + internal MLX5E_STATE_XDP_OPEN flag, and the following patches also + intend to use this callback for software preparations. This patch + renames the hw_modify callback to preactivate, so that the name fits + better. 
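The shape of the hook is easy to sketch in standalone C (invented names; the real callback also returns an error code, which a later patch in this series starts honouring):

#include <stddef.h>
#include <stdio.h>

struct fake_priv {
    int num_channels;
};

/* Hook run after the new channels exist but before they are activated, for
 * HW or kernel bookkeeping that has to match them. */
typedef int (*preactivate_fn)(struct fake_priv *priv);

static int update_settings(struct fake_priv *priv)
{
    printf("preactivate: applying settings for %d channels\n",
           priv->num_channels);
    return 0;
}

static void switch_channels(struct fake_priv *priv, int new_num,
                            preactivate_fn preactivate)
{
    priv->num_channels = new_num; /* "new channels created" */
    if (preactivate)
        preactivate(priv);
    printf("channels activated\n");
}

int main(void)
{
    struct fake_priv priv = { .num_channels = 4 };

    switch_channels(&priv, 8, update_settings);
    switch_channels(&priv, 2, NULL); /* nothing extra to apply */
    return 0;
}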
+ + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 +++--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14 ++++++++------ + 2 files changed, 11 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index f0f33971be6c..99c7a8047b26 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1024,14 +1024,14 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *chs); + void mlx5e_close_channels(struct mlx5e_channels *chs); + +-/* Function pointer to be used to modify WH settings while ++/* Function pointer to be used to modify HW or kernel settings while + * switching channels + */ +-typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); ++typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv); + int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); + int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, +- mlx5e_fp_hw_modify hw_modify); ++ mlx5e_fp_preactivate preactivate); + void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); + void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index eed6e024675e..70bcd75e241e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2955,7 +2955,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) + + static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, +- mlx5e_fp_hw_modify hw_modify) ++ mlx5e_fp_preactivate preactivate) + { + struct net_device *netdev = priv->netdev; + int new_num_txqs; +@@ -2974,9 +2974,11 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + + priv->channels = *new_chs; + +- /* New channels are ready to roll, modify HW settings if needed */ +- if (hw_modify) +- hw_modify(priv); ++ /* New channels are ready to roll, call the preactivate hook if needed ++ * to modify HW settings or update kernel parameters. 
++ */ ++ if (preactivate) ++ preactivate(priv); + + priv->profile->update_rx(priv); + mlx5e_activate_priv_channels(priv); +@@ -2988,7 +2990,7 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + + int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, +- mlx5e_fp_hw_modify hw_modify) ++ mlx5e_fp_preactivate preactivate) + { + int err; + +@@ -2996,7 +2998,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + if (err) + return err; + +- mlx5e_switch_priv_channels(priv, new_chs, hw_modify); ++ mlx5e_switch_priv_channels(priv, new_chs, preactivate); + return 0; + } + +-- +2.13.6 + diff --git a/SOURCES/0171-netdrv-net-mlx5e-Use-preactivate-hook-to-set-the-ind.patch b/SOURCES/0171-netdrv-net-mlx5e-Use-preactivate-hook-to-set-the-ind.patch new file mode 100644 index 0000000..4029cbf --- /dev/null +++ b/SOURCES/0171-netdrv-net-mlx5e-Use-preactivate-hook-to-set-the-ind.patch @@ -0,0 +1,127 @@ +From c7e15c25bf93c8163905184c17829ac840677ab6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:43 -0400 +Subject: [PATCH 171/312] [netdrv] net/mlx5e: Use preactivate hook to set the + indirection table + +Message-id: <20200512105530.4207-78-ahleihel@redhat.com> +Patchwork-id: 306949 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 077/124] net/mlx5e: Use preactivate hook to set the indirection table +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit fe867cac9e1967c553e4ac2aece5fc8675258010 +Author: Maxim Mikityanskiy +Date: Mon Nov 4 12:02:14 2019 +0200 + + net/mlx5e: Use preactivate hook to set the indirection table + + mlx5e_ethtool_set_channels updates the indirection table before + switching to the new channels. If the switch fails, the indirection + table is new, but the channels are old, which is wrong. Fix it by using + the preactivate hook of mlx5e_safe_switch_channels to update the + indirection table at the stage when nothing can fail anymore. + + As the code that updates the indirection table is now encapsulated into + a new function, use that function in the attach flow when the driver has + to reduce the number of channels, and prepare the code for the next + commit. 
+ + Fixes: 85082dba0a ("net/mlx5e: Correctly handle RSS indirection table when changing number of channels") + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + + drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 10 ++-------- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 16 ++++++++++++++-- + 3 files changed, 17 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 99c7a8047b26..cef4c1cba2b8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1032,6 +1032,7 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); + int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_preactivate preactivate); ++int mlx5e_num_channels_changed(struct mlx5e_priv *priv); + void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); + void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 70ca9bf51cdd..e7043f68a38f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -432,9 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + *cur_params = new_channels.params; +- if (!netif_is_rxfh_configured(priv->netdev)) +- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, +- MLX5E_INDIR_RQT_SIZE, count); ++ mlx5e_num_channels_changed(priv); + goto out; + } + +@@ -442,12 +440,8 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, + if (arfs_enabled) + mlx5e_arfs_disable(priv); + +- if (!netif_is_rxfh_configured(priv->netdev)) +- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, +- MLX5E_INDIR_RQT_SIZE, count); +- + /* Switch to new channels, set new parameters and close old ones */ +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed); + + if (arfs_enabled) { + int err2 = mlx5e_arfs_enable(priv); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 70bcd75e241e..1d72ee543447 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2898,6 +2898,17 @@ static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv) + netif_set_real_num_rx_queues(netdev, num_rxqs); + } + ++int mlx5e_num_channels_changed(struct mlx5e_priv *priv) ++{ ++ u16 count = priv->channels.params.num_channels; ++ ++ if (!netif_is_rxfh_configured(priv->netdev)) ++ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, ++ MLX5E_INDIR_RQT_SIZE, count); ++ ++ return 0; ++} ++ + static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) + { + int i, ch; +@@ -5309,9 +5320,10 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) + max_nch = mlx5e_get_max_num_channels(priv->mdev); + if (priv->channels.params.num_channels > max_nch) { + mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); ++ /* Reducing the number of channels - RXFH has to be reset. 
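The preallocation pattern, stripped of driver specifics, looks like this standalone sketch (calloc and a plain array stand in for cpumask_var_t; names are invented):

#include <stdio.h>
#include <stdlib.h>

/* Scratch storage set up where failure can still be reported, used later on
 * a path that is not allowed to fail. */
struct scratchpad {
    unsigned long *cpumask; /* stands in for cpumask_var_t */
    size_t words;
};

static int scratchpad_init(struct scratchpad *sp, size_t words)
{
    sp->cpumask = calloc(words, sizeof(*sp->cpumask));
    sp->words = words;
    return sp->cpumask ? 0 : -1; /* allocation may fail here... */
}

static void preactivate_hook(struct scratchpad *sp)
{
    size_t i;

    /* ...but not here: only reuse of the preallocated mask. */
    for (i = 0; i < sp->words; i++)
        sp->cpumask[i] = 0;
    printf("built default XPS mask in preallocated scratch space\n");
}

int main(void)
{
    struct scratchpad sp;

    if (scratchpad_init(&sp, 4))
        return 1;
    preactivate_hook(&sp);
    free(sp.cpumask);
    return 0;
}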
*/ ++ priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; + priv->channels.params.num_channels = max_nch; +- mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, +- MLX5E_INDIR_RQT_SIZE, max_nch); ++ mlx5e_num_channels_changed(priv); + } + + err = profile->init_tx(priv); +-- +2.13.6 + diff --git a/SOURCES/0172-netdrv-net-mlx5e-Fix-configuration-of-XPS-cpumasks-a.patch b/SOURCES/0172-netdrv-net-mlx5e-Fix-configuration-of-XPS-cpumasks-a.patch new file mode 100644 index 0000000..12bb9ad --- /dev/null +++ b/SOURCES/0172-netdrv-net-mlx5e-Fix-configuration-of-XPS-cpumasks-a.patch @@ -0,0 +1,316 @@ +From 7a413876c0e669357b19e05c5e4a9e392b927d3f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:44 -0400 +Subject: [PATCH 172/312] [netdrv] net/mlx5e: Fix configuration of XPS cpumasks + and netdev queues in corner cases + +Message-id: <20200512105530.4207-79-ahleihel@redhat.com> +Patchwork-id: 306950 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 078/124] net/mlx5e: Fix configuration of XPS cpumasks and netdev queues in corner cases +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 3909a12e79135a66a797041ab337a8c7cb387bdf +Author: Maxim Mikityanskiy +Date: Tue Sep 3 17:55:45 2019 +0300 + + net/mlx5e: Fix configuration of XPS cpumasks and netdev queues in corner cases + + Currently, mlx5e notifies the kernel about the number of queues and sets + the default XPS cpumasks when channels are activated. This + implementation has several corner cases, in which the kernel may not be + updated on time, or XPS cpumasks may be reset when not directly touched + by the user. + + This commit fixes these corner cases to match the following expected + behavior: + + 1. The number of queues always corresponds to the number of channels + configured. + + 2. XPS cpumasks are set to driver's defaults on netdev attach. + + 3. XPS cpumasks set by user are not reset, unless the number of channels + changes. If the number of channels changes, they are reset to driver's + defaults. (In general case, when the number of channels increases or + decreases, it's not possible to guess how to convert the current XPS + cpumasks to work with the new number of channels, so we let the user + reconfigure it if they change the number of channels.) + + XPS cpumasks are no longer stored per channel. Only one temporary + cpumask is used. The old stored cpumasks didn't reflect the user's + changes and were not used after applying them. + + A scratchpad area is added to struct mlx5e_priv. As cpumask_var_t + requires allocation, and the preactivate hook can't fail, we need to + preallocate the temporary cpumask in advance. It's stored in the + scratchpad. 
+ + Fixes: 149e566fef81 ("net/mlx5e: Expand XPS cpumask to cover all online cpus") + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 11 ++- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 95 +++++++++++++---------- + 2 files changed, 65 insertions(+), 41 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index cef4c1cba2b8..b90225f62234 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -737,7 +737,6 @@ struct mlx5e_channel { + DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); + int ix; + int cpu; +- cpumask_var_t xps_cpumask; + }; + + struct mlx5e_channels { +@@ -804,6 +803,15 @@ struct mlx5e_xsk { + bool ever_used; + }; + ++/* Temporary storage for variables that are allocated when struct mlx5e_priv is ++ * initialized, and used where we can't allocate them because that functions ++ * must not fail. Use with care and make sure the same variable is not used ++ * simultaneously by multiple users. ++ */ ++struct mlx5e_scratchpad { ++ cpumask_var_t cpumask; ++}; ++ + struct mlx5e_priv { + /* priv data path fields - start */ + struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; +@@ -864,6 +872,7 @@ struct mlx5e_priv { + struct devlink_health_reporter *tx_reporter; + struct devlink_health_reporter *rx_reporter; + struct mlx5e_xsk xsk; ++ struct mlx5e_scratchpad scratchpad; + }; + + struct mlx5e_profile { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 1d72ee543447..d97c865989e1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -1812,29 +1812,6 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) + return err; + } + +-static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c, +- struct mlx5e_params *params) +-{ +- int num_comp_vectors = mlx5_comp_vectors_count(c->mdev); +- int irq; +- +- if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL)) +- return -ENOMEM; +- +- for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) { +- int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq)); +- +- cpumask_set_cpu(cpu, c->xps_cpumask); +- } +- +- return 0; +-} +- +-static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c) +-{ +- free_cpumask_var(c->xps_cpumask); +-} +- + static int mlx5e_open_queues(struct mlx5e_channel *c, + struct mlx5e_params *params, + struct mlx5e_channel_param *cparam) +@@ -1985,10 +1962,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, + c->irq_desc = irq_to_desc(irq); + c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); + +- err = mlx5e_alloc_xps_cpumask(c, params); +- if (err) +- goto err_free_channel; +- + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); + + err = mlx5e_open_queues(c, params, cparam); +@@ -2011,9 +1984,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, + + err_napi_del: + netif_napi_del(&c->napi); +- mlx5e_free_xps_cpumask(c); + +-err_free_channel: + kvfree(c); + + return err; +@@ -2027,7 +1998,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) + mlx5e_activate_txqsq(&c->sq[tc]); + mlx5e_activate_icosq(&c->icosq); + mlx5e_activate_rq(&c->rq); +- netif_set_xps_queue(c->netdev, 
c->xps_cpumask, c->ix); + + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + mlx5e_activate_xsk(c); +@@ -2052,7 +2022,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) + mlx5e_close_xsk(c); + mlx5e_close_queues(c); + netif_napi_del(&c->napi); +- mlx5e_free_xps_cpumask(c); + + kvfree(c); + } +@@ -2887,10 +2856,10 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) + netdev_set_tc_queue(netdev, tc, nch, 0); + } + +-static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv) ++static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count) + { +- int num_txqs = priv->channels.num * priv->channels.params.num_tc; +- int num_rxqs = priv->channels.num * priv->profile->rq_groups; ++ int num_txqs = count * priv->channels.params.num_tc; ++ int num_rxqs = count * priv->profile->rq_groups; + struct net_device *netdev = priv->netdev; + + mlx5e_netdev_set_tcs(netdev); +@@ -2898,10 +2867,34 @@ static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv) + netif_set_real_num_rx_queues(netdev, num_rxqs); + } + ++static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, ++ struct mlx5e_params *params) ++{ ++ struct mlx5_core_dev *mdev = priv->mdev; ++ int num_comp_vectors, ix, irq; ++ ++ num_comp_vectors = mlx5_comp_vectors_count(mdev); ++ ++ for (ix = 0; ix < params->num_channels; ix++) { ++ cpumask_clear(priv->scratchpad.cpumask); ++ ++ for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) { ++ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq)); ++ ++ cpumask_set_cpu(cpu, priv->scratchpad.cpumask); ++ } ++ ++ netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix); ++ } ++} ++ + int mlx5e_num_channels_changed(struct mlx5e_priv *priv) + { + u16 count = priv->channels.params.num_channels; + ++ mlx5e_update_netdev_queues(priv, count); ++ mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); ++ + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); +@@ -2930,8 +2923,6 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) + + void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) + { +- mlx5e_update_netdev_queues(priv); +- + mlx5e_build_txq_maps(priv); + mlx5e_activate_channels(&priv->channels); + mlx5e_xdp_tx_enable(priv); +@@ -3468,7 +3459,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, + goto out; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed); + if (err) + goto out; + +@@ -5252,6 +5243,9 @@ int mlx5e_netdev_init(struct net_device *netdev, + priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); + priv->max_opened_tc = 1; + ++ if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL)) ++ return -ENOMEM; ++ + mutex_init(&priv->state_lock); + INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); + INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); +@@ -5260,7 +5254,7 @@ int mlx5e_netdev_init(struct net_device *netdev, + + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) +- return -ENOMEM; ++ goto err_free_cpumask; + + /* netdev init */ + netif_carrier_off(netdev); +@@ -5270,11 +5264,17 @@ int mlx5e_netdev_init(struct net_device *netdev, + #endif + + return 0; ++ ++err_free_cpumask: ++ free_cpumask_var(priv->scratchpad.cpumask); ++ ++ return -ENOMEM; + } + + void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv 
*priv) + { + destroy_workqueue(priv->wq); ++ free_cpumask_var(priv->scratchpad.cpumask); + } + + struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, +@@ -5309,6 +5309,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + + int mlx5e_attach_netdev(struct mlx5e_priv *priv) + { ++ const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED; + const struct mlx5e_profile *profile; + int max_nch; + int err; +@@ -5320,11 +5321,25 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) + max_nch = mlx5e_get_max_num_channels(priv->mdev); + if (priv->channels.params.num_channels > max_nch) { + mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); +- /* Reducing the number of channels - RXFH has to be reset. */ ++ /* Reducing the number of channels - RXFH has to be reset, and ++ * mlx5e_num_channels_changed below will build the RQT. ++ */ + priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; + priv->channels.params.num_channels = max_nch; +- mlx5e_num_channels_changed(priv); + } ++ /* 1. Set the real number of queues in the kernel the first time. ++ * 2. Set our default XPS cpumask. ++ * 3. Build the RQT. ++ * ++ * rtnl_lock is required by netif_set_real_num_*_queues in case the ++ * netdev has been registered by this point (if this function was called ++ * in the reload or resume flow). ++ */ ++ if (take_rtnl) ++ rtnl_lock(); ++ mlx5e_num_channels_changed(priv); ++ if (take_rtnl) ++ rtnl_unlock(); + + err = profile->init_tx(priv); + if (err) +-- +2.13.6 + diff --git a/SOURCES/0173-netdrv-net-mlx5e-Remove-unneeded-netif_set_real_num_.patch b/SOURCES/0173-netdrv-net-mlx5e-Remove-unneeded-netif_set_real_num_.patch new file mode 100644 index 0000000..24341d1 --- /dev/null +++ b/SOURCES/0173-netdrv-net-mlx5e-Remove-unneeded-netif_set_real_num_.patch @@ -0,0 +1,63 @@ +From 4ffd117a8b63b9aed3e53f28e46d425da21c5270 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:45 -0400 +Subject: [PATCH 173/312] [netdrv] net/mlx5e: Remove unneeded + netif_set_real_num_tx_queues + +Message-id: <20200512105530.4207-80-ahleihel@redhat.com> +Patchwork-id: 306951 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 079/124] net/mlx5e: Remove unneeded netif_set_real_num_tx_queues +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 600a3952a2a6228246fa2acb084f2e4522ca9cb1 +Author: Maxim Mikityanskiy +Date: Mon Nov 25 14:29:46 2019 +0200 + + net/mlx5e: Remove unneeded netif_set_real_num_tx_queues + + The number of queues is now updated by mlx5e_update_netdev_queues in a + centralized way, when no channels are active. Remove an extra occurrence + of netif_set_real_num_tx_queues to prepare it for the next commit. 
+ + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index d97c865989e1..ae91592165ea 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2960,17 +2960,11 @@ static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + mlx5e_fp_preactivate preactivate) + { + struct net_device *netdev = priv->netdev; +- int new_num_txqs; + int carrier_ok; + +- new_num_txqs = new_chs->num * new_chs->params.num_tc; +- + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); + +- if (new_num_txqs < netdev->real_num_tx_queues) +- netif_set_real_num_tx_queues(netdev, new_num_txqs); +- + mlx5e_deactivate_priv_channels(priv); + mlx5e_close_channels(&priv->channels); + +-- +2.13.6 + diff --git a/SOURCES/0174-netdrv-net-mlx5e-Allow-mlx5e_switch_priv_channels-to.patch b/SOURCES/0174-netdrv-net-mlx5e-Allow-mlx5e_switch_priv_channels-to.patch new file mode 100644 index 0000000..a72b67a --- /dev/null +++ b/SOURCES/0174-netdrv-net-mlx5e-Allow-mlx5e_switch_priv_channels-to.patch @@ -0,0 +1,119 @@ +From a60735875b83703004c3c0cefa1afe5c28bfbb7c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:46 -0400 +Subject: [PATCH 174/312] [netdrv] net/mlx5e: Allow mlx5e_switch_priv_channels + to fail and recover + +Message-id: <20200512105530.4207-81-ahleihel@redhat.com> +Patchwork-id: 306952 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 080/124] net/mlx5e: Allow mlx5e_switch_priv_channels to fail and recover +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 35a78ed4c351319e8840d99ba9032bf2d175e168 +Author: Maxim Mikityanskiy +Date: Wed Nov 13 18:07:29 2019 +0200 + + net/mlx5e: Allow mlx5e_switch_priv_channels to fail and recover + + Currently mlx5e_switch_priv_channels expects that the preactivate hook + doesn't fail, however, it can fail, because it may set hardware + parameters. This commit addresses this issue and provides a way to + recover from failures of the preactivate hook: the old channels are not + closed until the point where nothing can fail anymore, so in case + preactivate fails, the driver can roll back the old channels and + activate them again. 
+ + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 34 ++++++++++++++++++----- + 1 file changed, 27 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index ae91592165ea..390db68727ff 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2955,33 +2955,45 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) + mlx5e_deactivate_channels(&priv->channels); + } + +-static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, +- struct mlx5e_channels *new_chs, +- mlx5e_fp_preactivate preactivate) ++static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, ++ struct mlx5e_channels *new_chs, ++ mlx5e_fp_preactivate preactivate) + { + struct net_device *netdev = priv->netdev; ++ struct mlx5e_channels old_chs; + int carrier_ok; ++ int err = 0; + + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); + + mlx5e_deactivate_priv_channels(priv); +- mlx5e_close_channels(&priv->channels); + ++ old_chs = priv->channels; + priv->channels = *new_chs; + + /* New channels are ready to roll, call the preactivate hook if needed + * to modify HW settings or update kernel parameters. + */ +- if (preactivate) +- preactivate(priv); ++ if (preactivate) { ++ err = preactivate(priv); ++ if (err) { ++ priv->channels = old_chs; ++ goto out; ++ } ++ } + ++ mlx5e_close_channels(&old_chs); + priv->profile->update_rx(priv); ++ ++out: + mlx5e_activate_priv_channels(priv); + + /* return carrier back if needed */ + if (carrier_ok) + netif_carrier_on(netdev); ++ ++ return err; + } + + int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, +@@ -2994,8 +3006,16 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + if (err) + return err; + +- mlx5e_switch_priv_channels(priv, new_chs, preactivate); ++ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate); ++ if (err) ++ goto err_close; ++ + return 0; ++ ++err_close: ++ mlx5e_close_channels(new_chs); ++ ++ return err; + } + + int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) +-- +2.13.6 + diff --git a/SOURCES/0175-netdrv-net-mlx5e-Add-context-to-the-preactivate-hook.patch b/SOURCES/0175-netdrv-net-mlx5e-Add-context-to-the-preactivate-hook.patch new file mode 100644 index 0000000..cf06d1f --- /dev/null +++ b/SOURCES/0175-netdrv-net-mlx5e-Add-context-to-the-preactivate-hook.patch @@ -0,0 +1,339 @@ +From 044c5eae6528939777ab0c41134d4c14f8e07a8a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:47 -0400 +Subject: [PATCH 175/312] [netdrv] net/mlx5e: Add context to the preactivate + hook + +Message-id: <20200512105530.4207-82-ahleihel@redhat.com> +Patchwork-id: 306953 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 081/124] net/mlx5e: Add context to the preactivate hook +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit b9ab5d0ecf426a1bf16d706e7c284e00998d00be +Author: Maxim Mikityanskiy +Date: Mon Dec 2 15:48:25 2019 +0200 + + net/mlx5e: Add context to the preactivate hook + + Sometimes the preactivate hook of mlx5e_safe_switch_channels needs more + parameters than just struct mlx5e_priv *. 
For such cases, a new + parameter (void *context) is added to preactivate hooks. + + Some of the existing normal functions are currently used as preactivate + callbacks. To avoid adding an extra unused parameter, they are wrapped + in an automatic way using the MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX + macro. + + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 15 ++++++--- + drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2 +- + .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 15 ++++----- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 36 ++++++++++++++-------- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- + .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2 +- + 6 files changed, 45 insertions(+), 27 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index b90225f62234..58a7f28b146f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1036,12 +1036,19 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); + /* Function pointer to be used to modify HW or kernel settings while + * switching channels + */ +-typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv); ++typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context); ++#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \ ++int fn##_ctx(struct mlx5e_priv *priv, void *context) \ ++{ \ ++ return fn(priv); \ ++} + int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); + int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, +- mlx5e_fp_preactivate preactivate); ++ mlx5e_fp_preactivate preactivate, ++ void *context); + int mlx5e_num_channels_changed(struct mlx5e_priv *priv); ++int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); + void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); + void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); + +@@ -1122,10 +1129,10 @@ void mlx5e_update_ndo_stats(struct mlx5e_priv *priv); + void mlx5e_queue_update_stats(struct mlx5e_priv *priv); + int mlx5e_bits_invert(unsigned long a, int size); + +-typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); + int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); ++int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context); + int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, +- change_hw_mtu_cb set_mtu_cb); ++ mlx5e_fp_preactivate preactivate); + + /* ethtool helpers */ + void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +index 01f2918063af..1375f6483a13 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +@@ -1126,7 +1126,7 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) + priv->channels.params.tx_min_inline_mode) + goto out; + +- mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + + out: + mutex_unlock(&priv->state_lock); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index e7043f68a38f..915f1d74afad 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -357,7 +357,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, + goto unlock; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + + unlock: + mutex_unlock(&priv->state_lock); +@@ -441,7 +441,8 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, + mlx5e_arfs_disable(priv); + + /* Switch to new channels, set new parameters and close old ones */ +- err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, ++ mlx5e_num_channels_changed_ctx, NULL); + + if (arfs_enabled) { + int err2 = mlx5e_arfs_enable(priv); +@@ -574,7 +575,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, + goto out; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + + out: + mutex_unlock(&priv->state_lock); +@@ -1733,7 +1734,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, + return 0; + } + +- return mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + } + + static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) +@@ -1766,7 +1767,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val + return 0; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + if (err) + return err; + +@@ -1823,7 +1824,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) + return 0; + } + +- return mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + } + + static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) +@@ -1867,7 +1868,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) + return 0; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + return err; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 390db68727ff..0d50ee4fd986 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2771,6 +2771,8 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) + return err; + } + ++static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); ++ + static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, u16 mtu) + { +@@ -2820,6 +2822,8 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) + return 0; + } + ++MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu); ++ + void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) + { + struct mlx5e_params *params = &priv->channels.params; +@@ -2902,6 +2906,8 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv) + return 0; + } + ++MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); ++ + static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) + { + int i, ch; +@@ -2957,7 +2963,8 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) + + static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, +- mlx5e_fp_preactivate 
preactivate) ++ mlx5e_fp_preactivate preactivate, ++ void *context) + { + struct net_device *netdev = priv->netdev; + struct mlx5e_channels old_chs; +@@ -2976,7 +2983,7 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + * to modify HW settings or update kernel parameters. + */ + if (preactivate) { +- err = preactivate(priv); ++ err = preactivate(priv, context); + if (err) { + priv->channels = old_chs; + goto out; +@@ -2998,7 +3005,8 @@ static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + + int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, +- mlx5e_fp_preactivate preactivate) ++ mlx5e_fp_preactivate preactivate, ++ void *context) + { + int err; + +@@ -3006,7 +3014,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + if (err) + return err; + +- err = mlx5e_switch_priv_channels(priv, new_chs, preactivate); ++ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context); + if (err) + goto err_close; + +@@ -3023,7 +3031,7 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) + struct mlx5e_channels new_channels = {}; + + new_channels.params = priv->channels.params; +- return mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + } + + void mlx5e_timestamp_init(struct mlx5e_priv *priv) +@@ -3473,7 +3481,8 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, + goto out; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, ++ mlx5e_num_channels_changed_ctx, NULL); + if (err) + goto out; + +@@ -3686,7 +3695,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable) + goto out; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, ++ mlx5e_modify_tirs_lro_ctx, NULL); + out: + mutex_unlock(&priv->state_lock); + return err; +@@ -3905,7 +3915,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, + } + + int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, +- change_hw_mtu_cb set_mtu_cb) ++ mlx5e_fp_preactivate preactivate) + { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_channels new_channels = {}; +@@ -3954,13 +3964,13 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + + if (!reset) { + params->sw_mtu = new_mtu; +- if (set_mtu_cb) +- set_mtu_cb(priv); ++ if (preactivate) ++ preactivate(priv, NULL); + netdev->mtu = params->sw_mtu; + goto out; + } + +- err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL); + if (err) + goto out; + +@@ -3973,7 +3983,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + + static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) + { +- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); ++ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); + } + + int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) +@@ -4439,7 +4449,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) + mlx5e_set_rq_type(priv->mdev, &new_channels.params); + old_prog = priv->channels.params.xdp_prog; + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + if (err) + goto unlock; + } else { +diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 2681bd39eab2..7d12e20e4582 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1302,7 +1302,7 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu) + + static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu) + { +- return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); ++ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx); + } + + static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +index 56078b23f1a0..673aaa815f57 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +@@ -483,7 +483,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) + new_channels.params = *params; + new_channels.params.sw_mtu = new_mtu; + +- err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); ++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + if (err) + goto out; + +-- +2.13.6 + diff --git a/SOURCES/0176-netdrv-net-mlx5e-Change-inline-mode-correctly-when-c.patch b/SOURCES/0176-netdrv-net-mlx5e-Change-inline-mode-correctly-when-c.patch new file mode 100644 index 0000000..a597dc9 --- /dev/null +++ b/SOURCES/0176-netdrv-net-mlx5e-Change-inline-mode-correctly-when-c.patch @@ -0,0 +1,160 @@ +From 0cf024ed12b5e44253c1498cb3265d2874a0c324 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:48 -0400 +Subject: [PATCH 176/312] [netdrv] net/mlx5e: Change inline mode correctly when + changing trust state + +Message-id: <20200512105530.4207-83-ahleihel@redhat.com> +Patchwork-id: 306954 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 082/124] net/mlx5e: Change inline mode correctly when changing trust state +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 6e0504c69811ae9df7e7e1284950befbe3e6f496 +Author: Maxim Mikityanskiy +Date: Thu Nov 14 13:06:16 2019 +0200 + + net/mlx5e: Change inline mode correctly when changing trust state + + The current steps that are performed when the trust state changes, if + the channels are active: + + 1. The trust state is changed in hardware. + + 2. The new inline mode is calculated. + + 3. If the new inline mode is different, the channels are recreated using + the new inline mode. + + This approach has some issues: + + 1. There is a time gap between changing trust state in hardware and + starting sending enough inline headers (the latter happens after + recreation of channels). It leads to failed transmissions and error + CQEs. + + 2. If the new channels fail to open, we'll be left with the old ones, + but the hardware will be configured for the new trust state, so the + interval when we can see TX errors never ends. + + This patch fixes the issues above by moving the trust state change into + the preactivate hook that runs during the recreation of the channels + when no channels are active, so it eliminates the gap of partially + applied configuration. If the inline mode doesn't change with the change + of the trust state, the channels won't be recreated, just like before + this patch. 
+ + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 55 +++++++++++++--------- + 1 file changed, 33 insertions(+), 22 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +index 1375f6483a13..47874d34156b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +@@ -1098,49 +1098,59 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) + mlx5e_dcbnl_dscp_app(priv, DELETE); + } + +-static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv, +- struct mlx5e_params *params) ++static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev, ++ struct mlx5e_params *params, ++ u8 trust_state) + { +- mlx5_query_min_inline(priv->mdev, ¶ms->tx_min_inline_mode); +- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP && ++ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); ++ if (trust_state == MLX5_QPTS_TRUST_DSCP && + params->tx_min_inline_mode == MLX5_INLINE_MODE_L2) + params->tx_min_inline_mode = MLX5_INLINE_MODE_IP; + } + +-static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) ++static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context) ++{ ++ u8 *trust_state = context; ++ int err; ++ ++ err = mlx5_set_trust_state(priv->mdev, *trust_state); ++ if (err) ++ return err; ++ priv->dcbx_dp.trust_state = *trust_state; ++ ++ return 0; ++} ++ ++static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) + { + struct mlx5e_channels new_channels = {}; ++ bool reset_channels = true; ++ int err = 0; + + mutex_lock(&priv->state_lock); + + new_channels.params = priv->channels.params; +- mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); ++ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params, ++ trust_state); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; +- goto out; ++ reset_channels = false; + } + + /* Skip if tx_min_inline is the same */ + if (new_channels.params.tx_min_inline_mode == + priv->channels.params.tx_min_inline_mode) +- goto out; ++ reset_channels = false; + +- mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); ++ if (reset_channels) ++ err = mlx5e_safe_switch_channels(priv, &new_channels, ++ mlx5e_update_trust_state_hw, ++ &trust_state); ++ else ++ err = mlx5e_update_trust_state_hw(priv, &trust_state); + +-out: + mutex_unlock(&priv->state_lock); +-} +- +-static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) +-{ +- int err; +- +- err = mlx5_set_trust_state(priv->mdev, trust_state); +- if (err) +- return err; +- priv->dcbx_dp.trust_state = trust_state; +- mlx5e_trust_update_sq_inline_mode(priv); + + return err; + } +@@ -1171,7 +1181,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) + if (err) + return err; + +- mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params); ++ mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, ++ priv->dcbx_dp.trust_state); + + err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio); + if (err) +-- +2.13.6 + diff --git a/SOURCES/0177-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-p.patch b/SOURCES/0177-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-p.patch new file mode 100644 
index 0000000..535f057 --- /dev/null +++ b/SOURCES/0177-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-p.patch @@ -0,0 +1,76 @@ +From a1259ce446814d9e74865a3c346f7bb2a8ecb469 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:49 -0400 +Subject: [PATCH 177/312] [netdrv] net/mlx5e: RX, Use indirect calls wrapper + for posting descriptors + +Message-id: <20200512105530.4207-84-ahleihel@redhat.com> +Patchwork-id: 306955 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 083/124] net/mlx5e: RX, Use indirect calls wrapper for posting descriptors +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 2c8f80b3e318d0c434d1a6d38e38b1db83db0b95 +Author: Tariq Toukan +Date: Mon Jan 27 13:28:42 2020 +0200 + + net/mlx5e: RX, Use indirect calls wrapper for posting descriptors + + We can avoid an indirect call per NAPI cycle wrapping the RX descriptors + posting call with the appropriate helper. + + Signed-off-by: Tariq Toukan + Reviewed-by: Maxim Mikityanskiy + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +index a2daa3dfe15a..5dcdd18143e6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +@@ -31,6 +31,7 @@ + */ + + #include ++#include + #include "en.h" + #include "en/xdp.h" + #include "en/xsk/tx.h" +@@ -87,7 +88,10 @@ static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskr + bool busy_xsk = false; + + busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); +- busy_xsk |= xskrq->post_wqes(xskrq); ++ busy_xsk |= INDIRECT_CALL_2(xskrq->post_wqes, ++ mlx5e_post_rx_mpwqes, ++ mlx5e_post_rx_wqes, ++ xskrq); + + return busy_xsk; + } +@@ -129,7 +133,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) + + mlx5e_poll_ico_cq(&c->icosq.cq); + +- busy |= rq->post_wqes(rq); ++ busy |= INDIRECT_CALL_2(rq->post_wqes, ++ mlx5e_post_rx_mpwqes, ++ mlx5e_post_rx_wqes, ++ rq); + if (xsk_open) { + mlx5e_poll_ico_cq(&c->xskicosq.cq); + busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); +-- +2.13.6 + diff --git a/SOURCES/0178-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-h.patch b/SOURCES/0178-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-h.patch new file mode 100644 index 0000000..4b18ca9 --- /dev/null +++ b/SOURCES/0178-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-h.patch @@ -0,0 +1,64 @@ +From 8a307f6cfd32ede46f9a648fc3e1c0cfc9a442f6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:50 -0400 +Subject: [PATCH 178/312] [netdrv] net/mlx5e: RX, Use indirect calls wrapper + for handling compressed completions + +Message-id: <20200512105530.4207-85-ahleihel@redhat.com> +Patchwork-id: 306956 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 084/124] net/mlx5e: RX, Use indirect calls wrapper for handling compressed completions +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit e9c1d2539dc04caae0bf4ee96a7a8bdb9df8ea0d +Author: Tariq Toukan +Date: Mon Jan 27 13:34:31 2020 +0200 + + net/mlx5e: RX, Use indirect calls 
wrapper for handling compressed completions + + We can avoid an indirect call per compressed completion wrapping the + completion handling call with the appropriate helper. + + Signed-off-by: Tariq Toukan + Reviewed-by: Maxim Mikityanskiy + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 1f42e88f4ec4..b2109bbcb985 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -159,7 +159,8 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, + mlx5e_read_mini_arr_slot(wq, cqd, cqcc); + + mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); +- rq->handle_rx_cqe(rq, &cqd->title); ++ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, ++ mlx5e_handle_rx_cqe, rq, &cqd->title); + } + mlx5e_cqes_update_owner(wq, cqcc - wq->cc); + wq->cc = cqcc; +@@ -179,7 +180,8 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, + mlx5e_read_title_slot(rq, wq, cc); + mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); + mlx5e_decompress_cqe(rq, wq, cc); +- rq->handle_rx_cqe(rq, &cqd->title); ++ INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, ++ mlx5e_handle_rx_cqe, rq, &cqd->title); + cqd->mini_arr_idx++; + + return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; +-- +2.13.6 + diff --git a/SOURCES/0179-netdrv-net-mlx5-sparse-warning-incorrect-type-in-ass.patch b/SOURCES/0179-netdrv-net-mlx5-sparse-warning-incorrect-type-in-ass.patch new file mode 100644 index 0000000..df6ec23 --- /dev/null +++ b/SOURCES/0179-netdrv-net-mlx5-sparse-warning-incorrect-type-in-ass.patch @@ -0,0 +1,52 @@ +From 0ab966999fca01c04be997507ba0aa3ac8e507c5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:51 -0400 +Subject: [PATCH 179/312] [netdrv] net/mlx5: sparse: warning: incorrect type in + assignment + +Message-id: <20200512105530.4207-86-ahleihel@redhat.com> +Patchwork-id: 306957 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 085/124] net/mlx5: sparse: warning: incorrect type in assignment +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 5edc4c7275ee05a8e76199ddd6c494840c8707aa +Author: Saeed Mahameed +Date: Wed Jan 22 15:05:21 2020 -0800 + + net/mlx5: sparse: warning: incorrect type in assignment + + drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c:191:13: + sparse: warning: incorrect type in assignment (different base types) + + Signed-off-by: Saeed Mahameed + Reviewed-by: Moshe Shemesh + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +index 94d7b69a95c7..c9c9b479bda5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +@@ -188,7 +188,7 @@ static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer) + + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); +- mtt = (u64 
*)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); ++ mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++) + mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE); + +-- +2.13.6 + diff --git a/SOURCES/0180-netdrv-net-mlx5-sparse-warning-Using-plain-integer-a.patch b/SOURCES/0180-netdrv-net-mlx5-sparse-warning-Using-plain-integer-a.patch new file mode 100644 index 0000000..067af90 --- /dev/null +++ b/SOURCES/0180-netdrv-net-mlx5-sparse-warning-Using-plain-integer-a.patch @@ -0,0 +1,51 @@ +From 16bde132fef4e56fe26d945178b1675b6e4d08b4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:52 -0400 +Subject: [PATCH 180/312] [netdrv] net/mlx5: sparse: warning: Using plain + integer as NULL pointer + +Message-id: <20200512105530.4207-87-ahleihel@redhat.com> +Patchwork-id: 306958 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 086/124] net/mlx5: sparse: warning: Using plain integer as NULL pointer +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 586ee9e8a3b00757836787d91b4c369bc36d7928 +Author: Saeed Mahameed +Date: Wed Jan 22 15:06:35 2020 -0800 + + net/mlx5: sparse: warning: Using plain integer as NULL pointer + + Return NULL instead of 0. + + Signed-off-by: Saeed Mahameed + Reviewed-by: Moshe Shemesh + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c +index e065c2f68f5a..6cbccba56f70 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c +@@ -21,7 +21,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev) + struct mlx5_dm *dm; + + if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)) +- return 0; ++ return NULL; + + dm = kzalloc(sizeof(*dm), GFP_KERNEL); + if (!dm) +-- +2.13.6 + diff --git a/SOURCES/0181-include-net-mlx5-fix-spelling-mistake-reserverd-rese.patch b/SOURCES/0181-include-net-mlx5-fix-spelling-mistake-reserverd-rese.patch new file mode 100644 index 0000000..bce73bc --- /dev/null +++ b/SOURCES/0181-include-net-mlx5-fix-spelling-mistake-reserverd-rese.patch @@ -0,0 +1,51 @@ +From 904bb79608c74b9427084cc653fec90fff5b3701 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:53 -0400 +Subject: [PATCH 181/312] [include] net/mlx5: fix spelling mistake "reserverd" + -> "reserved" + +Message-id: <20200512105530.4207-88-ahleihel@redhat.com> +Patchwork-id: 306959 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 087/124] net/mlx5: fix spelling mistake "reserverd" -> "reserved" +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit d8fab4815a371e8013e1a769c31da1bcaf618b01 +Author: Alexandre Belloni +Date: Fri Feb 14 15:30:01 2020 +0100 + + net/mlx5: fix spelling mistake "reserverd" -> "reserved" + + The reserved member should be named reserved. 
+ + Signed-off-by: Alexandre Belloni + Signed-off-by: Leon Romanovsky + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc_fpga.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h +index 37e065a80a43..07d77323f78a 100644 +--- a/include/linux/mlx5/mlx5_ifc_fpga.h ++++ b/include/linux/mlx5/mlx5_ifc_fpga.h +@@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits { + struct mlx5_ifc_tls_resp_bits { + u8 syndrome[0x20]; + u8 stream_id[0x20]; +- u8 reserverd[0x40]; ++ u8 reserved[0x40]; + }; + + #define MLX5_TLS_COMMAND_SIZE (0x100) +-- +2.13.6 + diff --git a/SOURCES/0182-netdrv-net-mlx5e-Use-netdev_warn-for-errors-for-adde.patch b/SOURCES/0182-netdrv-net-mlx5e-Use-netdev_warn-for-errors-for-adde.patch new file mode 100644 index 0000000..c2dfb69 --- /dev/null +++ b/SOURCES/0182-netdrv-net-mlx5e-Use-netdev_warn-for-errors-for-adde.patch @@ -0,0 +1,107 @@ +From 8a64a8edf2727554f374f0b99dab8a00b167655c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:54 -0400 +Subject: [PATCH 182/312] [netdrv] net/mlx5e: Use netdev_warn() for errors for + added prefix + +Message-id: <20200512105530.4207-89-ahleihel@redhat.com> +Patchwork-id: 306960 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 088/124] net/mlx5e: Use netdev_warn() for errors for added prefix +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 237ac8ded45c803f409df74946f48dfafee18140 +Author: Roi Dayan +Date: Wed Dec 25 17:03:35 2019 +0200 + + net/mlx5e: Use netdev_warn() for errors for added prefix + + This helps identify the source of the message. + If netdev still doesn't exists use mlx5_core_warn(). 
+ + Signed-off-by: Roi Dayan + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 27 ++++++++++++++---------- + 1 file changed, 16 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 7d12e20e4582..5c9b67b71456 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -190,7 +190,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep) + + err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats); + if (err) { +- pr_warn("vport %d error %d reading stats\n", rep->vport, err); ++ netdev_warn(priv->netdev, "vport %d error %d reading stats\n", ++ rep->vport, err); + return; + } + +@@ -1986,8 +1987,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) + &mlx5e_uplink_rep_profile : &mlx5e_rep_profile; + netdev = mlx5e_create_netdev(dev, profile, nch, rpriv); + if (!netdev) { +- pr_warn("Failed to create representor netdev for vport %d\n", +- rep->vport); ++ mlx5_core_warn(dev, ++ "Failed to create representor netdev for vport %d\n", ++ rep->vport); + kfree(rpriv); + return -EINVAL; + } +@@ -2004,29 +2006,32 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) + + err = mlx5e_attach_netdev(netdev_priv(netdev)); + if (err) { +- pr_warn("Failed to attach representor netdev for vport %d\n", +- rep->vport); ++ netdev_warn(netdev, ++ "Failed to attach representor netdev for vport %d\n", ++ rep->vport); + goto err_destroy_mdev_resources; + } + + err = mlx5e_rep_neigh_init(rpriv); + if (err) { +- pr_warn("Failed to initialized neighbours handling for vport %d\n", +- rep->vport); ++ netdev_warn(netdev, ++ "Failed to initialized neighbours handling for vport %d\n", ++ rep->vport); + goto err_detach_netdev; + } + + err = register_devlink_port(dev, rpriv); + if (err) { +- esw_warn(dev, "Failed to register devlink port %d\n", +- rep->vport); ++ netdev_warn(netdev, "Failed to register devlink port %d\n", ++ rep->vport); + goto err_neigh_cleanup; + } + + err = register_netdev(netdev); + if (err) { +- pr_warn("Failed to register representor netdev for vport %d\n", +- rep->vport); ++ netdev_warn(netdev, ++ "Failed to register representor netdev for vport %d\n", ++ rep->vport); + goto err_devlink_cleanup; + } + +-- +2.13.6 + diff --git a/SOURCES/0183-include-net-mlx5-Expose-link-speed-directly.patch b/SOURCES/0183-include-net-mlx5-Expose-link-speed-directly.patch new file mode 100644 index 0000000..4003de2 --- /dev/null +++ b/SOURCES/0183-include-net-mlx5-Expose-link-speed-directly.patch @@ -0,0 +1,52 @@ +From f9b352e93da1f7bd57411f597aecf0c1e1231404 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:55 -0400 +Subject: [PATCH 183/312] [include] net/mlx5: Expose link speed directly + +Message-id: <20200512105530.4207-90-ahleihel@redhat.com> +Patchwork-id: 306961 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 089/124] net/mlx5: Expose link speed directly +Bugzilla: 1831133 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1831133 +Upstream: v5.7-rc1 + +commit dc392fc56f39a00a46d6db2d150571ccafe99734 +Author: Mark Bloch +Date: Mon Mar 2 16:15:21 2020 -0800 + + net/mlx5: Expose link speed directly + + 
Expose port rate as part of the port speed register fields. + + Signed-off-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index cfe89228ca78..a8adb6e7d1fd 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -8292,7 +8292,8 @@ struct mlx5_ifc_ptys_reg_bits { + u8 proto_mask[0x3]; + + u8 an_status[0x4]; +- u8 reserved_at_24[0x1c]; ++ u8 reserved_at_24[0xc]; ++ u8 data_rate_oper[0x10]; + + u8 ext_eth_proto_capability[0x20]; + +-- +2.13.6 + diff --git a/SOURCES/0184-netdrv-net-mlx5-Expose-port-speed-when-possible.patch b/SOURCES/0184-netdrv-net-mlx5-Expose-port-speed-when-possible.patch new file mode 100644 index 0000000..7fa60ef --- /dev/null +++ b/SOURCES/0184-netdrv-net-mlx5-Expose-port-speed-when-possible.patch @@ -0,0 +1,89 @@ +From b5743d69b40d3913a3d825221929275be67c0110 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:56 -0400 +Subject: [PATCH 184/312] [netdrv] net/mlx5: Expose port speed when possible + +Message-id: <20200512105530.4207-91-ahleihel@redhat.com> +Patchwork-id: 306962 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 090/124] net/mlx5: Expose port speed when possible +Bugzilla: 1831133 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1831133 +Upstream: v5.7-rc1 + +commit c268ca6087f553bfc0e16ffec412b983ffe32fd4 +Author: Mark Bloch +Date: Tue Feb 25 18:04:40 2020 +0000 + + net/mlx5: Expose port speed when possible + + When port speed can't be reported based on ext_eth_proto_capability + or eth_proto_capability instead of reporting speed as unknown check + if the port's speed can be inferred based on the data_rate_oper field. 
+ + Signed-off-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 915f1d74afad..3b5f4a2c9f4e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -768,6 +768,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings + + static void get_speed_duplex(struct net_device *netdev, + u32 eth_proto_oper, bool force_legacy, ++ u16 data_rate_oper, + struct ethtool_link_ksettings *link_ksettings) + { + struct mlx5e_priv *priv = netdev_priv(netdev); +@@ -779,7 +780,10 @@ static void get_speed_duplex(struct net_device *netdev, + + speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy); + if (!speed) { +- speed = SPEED_UNKNOWN; ++ if (data_rate_oper) ++ speed = 100 * data_rate_oper; ++ else ++ speed = SPEED_UNKNOWN; + goto out; + } + +@@ -869,6 +873,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + { + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; ++ u16 data_rate_oper; + u32 rx_pause = 0; + u32 tx_pause = 0; + u32 eth_proto_cap; +@@ -912,6 +917,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status); + connector_type = MLX5_GET(ptys_reg, out, connector_type); ++ data_rate_oper = MLX5_GET(ptys_reg, out, data_rate_oper); + + mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); + +@@ -922,7 +928,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, + admin_ext); + get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext, +- link_ksettings); ++ data_rate_oper, link_ksettings); + + eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; + +-- +2.13.6 + diff --git a/SOURCES/0185-netdrv-net-mlx5-Tidy-up-and-fix-reverse-christmas-or.patch b/SOURCES/0185-netdrv-net-mlx5-Tidy-up-and-fix-reverse-christmas-or.patch new file mode 100644 index 0000000..3c26a1a --- /dev/null +++ b/SOURCES/0185-netdrv-net-mlx5-Tidy-up-and-fix-reverse-christmas-or.patch @@ -0,0 +1,69 @@ +From aa1dcfe2bcd1c7faaae7007d4ee7464f4b5b2443 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:57 -0400 +Subject: [PATCH 185/312] [netdrv] net/mlx5: Tidy up and fix reverse christmas + ordring + +Message-id: <20200512105530.4207-92-ahleihel@redhat.com> +Patchwork-id: 306963 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 091/124] net/mlx5: Tidy up and fix reverse christmas ordring +Bugzilla: 1831133 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1831133 +Upstream: v5.7-rc1 + +commit 2f5438ca0ee01a1b3a9c37e3f33d47c8122afe74 +Author: Mark Bloch +Date: Tue Feb 25 19:24:54 2020 +0000 + + net/mlx5: Tidy up and fix reverse christmas ordring + + Use reverse chirstmas tree inside mlx5e_ethtool_get_link_ksettings. 
+ + Signed-off-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 3b5f4a2c9f4e..cae5da83e793 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -872,18 +872,18 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + struct ethtool_link_ksettings *link_ksettings) + { + struct mlx5_core_dev *mdev = priv->mdev; +- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; ++ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {}; ++ u32 eth_proto_admin; ++ u8 an_disable_admin; + u16 data_rate_oper; ++ u32 eth_proto_oper; ++ u32 eth_proto_cap; ++ u8 connector_type; + u32 rx_pause = 0; + u32 tx_pause = 0; +- u32 eth_proto_cap; +- u32 eth_proto_admin; + u32 eth_proto_lp; +- u32 eth_proto_oper; +- u8 an_disable_admin; +- u8 an_status; +- u8 connector_type; + bool admin_ext; ++ u8 an_status; + bool ext; + int err; + +-- +2.13.6 + diff --git a/SOURCES/0186-netdrv-net-mlx5-E-Switch-Hold-mutex-when-querying-dr.patch b/SOURCES/0186-netdrv-net-mlx5-E-Switch-Hold-mutex-when-querying-dr.patch new file mode 100644 index 0000000..701d844 --- /dev/null +++ b/SOURCES/0186-netdrv-net-mlx5-E-Switch-Hold-mutex-when-querying-dr.patch @@ -0,0 +1,95 @@ +From 7160797e00796a359ba98a56918724238d8a9c81 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:58 -0400 +Subject: [PATCH 186/312] [netdrv] net/mlx5: E-Switch, Hold mutex when querying + drop counter in legacy mode + +Message-id: <20200512105530.4207-93-ahleihel@redhat.com> +Patchwork-id: 306964 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 092/124] net/mlx5: E-Switch, Hold mutex when querying drop counter in legacy mode +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 14c844cbf3503076de6e2e48d575216f1600b19f +Author: Bodong Wang +Date: Fri Sep 13 16:24:19 2019 -0500 + + net/mlx5: E-Switch, Hold mutex when querying drop counter in legacy mode + + Consider scenario below, CPU 1 is at risk to query already destroyed + drop counters. Need to apply the same state mutex when disabling vport. 
+ + +-------------------------------+-------------------------------------+ + | CPU 0 | CPU 1 | + +-------------------------------+-------------------------------------+ + | mlx5_device_disable_sriov | mlx5e_get_vf_stats | + | mlx5_eswitch_disable | mlx5_eswitch_get_vport_stats | + | esw_disable_vport | mlx5_eswitch_query_vport_drop_stats | + | mlx5_fc_destroy(drop_counter) | mlx5_fc_query(drop_counter) | + +-------------------------------+-------------------------------------+ + + Fixes: b8a0dbe3a90b ("net/mlx5e: E-switch, Add steering drop counters") + Signed-off-by: Bodong Wang + Reviewed-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 2151787235e0..1541cdf877d2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -2600,9 +2600,13 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + u64 bytes = 0; + int err = 0; + +- if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY) ++ if (esw->mode != MLX5_ESWITCH_LEGACY) + return 0; + ++ mutex_lock(&esw->state_lock); ++ if (!vport->enabled) ++ goto unlock; ++ + if (vport->egress.legacy.drop_counter) + mlx5_fc_query(dev, vport->egress.legacy.drop_counter, + &stats->rx_dropped, &bytes); +@@ -2613,20 +2617,22 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + + if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) && + !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) +- return 0; ++ goto unlock; + + err = mlx5_query_vport_down_stats(dev, vport->vport, 1, + &rx_discard_vport_down, + &tx_discard_vport_down); + if (err) +- return err; ++ goto unlock; + + if (MLX5_CAP_GEN(dev, receive_discard_vport_down)) + stats->rx_dropped += rx_discard_vport_down; + if (MLX5_CAP_GEN(dev, transmit_discard_vport_down)) + stats->tx_dropped += tx_discard_vport_down; + +- return 0; ++unlock: ++ mutex_unlock(&esw->state_lock); ++ return err; + } + + int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, +-- +2.13.6 + diff --git a/SOURCES/0187-netdrv-net-mlx5-Fix-group-version-management.patch b/SOURCES/0187-netdrv-net-mlx5-Fix-group-version-management.patch new file mode 100644 index 0000000..b19e3d2 --- /dev/null +++ b/SOURCES/0187-netdrv-net-mlx5-Fix-group-version-management.patch @@ -0,0 +1,54 @@ +From d0b7b0101b6234149651940453cb568c716f9e3c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:54:59 -0400 +Subject: [PATCH 187/312] [netdrv] net/mlx5: Fix group version management + +Message-id: <20200512105530.4207-94-ahleihel@redhat.com> +Patchwork-id: 306966 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 093/124] net/mlx5: Fix group version management +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 454401aeb2957e0d996bc9208b78aa4d8ac12964 +Author: Eli Cohen +Date: Wed Mar 4 10:32:56 2020 +0200 + + net/mlx5: Fix group version management + + When adding a rule to a flow group we need increment the version of the + group. 
Current code fails to do that and as a result, when trying to add + a rule, we will fail to discover a case where an FTE with the same match + value was added while we scanned the groups of the same match criteria, + thus we may try to add an FTE that was already added. + + Signed-off-by: Eli Cohen + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 4c7c707f9e2d..7cc21f08cbcc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -1342,6 +1342,7 @@ add_rule_fte(struct fs_fte *fte, + fte->node.active = true; + fte->status |= FS_FTE_STATUS_EXISTING; + atomic_inc(&fte->node.version); ++ atomic_inc(&fg->node.version); + + out: + return handle; +-- +2.13.6 + diff --git a/SOURCES/0188-netdrv-net-mlx5e-Don-t-allow-forwarding-between-upli.patch b/SOURCES/0188-netdrv-net-mlx5e-Don-t-allow-forwarding-between-upli.patch new file mode 100644 index 0000000..be9e09d --- /dev/null +++ b/SOURCES/0188-netdrv-net-mlx5e-Don-t-allow-forwarding-between-upli.patch @@ -0,0 +1,121 @@ +From a8f5854b513e24cc1c6b337aeeebaa868a09bfe7 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:04 -0400 +Subject: [PATCH 188/312] [netdrv] net/mlx5e: Don't allow forwarding between + uplink + +Message-id: <20200512105530.4207-99-ahleihel@redhat.com> +Patchwork-id: 306970 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 098/124] net/mlx5e: Don't allow forwarding between uplink +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit ffec97020f841fefa508db038bad58bc6def9431 +Author: Tonghao Zhang +Date: Mon Feb 17 22:08:50 2020 +0800 + + net/mlx5e: Don't allow forwarding between uplink + + We can install forwarding packets rule between uplink + in switchdev mode, as show below. But the hardware does + not do that as expected (mlnx_perf -i $PF1, we can't get + the counter of the PF1). By the way, if we add the uplink + PF0, PF1 to Open vSwitch and enable hw-offload, the rules + can be offloaded but not work fine too. This patch add a + check and if so return -EOPNOTSUPP. + + $ tc filter add dev $PF0 protocol all parent ffff: prio 1 handle 1 \ + flower skip_sw action mirred egress redirect dev $PF1 + + $ tc -d -s filter show dev $PF0 ingress + skip_sw + in_hw in_hw_count 1 + action order 1: mirred (Egress Redirect to device enp130s0f1) stolen + ... + Sent hardware 408954 bytes 4173 pkt + ... 
+ + Signed-off-by: Tonghao Zhang + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5 +++++ + drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1 + + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 17 +++++++++++++++++ + 3 files changed, 23 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 5c9b67b71456..02f1362a01ef 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1371,6 +1371,11 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { + .ndo_set_features = mlx5e_set_features, + }; + ++bool mlx5e_eswitch_uplink_rep(struct net_device *netdev) ++{ ++ return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; ++} ++ + bool mlx5e_eswitch_rep(struct net_device *netdev) + { + if (netdev->netdev_ops == &mlx5e_netdev_ops_rep || +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +index c8f3bbdc1ffb..4bc5d5cd071c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +@@ -200,6 +200,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, + void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); + + bool mlx5e_eswitch_rep(struct net_device *netdev); ++bool mlx5e_eswitch_uplink_rep(struct net_device *netdev); + + #else /* CONFIG_MLX5_ESWITCH */ + static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 2c89f1251354..6c37a9e7912e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3384,6 +3384,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + struct net_device *uplink_upper; ++ struct mlx5e_rep_priv *rep_priv; + + if (is_duplicated_output_device(priv->netdev, + out_dev, +@@ -3419,6 +3420,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + return err; + } + ++ /* Don't allow forwarding between uplink. ++ * ++ * Input vport was stored esw_attr->in_rep. ++ * In LAG case, *priv* is the private data of ++ * uplink which may be not the input vport. 
++ */ ++ rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep); ++ if (mlx5e_eswitch_uplink_rep(rep_priv->netdev) && ++ mlx5e_eswitch_uplink_rep(out_dev)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "devices are both uplink, can't offload forwarding"); ++ pr_err("devices %s %s are both uplink, can't offload forwarding\n", ++ priv->netdev->name, out_dev->name); ++ return -EOPNOTSUPP; ++ } ++ + if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); +-- +2.13.6 + diff --git a/SOURCES/0189-netdrv-net-mlx5-Eswitch-avoid-redundant-mask.patch b/SOURCES/0189-netdrv-net-mlx5-Eswitch-avoid-redundant-mask.patch new file mode 100644 index 0000000..3892bac --- /dev/null +++ b/SOURCES/0189-netdrv-net-mlx5-Eswitch-avoid-redundant-mask.patch @@ -0,0 +1,62 @@ +From 9449eae0b7957d9a09f751c6b26f42bab3dbc478 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:05 -0400 +Subject: [PATCH 189/312] [netdrv] net/mlx5: Eswitch, avoid redundant mask + +Message-id: <20200512105530.4207-100-ahleihel@redhat.com> +Patchwork-id: 306972 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 099/124] net/mlx5: Eswitch, avoid redundant mask +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 1708dd54687db4fd5baa3b6169aa116505c1e2ef +Author: Eli Cohen +Date: Thu Feb 6 15:13:36 2020 +0200 + + net/mlx5: Eswitch, avoid redundant mask + + misc_params.source_port is a 16 bit field already so no need for + redundant masking against 0xffff. Also change local variables type to + u16. + + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +index d2f6af3a8a28..0050f3138e4b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +@@ -181,7 +181,7 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src, + static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw, + const struct mlx5_flow_spec *spec) + { +- u32 port_mask, port_value; ++ u16 port_mask, port_value; + + if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) + return spec->flow_context.flow_source == +@@ -191,7 +191,7 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw, + misc_parameters.source_port); + port_value = MLX5_GET(fte_match_param, spec->match_value, + misc_parameters.source_port); +- return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK; ++ return (port_mask & port_value) == MLX5_VPORT_UPLINK; + } + + bool +-- +2.13.6 + diff --git a/SOURCES/0190-netdrv-net-mlx5-DR-Change-matcher-priority-parameter.patch b/SOURCES/0190-netdrv-net-mlx5-DR-Change-matcher-priority-parameter.patch new file mode 100644 index 0000000..829da6f --- /dev/null +++ b/SOURCES/0190-netdrv-net-mlx5-DR-Change-matcher-priority-parameter.patch @@ -0,0 +1,106 @@ +From 4163cbaa64b327db085ef2c4d389e388097ef093 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:06 -0400 +Subject: [PATCH 190/312] [netdrv] net/mlx5: DR, 
Change matcher priority + parameter type + +Message-id: <20200512105530.4207-101-ahleihel@redhat.com> +Patchwork-id: 306971 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 100/124] net/mlx5: DR, Change matcher priority parameter type +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.7-rc1 + +commit f64092997fcd772068ad1edb3ef04d9e69243aa1 +Author: Hamdan Igbaria +Date: Mon Feb 17 15:53:20 2020 +0200 + + net/mlx5: DR, Change matcher priority parameter type + + Change matcher priority parameter type from u16 to u32, + this change is needed since sometimes upper levels + create a matcher with priority bigger than 2^16. + + Signed-off-by: Hamdan Igbaria + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 4 ++-- + 4 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +index c6dbd856df94..2ecec4429070 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +@@ -671,7 +671,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, + + struct mlx5dr_matcher * + mlx5dr_matcher_create(struct mlx5dr_table *tbl, +- u16 priority, ++ u32 priority, + u8 match_criteria_enable, + struct mlx5dr_match_parameters *mask) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +index dffe35145d19..3fa739951b34 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +@@ -705,7 +705,7 @@ struct mlx5dr_matcher { + struct mlx5dr_matcher_rx_tx rx; + struct mlx5dr_matcher_rx_tx tx; + struct list_head matcher_list; +- u16 prio; ++ u32 prio; + struct mlx5dr_match_param mask; + u8 match_criteria; + refcount_t refcount; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +index c2027192e21e..d12d3a2d46ab 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +@@ -140,7 +140,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_group *fg) + { + struct mlx5dr_matcher *matcher; +- u16 priority = MLX5_GET(create_flow_group_in, in, ++ u32 priority = MLX5_GET(create_flow_group_in, in, + start_flow_index); + u8 match_criteria_enable = MLX5_GET(create_flow_group_in, + in, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +index e1edc9c247b7..e09e4ea1b045 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +@@ -59,7 +59,7 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *table); + + struct mlx5dr_matcher * + mlx5dr_matcher_create(struct mlx5dr_table *table, +- 
u16 priority, ++ u32 priority, + u8 match_criteria_enable, + struct mlx5dr_match_parameters *mask); + +@@ -151,7 +151,7 @@ mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; } + + static inline struct mlx5dr_matcher * + mlx5dr_matcher_create(struct mlx5dr_table *table, +- u16 priority, ++ u32 priority, + u8 match_criteria_enable, + struct mlx5dr_match_parameters *mask) { return NULL; } + +-- +2.13.6 + diff --git a/SOURCES/0191-netdrv-net-mlx5-DR-Improve-log-messages.patch b/SOURCES/0191-netdrv-net-mlx5-DR-Improve-log-messages.patch new file mode 100644 index 0000000..fc52757 --- /dev/null +++ b/SOURCES/0191-netdrv-net-mlx5-DR-Improve-log-messages.patch @@ -0,0 +1,408 @@ +From 98de485b333df8c2d5a2cc823b4718e410b4a5a3 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:07 -0400 +Subject: [PATCH 191/312] [netdrv] net/mlx5: DR, Improve log messages + +Message-id: <20200512105530.4207-102-ahleihel@redhat.com> +Patchwork-id: 306974 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 101/124] net/mlx5: DR, Improve log messages +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.7-rc1 + +commit b7d0db5520d87c5ddf0c2388f1e542e622ebfdc5 +Author: Erez Shitrit +Date: Sun Jan 12 10:55:54 2020 +0200 + + net/mlx5: DR, Improve log messages + + Few print messages are in debug level where they should be in error, and + few messages are missing. + + Signed-off-by: Erez Shitrit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/steering/dr_action.c | 10 +++++----- + .../ethernet/mellanox/mlx5/core/steering/dr_domain.c | 17 ++++++++++------- + .../ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 2 +- + .../ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 10 +++++----- + .../net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 18 +++++++++--------- + .../net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 16 ++++++++++++---- + .../net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2 +- + .../ethernet/mellanox/mlx5/core/steering/dr_table.c | 8 ++++++-- + 8 files changed, 49 insertions(+), 34 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +index 2d93228ff633..1d90378b155c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +@@ -672,7 +672,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + dest_action = action; + if (!action->dest_tbl.is_fw_tbl) { + if (action->dest_tbl.tbl->dmn != dmn) { +- mlx5dr_dbg(dmn, ++ mlx5dr_err(dmn, + "Destination table belongs to a different domain\n"); + goto out_invalid_arg; + } +@@ -703,7 +703,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + action->dest_tbl.fw_tbl.rx_icm_addr = + output.sw_owner_icm_root_0; + } else { +- mlx5dr_dbg(dmn, ++ mlx5dr_err(dmn, + "Failed mlx5_cmd_query_flow_table ret: %d\n", + ret); + return ret; +@@ -772,7 +772,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + + /* Check action duplication */ + if (++action_type_set[action_type] > max_actions_type) { +- mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n", ++ mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n", + action_type, 
max_actions_type); + goto out_invalid_arg; + } +@@ -781,7 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + if (dr_action_validate_and_get_next_state(action_domain, + action_type, + &state)) { +- mlx5dr_dbg(dmn, "Invalid action sequence provided\n"); ++ mlx5dr_err(dmn, "Invalid action sequence provided\n"); + return -EOPNOTSUPP; + } + } +@@ -797,7 +797,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, + rx_rule && recalc_cs_required && dest_action) { + ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr); + if (ret) { +- mlx5dr_dbg(dmn, ++ mlx5dr_err(dmn, + "Failed to handle checksum recalculation err %d\n", + ret); + return ret; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +index a9da961d4d2f..48b6358b6845 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +@@ -59,7 +59,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn) + + ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn); + if (ret) { +- mlx5dr_dbg(dmn, "Couldn't allocate PD\n"); ++ mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret); + return ret; + } + +@@ -192,7 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev, + + ret = dr_domain_query_vports(dmn); + if (ret) { +- mlx5dr_dbg(dmn, "Failed to query vports caps\n"); ++ mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret); + goto free_vports_caps; + } + +@@ -213,7 +213,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, + int ret; + + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) { +- mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n"); ++ mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n"); + return -EOPNOTSUPP; + } + +@@ -257,7 +257,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, + dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); + if (!vport_cap) { +- mlx5dr_dbg(dmn, "Failed to get esw manager vport\n"); ++ mlx5dr_err(dmn, "Failed to get esw manager vport\n"); + return -ENOENT; + } + +@@ -268,7 +268,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, + dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address; + break; + default: +- mlx5dr_dbg(dmn, "Invalid domain\n"); ++ mlx5dr_err(dmn, "Invalid domain\n"); + ret = -EINVAL; + break; + } +@@ -300,7 +300,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) + mutex_init(&dmn->mutex); + + if (dr_domain_caps_init(mdev, dmn)) { +- mlx5dr_dbg(dmn, "Failed init domain, no caps\n"); ++ mlx5dr_err(dmn, "Failed init domain, no caps\n"); + goto free_domain; + } + +@@ -348,8 +348,11 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags) + mutex_lock(&dmn->mutex); + ret = mlx5dr_send_ring_force_drain(dmn); + mutex_unlock(&dmn->mutex); +- if (ret) ++ if (ret) { ++ mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n", ++ flags, ret); + return ret; ++ } + } + + if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +index d7c7467e2d53..30d2d7376f56 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +@@ -468,7 +468,7 @@ mlx5dr_icm_alloc_chunk(struct 
mlx5dr_icm_pool *pool, + err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); + if (err) { + dr_icm_chill_buckets_abort(pool, bucket, buckets); +- mlx5dr_dbg(pool->dmn, "Sync_steering failed\n"); ++ mlx5dr_err(pool->dmn, "Sync_steering failed\n"); + chunk = NULL; + goto out; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +index 2ecec4429070..a95938874798 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +@@ -388,14 +388,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, + mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); + + if (idx == 0) { +- mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n"); ++ mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n"); + return -EINVAL; + } + + /* Check that all mask fields were consumed */ + for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) { + if (((u8 *)&mask)[i] != 0) { +- mlx5dr_info(dmn, "Mask contains unsupported parameters\n"); ++ mlx5dr_err(dmn, "Mask contains unsupported parameters\n"); + return -EOPNOTSUPP; + } + } +@@ -563,7 +563,7 @@ static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher, + dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6); + + if (!nic_matcher->ste_builder) { +- mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n"); ++ mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n"); + return -EINVAL; + } + +@@ -634,13 +634,13 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, + int ret; + + if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) { +- mlx5dr_info(dmn, "Invalid match criteria attribute\n"); ++ mlx5dr_err(dmn, "Invalid match criteria attribute\n"); + return -EINVAL; + } + + if (mask) { + if (mask->match_sz > sizeof(struct mlx5dr_match_param)) { +- mlx5dr_info(dmn, "Invalid match size attribute\n"); ++ mlx5dr_err(dmn, "Invalid match size attribute\n"); + return -EINVAL; + } + mlx5dr_ste_copy_param(matcher->match_criteria, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +index e4cff7abb348..cce3ee7a6614 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +@@ -826,8 +826,8 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule, + ste_location, send_ste_list); + if (!new_htbl) { + mlx5dr_htbl_put(cur_htbl); +- mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n", +- cur_htbl->chunk_size); ++ mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n", ++ cur_htbl->chunk_size); + } else { + cur_htbl = new_htbl; + } +@@ -877,7 +877,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + if (!value_size || + (value_size > sizeof(struct mlx5dr_match_param) || + (value_size % sizeof(u32)))) { +- mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n"); ++ mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n"); + return false; + } + +@@ -888,7 +888,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + e_idx = min(s_idx + sizeof(param->outer), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { +- mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n"); ++ mlx5dr_err(matcher->tbl->dmn, "Rule 
outer parameters contains a value not specified by mask\n"); + return false; + } + } +@@ -898,7 +898,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + e_idx = min(s_idx + sizeof(param->misc), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { +- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n"); ++ mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n"); + return false; + } + } +@@ -908,7 +908,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + e_idx = min(s_idx + sizeof(param->inner), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { +- mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n"); ++ mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n"); + return false; + } + } +@@ -918,7 +918,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + e_idx = min(s_idx + sizeof(param->misc2), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { +- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n"); ++ mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n"); + return false; + } + } +@@ -928,7 +928,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, + e_idx = min(s_idx + sizeof(param->misc3), value_size); + + if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) { +- mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n"); ++ mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n"); + return false; + } + } +@@ -1221,7 +1221,7 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher, + dr_rule_remove_action_members(rule); + free_rule: + kfree(rule); +- mlx5dr_info(dmn, "Failed creating rule\n"); ++ mlx5dr_err(dmn, "Failed creating rule\n"); + return NULL; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index 095ec7b1399d..c0ab9cf74929 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -136,7 +136,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, + err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq, + &dr_qp->wq_ctrl); + if (err) { +- mlx5_core_info(mdev, "Can't create QP WQ\n"); ++ mlx5_core_warn(mdev, "Can't create QP WQ\n"); + goto err_wq; + } + +@@ -652,8 +652,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) + + /* Init */ + ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port); +- if (ret) ++ if (ret) { ++ mlx5dr_err(dmn, "Failed modify QP rst2init\n"); + return ret; ++ } + + /* RTR */ + ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr); +@@ -668,8 +670,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) + rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp; + + ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr); +- if (ret) ++ if (ret) { ++ mlx5dr_err(dmn, "Failed modify QP init2rtr\n"); + return ret; ++ } + + /* RTS */ + rts_attr.timeout = 14; +@@ -677,8 +681,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) + rts_attr.rnr_retry = 7; + + ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr); +- if (ret) ++ if (ret) { ++ 
mlx5dr_err(dmn, "Failed modify QP rtr2rts\n"); + return ret; ++ } + + return 0; + } +@@ -862,6 +868,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) + cq_size = QUEUE_SIZE + 1; + dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size); + if (!dmn->send_ring->cq) { ++ mlx5dr_err(dmn, "Failed creating CQ\n"); + ret = -ENOMEM; + goto free_send_ring; + } +@@ -873,6 +880,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) + + dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr); + if (!dmn->send_ring->qp) { ++ mlx5dr_err(dmn, "Failed creating QP\n"); + ret = -ENOMEM; + goto clean_cq; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +index aade62a9ee5c..c0e3a1e7389d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +@@ -728,7 +728,7 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, + { + if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { + if (mask->misc.source_port && mask->misc.source_port != 0xffff) { +- mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n"); ++ mlx5dr_err(dmn, "Partial mask source_port is not supported\n"); + return -EINVAL; + } + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +index 14ce2d7dbb66..c2fe48d7b75a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c +@@ -128,16 +128,20 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn, + DR_CHUNK_SIZE_1, + MLX5DR_STE_LU_TYPE_DONT_CARE, + 0); +- if (!nic_tbl->s_anchor) ++ if (!nic_tbl->s_anchor) { ++ mlx5dr_err(dmn, "Failed allocating htbl\n"); + return -ENOMEM; ++ } + + info.type = CONNECT_MISS; + info.miss_icm_addr = nic_dmn->default_icm_addr; + ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, + nic_tbl->s_anchor, + &info, true); +- if (ret) ++ if (ret) { ++ mlx5dr_err(dmn, "Failed int and send htbl\n"); + goto free_s_anchor; ++ } + + mlx5dr_htbl_get(nic_tbl->s_anchor); + +-- +2.13.6 + diff --git a/SOURCES/0192-netdrv-net-mlx5-DR-Remove-unneeded-functions-deceler.patch b/SOURCES/0192-netdrv-net-mlx5-DR-Remove-unneeded-functions-deceler.patch new file mode 100644 index 0000000..66898c2 --- /dev/null +++ b/SOURCES/0192-netdrv-net-mlx5-DR-Remove-unneeded-functions-deceler.patch @@ -0,0 +1,159 @@ +From 48dd6d9a317d3ca7652f31af196b7c139b34abcd Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:08 -0400 +Subject: [PATCH 192/312] [netdrv] net/mlx5: DR, Remove unneeded functions + deceleration + +Message-id: <20200512105530.4207-103-ahleihel@redhat.com> +Patchwork-id: 306973 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 102/124] net/mlx5: DR, Remove unneeded functions deceleration +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.7-rc1 + +commit bc1a02884a33f9d49cda0c77dc8eccebd6c5c0e5 +Author: Alex Vesker +Date: Sun Mar 8 13:21:41 2020 +0200 + + net/mlx5: DR, Remove unneeded functions deceleration + + Remove dummy functions declaration, the dummy functions are not needed + since fs_dr is the only one to call mlx5dr and both fs_dr and dr files + depend on the same config flag (MLX5_SW_STEERING). 
+ + Fixes: 70605ea545e8 ("net/mlx5: DR, Expose APIs for direct rule managing") + Signed-off-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 101 --------------------- + 1 file changed, 101 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +index e09e4ea1b045..a4c9a1db9915 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +@@ -38,8 +38,6 @@ struct mlx5dr_action_dest { + struct mlx5dr_action *reformat; + }; + +-#ifdef CONFIG_MLX5_SW_STEERING +- + struct mlx5dr_domain * + mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type); + +@@ -125,103 +123,4 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) + return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner); + } + +-#else /* CONFIG_MLX5_SW_STEERING */ +- +-static inline struct mlx5dr_domain * +-mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; } +- +-static inline int +-mlx5dr_domain_destroy(struct mlx5dr_domain *domain) { return 0; } +- +-static inline int +-mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags) { return 0; } +- +-static inline void +-mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, +- struct mlx5dr_domain *peer_dmn) { } +- +-static inline struct mlx5dr_table * +-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags) { return NULL; } +- +-static inline int +-mlx5dr_table_destroy(struct mlx5dr_table *table) { return 0; } +- +-static inline u32 +-mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; } +- +-static inline struct mlx5dr_matcher * +-mlx5dr_matcher_create(struct mlx5dr_table *table, +- u32 priority, +- u8 match_criteria_enable, +- struct mlx5dr_match_parameters *mask) { return NULL; } +- +-static inline int +-mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher) { return 0; } +- +-static inline struct mlx5dr_rule * +-mlx5dr_rule_create(struct mlx5dr_matcher *matcher, +- struct mlx5dr_match_parameters *value, +- size_t num_actions, +- struct mlx5dr_action *actions[]) { return NULL; } +- +-static inline int +-mlx5dr_rule_destroy(struct mlx5dr_rule *rule) { return 0; } +- +-static inline int +-mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, +- struct mlx5dr_action *action) { return 0; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_dest_table(struct mlx5dr_table *table) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain, +- struct mlx5_flow_table *ft) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, +- u32 vport, u8 vhca_id_valid, +- u16 vhca_id) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, +- struct mlx5dr_action_dest *dests, +- u32 num_of_dests) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_drop(void) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_tag(u32 tag_value) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_flow_counter(u32 counter_id) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, +- enum 
mlx5dr_action_reformat_type reformat_type, +- size_t data_sz, +- void *data) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain, +- u32 flags, +- size_t actions_sz, +- __be64 actions[]) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_pop_vlan(void) { return NULL; } +- +-static inline struct mlx5dr_action * +-mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, +- __be32 vlan_hdr) { return NULL; } +- +-static inline int +-mlx5dr_action_destroy(struct mlx5dr_action *action) { return 0; } +- +-static inline bool +-mlx5dr_is_supported(struct mlx5_core_dev *dev) { return false; } +- +-#endif /* CONFIG_MLX5_SW_STEERING */ +- + #endif /* _MLX5DR_H_ */ +-- +2.13.6 + diff --git a/SOURCES/0193-netdrv-net-mlx5e-Use-netdev_warn-instead-of-pr_err-f.patch b/SOURCES/0193-netdrv-net-mlx5e-Use-netdev_warn-instead-of-pr_err-f.patch new file mode 100644 index 0000000..6a24963 --- /dev/null +++ b/SOURCES/0193-netdrv-net-mlx5e-Use-netdev_warn-instead-of-pr_err-f.patch @@ -0,0 +1,82 @@ +From 9a16b3d75ba103147a6c45f89ab433538767b726 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:09 -0400 +Subject: [PATCH 193/312] [netdrv] net/mlx5e: Use netdev_warn() instead of + pr_err() for errors + +Message-id: <20200512105530.4207-104-ahleihel@redhat.com> +Patchwork-id: 306975 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 103/124] net/mlx5e: Use netdev_warn() instead of pr_err() for errors +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit 4ccd83f40cdc0c5b3b93cd176f9583994832f5f7 +Author: Roi Dayan +Date: Tue Feb 18 15:24:39 2020 +0200 + + net/mlx5e: Use netdev_warn() instead of pr_err() for errors + + This is for added netdev prefix that helps identify + the source of the message. 
+ + Signed-off-by: Roi Dayan + Reviewed-by: Eli Cohen + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 6c37a9e7912e..2fc1a0879e5d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3360,8 +3360,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); +- pr_err("can't support more than %d output ports, can't offload forwarding\n", +- attr->out_count); ++ netdev_warn(priv->netdev, ++ "can't support more than %d output ports, can't offload forwarding\n", ++ attr->out_count); + return -EOPNOTSUPP; + } + +@@ -3439,8 +3440,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); +- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", +- priv->netdev->name, out_dev->name); ++ netdev_warn(priv->netdev, ++ "devices %s %s not on same switch HW, can't offload forwarding\n", ++ priv->netdev->name, ++ out_dev->name); + return -EOPNOTSUPP; + } + +@@ -3459,8 +3462,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + } else { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); +- pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", +- priv->netdev->name, out_dev->name); ++ netdev_warn(priv->netdev, ++ "devices %s %s not on same switch HW, can't offload forwarding\n", ++ priv->netdev->name, ++ out_dev->name); + return -EINVAL; + } + } +-- +2.13.6 + diff --git a/SOURCES/0194-netdrv-net-mlx5e-Remove-unused-argument-from-parse_t.patch b/SOURCES/0194-netdrv-net-mlx5e-Remove-unused-argument-from-parse_t.patch new file mode 100644 index 0000000..0c389e9 --- /dev/null +++ b/SOURCES/0194-netdrv-net-mlx5e-Remove-unused-argument-from-parse_t.patch @@ -0,0 +1,79 @@ +From fbd9f760e024d696d1cc7518e36a22cf7c190513 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:10 -0400 +Subject: [PATCH 194/312] [netdrv] net/mlx5e: Remove unused argument from + parse_tc_pedit_action() + +Message-id: <20200512105530.4207-105-ahleihel@redhat.com> +Patchwork-id: 306976 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 104/124] net/mlx5e: Remove unused argument from parse_tc_pedit_action() +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 + +commit dec481c86e741b9ec94cb7867dbf253d6bca5e43 +Author: Eli Cohen +Date: Thu Feb 13 15:18:51 2020 +0200 + + net/mlx5e: Remove unused argument from parse_tc_pedit_action() + + parse_attr is not used by parse_tc_pedit_action() so revmove it. 
+ + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 2fc1a0879e5d..284a1df33bc7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2548,7 +2548,6 @@ static const struct pedit_headers zero_masks = {}; + + static int parse_tc_pedit_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, int namespace, +- struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) + { +@@ -2824,8 +2823,7 @@ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace, + return -EOPNOTSUPP; + } + +- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, +- hdrs, NULL); ++ err = parse_tc_pedit_action(priv, &pedit_act, namespace, hdrs, NULL); + *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + + return err; +@@ -2887,7 +2885,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_ADD: + err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL, +- parse_attr, hdrs, extack); ++ hdrs, extack); + if (err) + return err; + +@@ -3330,7 +3328,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_ADD: + err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB, +- parse_attr, hdrs, extack); ++ hdrs, extack); + if (err) + return err; + +-- +2.13.6 + diff --git a/SOURCES/0195-netdrv-flow_offload-check-for-basic-action-hw-stats-.patch b/SOURCES/0195-netdrv-flow_offload-check-for-basic-action-hw-stats-.patch new file mode 100644 index 0000000..deb6c97 --- /dev/null +++ b/SOURCES/0195-netdrv-flow_offload-check-for-basic-action-hw-stats-.patch @@ -0,0 +1,77 @@ +From 11dd730184c21d5ae4dff9ca9ee64c1e688dff32 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:11 -0400 +Subject: [PATCH 195/312] [netdrv] flow_offload: check for basic action hw + stats type + +Message-id: <20200512105530.4207-106-ahleihel@redhat.com> +Patchwork-id: 306977 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 105/124] flow_offload: check for basic action hw stats type +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc1 +Conflicts: + - Add the mlx5 hunks that were previously dropped when this patch + was backported as part of a CNB BZ. + +commit 319a1d19471ec49b8a91a7f6a3fe2c4535e5c279 +Author: Jiri Pirko +Date: Sat Mar 7 12:40:13 2020 +0100 + + flow_offload: check for basic action hw stats type + + Introduce flow_action_basic_hw_stats_types_check() helper and use it + in drivers. That sanitizes the drivers which do not have support + for action HW stats types. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 284a1df33bc7..c455f73cd54e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2868,6 +2868,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, + if (!flow_action_has_entries(flow_action)) + return -EINVAL; + ++ if (!flow_action_basic_hw_stats_types_check(flow_action, extack)) ++ return -EOPNOTSUPP; ++ + attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + + flow_action_for_each(i, act, flow_action) { +@@ -3319,6 +3322,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + if (!flow_action_has_entries(flow_action)) + return -EINVAL; + ++ if (!flow_action_basic_hw_stats_types_check(flow_action, extack)) ++ return -EOPNOTSUPP; ++ + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: +@@ -4118,6 +4124,9 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, + return -EOPNOTSUPP; + } + ++ if (!flow_action_basic_hw_stats_types_check(flow_action, extack)) ++ return -EOPNOTSUPP; ++ + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_POLICE: +-- +2.13.6 + diff --git a/SOURCES/0196-netdrv-net-mlx5-Fix-frequent-ioread-PCI-access-durin.patch b/SOURCES/0196-netdrv-net-mlx5-Fix-frequent-ioread-PCI-access-durin.patch new file mode 100644 index 0000000..e990a39 --- /dev/null +++ b/SOURCES/0196-netdrv-net-mlx5-Fix-frequent-ioread-PCI-access-durin.patch @@ -0,0 +1,85 @@ +From 5247dc98b37bb4fea63e99a2e43372f73f6ba4b8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:12 -0400 +Subject: [PATCH 196/312] [netdrv] net/mlx5: Fix frequent ioread PCI access + during recovery + +Message-id: <20200512105530.4207-107-ahleihel@redhat.com> +Patchwork-id: 306979 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 106/124] net/mlx5: Fix frequent ioread PCI access during recovery +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc2 + +commit 8c702a53bb0a79bfa203ba21ef1caba43673c5b7 +Author: Moshe Shemesh +Date: Mon Mar 30 10:21:49 2020 +0300 + + net/mlx5: Fix frequent ioread PCI access during recovery + + High frequency of PCI ioread calls during recovery flow may cause the + following trace on powerpc: + + [ 248.670288] EEH: 2100000 reads ignored for recovering device at + location=Slot1 driver=mlx5_core pci addr=0000:01:00.1 + [ 248.670331] EEH: Might be infinite loop in mlx5_core driver + [ 248.670361] CPU: 2 PID: 35247 Comm: kworker/u192:11 Kdump: loaded + Tainted: G OE ------------ 4.14.0-115.14.1.el7a.ppc64le #1 + [ 248.670425] Workqueue: mlx5_health0000:01:00.1 health_recover_work + [mlx5_core] + [ 248.670471] Call Trace: + [ 248.670492] [c00020391c11b960] [c000000000c217ac] dump_stack+0xb0/0xf4 + (unreliable) + [ 248.670548] [c00020391c11b9a0] [c000000000045818] + eeh_check_failure+0x5c8/0x630 + [ 248.670631] [c00020391c11ba50] [c00000000068fce4] + ioread32be+0x114/0x1c0 + [ 248.670692] [c00020391c11bac0] [c00800000dd8b400] + mlx5_error_sw_reset+0x160/0x510 [mlx5_core] + [ 248.670752] [c00020391c11bb60] [c00800000dd75824] + mlx5_disable_device+0x34/0x1d0 [mlx5_core] + [ 248.670822] [c00020391c11bbe0] 
[c00800000dd8affc] + health_recover_work+0x11c/0x3c0 [mlx5_core] + [ 248.670891] [c00020391c11bc80] [c000000000164fcc] + process_one_work+0x1bc/0x5f0 + [ 248.670955] [c00020391c11bd20] [c000000000167f8c] + worker_thread+0xac/0x6b0 + [ 248.671015] [c00020391c11bdc0] [c000000000171618] kthread+0x168/0x1b0 + [ 248.671067] [c00020391c11be30] [c00000000000b65c] + ret_from_kernel_thread+0x5c/0x80 + + Reduce the PCI ioread frequency during recovery by using msleep() + instead of cond_resched() + + Fixes: 3e5b72ac2f29 ("net/mlx5: Issue SW reset on FW assert") + Signed-off-by: Moshe Shemesh + Reviewed-by: Feras Daoud + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/health.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c +index d6b0a4ef9daf..fd985c3b4147 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c +@@ -243,7 +243,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev) + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + break; + +- cond_resched(); ++ msleep(20); + } while (!time_after(jiffies, end)); + + if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { +-- +2.13.6 + diff --git a/SOURCES/0197-netdrv-net-mlx5e-Add-missing-release-firmware-call.patch b/SOURCES/0197-netdrv-net-mlx5e-Add-missing-release-firmware-call.patch new file mode 100644 index 0000000..29d90ca --- /dev/null +++ b/SOURCES/0197-netdrv-net-mlx5e-Add-missing-release-firmware-call.patch @@ -0,0 +1,55 @@ +From 134f8606ebca3094b8e2f3994f1b2797ae867749 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:13 -0400 +Subject: [PATCH 197/312] [netdrv] net/mlx5e: Add missing release firmware call + +Message-id: <20200512105530.4207-108-ahleihel@redhat.com> +Patchwork-id: 306978 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 107/124] net/mlx5e: Add missing release firmware call +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc2 + +commit d19987ccf57501894fdd8fadc2e55e4a3dd57239 +Author: Eran Ben Elisha +Date: Tue Mar 24 15:04:26 2020 +0200 + + net/mlx5e: Add missing release firmware call + + Once driver finishes flashing the firmware image, it should release it. 
+ + Fixes: 9c8bca2637b8 ("mlx5: Move firmware flash implementation to devlink") + Signed-off-by: Eran Ben Elisha + Reviewed-by: Aya Levin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +index 381925c90d94..d63ce3feb65c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +@@ -23,7 +23,10 @@ static int mlx5_devlink_flash_update(struct devlink *devlink, + if (err) + return err; + +- return mlx5_firmware_flash(dev, fw, extack); ++ err = mlx5_firmware_flash(dev, fw, extack); ++ release_firmware(fw); ++ ++ return err; + } + + static u8 mlx5_fw_ver_major(u32 version) +-- +2.13.6 + diff --git a/SOURCES/0198-netdrv-net-mlx5e-Fix-nest_level-for-vlan-pop-action.patch b/SOURCES/0198-netdrv-net-mlx5e-Fix-nest_level-for-vlan-pop-action.patch new file mode 100644 index 0000000..038e20e --- /dev/null +++ b/SOURCES/0198-netdrv-net-mlx5e-Fix-nest_level-for-vlan-pop-action.patch @@ -0,0 +1,65 @@ +From 4e68ad174a7731f7ecb6d6ee2d02af52f2f8eb80 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:14 -0400 +Subject: [PATCH 198/312] [netdrv] net/mlx5e: Fix nest_level for vlan pop + action + +Message-id: <20200512105530.4207-109-ahleihel@redhat.com> +Patchwork-id: 306980 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 108/124] net/mlx5e: Fix nest_level for vlan pop action +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc2 + +commit 70f478ca085deec4d6c1f187f773f5827ddce7e8 +Author: Dmytro Linkin +Date: Wed Apr 1 14:41:27 2020 +0300 + + net/mlx5e: Fix nest_level for vlan pop action + + Current value of nest_level, assigned from net_device lower_level value, + does not reflect the actual number of vlan headers, needed to pop. + For ex., if we have untagged ingress traffic sended over vlan devices, + instead of one pop action, driver will perform two pop actions. + To fix that, calculate nest_level as difference between vlan device and + parent device lower_levels. 
+ + Fixes: f3b0a18bb6cb ("net: remove unnecessary variables and callback") + Signed-off-by: Dmytro Linkin + Signed-off-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index c455f73cd54e..3461aec49d9e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3257,12 +3257,13 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv, + struct mlx5_esw_flow_attr *attr, + u32 *action) + { +- int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev); + struct flow_action_entry vlan_act = { + .id = FLOW_ACTION_VLAN_POP, + }; +- int err = 0; ++ int nest_level, err = 0; + ++ nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev) - ++ vlan_get_encap_level(priv->netdev); + while (nest_level--) { + err = parse_tc_vlan_action(priv, &vlan_act, attr, action); + if (err) +-- +2.13.6 + diff --git a/SOURCES/0199-netdrv-net-mlx5e-Fix-pfnum-in-devlink-port-attribute.patch b/SOURCES/0199-netdrv-net-mlx5e-Fix-pfnum-in-devlink-port-attribute.patch new file mode 100644 index 0000000..0f9093b --- /dev/null +++ b/SOURCES/0199-netdrv-net-mlx5e-Fix-pfnum-in-devlink-port-attribute.patch @@ -0,0 +1,84 @@ +From 08d07de27cd1b9ec43e75242f71fa8468fb8b406 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:15 -0400 +Subject: [PATCH 199/312] [netdrv] net/mlx5e: Fix pfnum in devlink port + attribute + +Message-id: <20200512105530.4207-110-ahleihel@redhat.com> +Patchwork-id: 306981 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 109/124] net/mlx5e: Fix pfnum in devlink port attribute +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc2 + +commit 7482d9cb5b974b7ad1a58fa8714f7a8c05b5d278 +Author: Parav Pandit +Date: Fri Apr 3 03:57:30 2020 -0500 + + net/mlx5e: Fix pfnum in devlink port attribute + + Cited patch missed to extract PCI pf number accurately for PF and VF + port flavour. It considered PCI device + function number. + Due to this, device having non zero device number shown large pfnum. + + Hence, use only PCI function number; to avoid similar errors, derive + pfnum one time for all port flavours. 
+ + Fixes: f60f315d339e ("net/mlx5e: Register devlink ports for physical link, PCI PF, VFs") + Reviewed-by: Jiri Pirko + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 02f1362a01ef..e8845a6121dd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1935,29 +1935,30 @@ static int register_devlink_port(struct mlx5_core_dev *dev, + struct mlx5_eswitch_rep *rep = rpriv->rep; + struct netdev_phys_item_id ppid = {}; + unsigned int dl_port_index = 0; ++ u16 pfnum; + + if (!is_devlink_port_supported(dev, rpriv)) + return 0; + + mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); ++ pfnum = PCI_FUNC(dev->pdev->devfn); + + if (rep->vport == MLX5_VPORT_UPLINK) { + devlink_port_attrs_set(&rpriv->dl_port, + DEVLINK_PORT_FLAVOUR_PHYSICAL, +- PCI_FUNC(dev->pdev->devfn), false, 0, ++ pfnum, false, 0, + &ppid.id[0], ppid.id_len); + dl_port_index = vport_to_devlink_port_index(dev, rep->vport); + } else if (rep->vport == MLX5_VPORT_PF) { + devlink_port_attrs_pci_pf_set(&rpriv->dl_port, + &ppid.id[0], ppid.id_len, +- dev->pdev->devfn); ++ pfnum); + dl_port_index = rep->vport; + } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, + rpriv->rep->vport)) { + devlink_port_attrs_pci_vf_set(&rpriv->dl_port, + &ppid.id[0], ppid.id_len, +- dev->pdev->devfn, +- rep->vport - 1); ++ pfnum, rep->vport - 1); + dl_port_index = vport_to_devlink_port_index(dev, rep->vport); + } + +-- +2.13.6 + diff --git a/SOURCES/0200-netdrv-net-mlx5-Fix-failing-fw-tracer-allocation-on-.patch b/SOURCES/0200-netdrv-net-mlx5-Fix-failing-fw-tracer-allocation-on-.patch new file mode 100644 index 0000000..4bb305a --- /dev/null +++ b/SOURCES/0200-netdrv-net-mlx5-Fix-failing-fw-tracer-allocation-on-.patch @@ -0,0 +1,77 @@ +From 9fb3bab32b430a90670e6c65cc15070c54a9a2d5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:16 -0400 +Subject: [PATCH 200/312] [netdrv] net/mlx5: Fix failing fw tracer allocation + on s390 + +Message-id: <20200512105530.4207-111-ahleihel@redhat.com> +Patchwork-id: 306982 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 110/124] net/mlx5: Fix failing fw tracer allocation on s390 +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc3 + +commit a019b36123aec9700b21ae0724710f62928a8bc1 +Author: Niklas Schnelle +Date: Thu Apr 9 09:46:20 2020 +0200 + + net/mlx5: Fix failing fw tracer allocation on s390 + + On s390 FORCE_MAX_ZONEORDER is 9 instead of 11, thus a larger kzalloc() + allocation as done for the firmware tracer will always fail. + + Looking at mlx5_fw_tracer_save_trace(), it is actually the driver itself + that copies the debug data into the trace array and there is no need for + the allocation to be contiguous in physical memory. We can therefor use + kvzalloc() instead of kzalloc() and get rid of the large contiguous + allcoation. 
+ + Fixes: f53aaa31cce7 ("net/mlx5: FW tracer, implement tracer logic") + Signed-off-by: Niklas Schnelle + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +index c9c9b479bda5..5ce6ebbc7f10 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +@@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) + return NULL; + } + +- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); ++ tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL); + if (!tracer) + return ERR_PTR(-ENOMEM); + +@@ -982,7 +982,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) + tracer->dev = NULL; + destroy_workqueue(tracer->work_queue); + free_tracer: +- kfree(tracer); ++ kvfree(tracer); + return ERR_PTR(err); + } + +@@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) + mlx5_fw_tracer_destroy_log_buf(tracer); + flush_workqueue(tracer->work_queue); + destroy_workqueue(tracer->work_queue); +- kfree(tracer); ++ kvfree(tracer); + } + + static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data) +-- +2.13.6 + diff --git a/SOURCES/0201-netdrv-net-mlx5e-Don-t-trigger-IRQ-multiple-times-on.patch b/SOURCES/0201-netdrv-net-mlx5e-Don-t-trigger-IRQ-multiple-times-on.patch new file mode 100644 index 0000000..eb812b1 --- /dev/null +++ b/SOURCES/0201-netdrv-net-mlx5e-Don-t-trigger-IRQ-multiple-times-on.patch @@ -0,0 +1,138 @@ +From aacfc2f99d074a34d4a246df7d775bd6e463cd8f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:17 -0400 +Subject: [PATCH 201/312] [netdrv] net/mlx5e: Don't trigger IRQ multiple times + on XSK wakeup to avoid WQ overruns + +Message-id: <20200512105530.4207-112-ahleihel@redhat.com> +Patchwork-id: 306983 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 111/124] net/mlx5e: Don't trigger IRQ multiple times on XSK wakeup to avoid WQ overruns +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc3 + +commit e7e0004abdd6f83ae4be5613b29ed396beff576c +Author: Maxim Mikityanskiy +Date: Tue Feb 11 16:02:35 2020 +0200 + + net/mlx5e: Don't trigger IRQ multiple times on XSK wakeup to avoid WQ overruns + + XSK wakeup function triggers NAPI by posting a NOP WQE to a special XSK + ICOSQ. When the application floods the driver with wakeup requests by + calling sendto() in a certain pattern that ends up in mlx5e_trigger_irq, + the XSK ICOSQ may overflow. + + Multiple NOPs are not required and won't accelerate the process, so + avoid posting a second NOP if there is one already on the way. This way + we also avoid increasing the queue size (which might not help anyway). 
+ + Fixes: db05815b36cb ("net/mlx5e: Add XSK zero-copy support") + Signed-off-by: Maxim Mikityanskiy + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++- + drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 3 +++ + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8 +++++--- + drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 6 +++++- + 4 files changed, 15 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 58a7f28b146f..2e3a4ba96793 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -366,6 +366,7 @@ enum { + MLX5E_SQ_STATE_AM, + MLX5E_SQ_STATE_TLS, + MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, ++ MLX5E_SQ_STATE_PENDING_XSK_TX, + }; + + struct mlx5e_sq_wqe_info { +@@ -947,7 +948,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); + void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); + bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq); ++int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); + bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); + void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); + void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +index 03abb8cb96be..c054759ed7eb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +@@ -33,6 +33,9 @@ int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) + return 0; + ++ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state)) ++ return 0; ++ + spin_lock(&c->xskicosq_lock); + mlx5e_trigger_irq(&c->xskicosq); + spin_unlock(&c->xskicosq_lock); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index b2109bbcb985..1d606e13a336 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -590,7 +590,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) + return !!err; + } + +-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) ++int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + { + struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); + struct mlx5_cqe64 *cqe; +@@ -598,11 +598,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + int i; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) +- return; ++ return 0; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (likely(!cqe)) +- return; ++ return 0; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur +@@ -651,6 +651,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) + sq->cc = sqcc; + + mlx5_cqwq_update_db_record(&cq->wq); ++ ++ return i; + } + + bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +index 5dcdd18143e6..333d813b0019 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +@@ -138,7 +138,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, 
int budget) + mlx5e_post_rx_wqes, + rq); + if (xsk_open) { +- mlx5e_poll_ico_cq(&c->xskicosq.cq); ++ if (mlx5e_poll_ico_cq(&c->xskicosq.cq)) ++ /* Don't clear the flag if nothing was polled to prevent ++ * queueing more WQEs and overflowing XSKICOSQ. ++ */ ++ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state); + busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); + busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); + } +-- +2.13.6 + diff --git a/SOURCES/0202-netdrv-net-mlx5e-Get-the-latest-values-from-counters.patch b/SOURCES/0202-netdrv-net-mlx5e-Get-the-latest-values-from-counters.patch new file mode 100644 index 0000000..2b9bf1a --- /dev/null +++ b/SOURCES/0202-netdrv-net-mlx5e-Get-the-latest-values-from-counters.patch @@ -0,0 +1,74 @@ +From 5391a9ae3e19dbb7bd9d314271ec510177f9d8ca Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:18 -0400 +Subject: [PATCH 202/312] [netdrv] net/mlx5e: Get the latest values from + counters in switchdev mode + +Message-id: <20200512105530.4207-113-ahleihel@redhat.com> +Patchwork-id: 306985 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 112/124] net/mlx5e: Get the latest values from counters in switchdev mode +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc3 + +commit dcdf4ce0ff4ba206fc362e149c8ae81d6a2f849c +Author: Zhu Yanjun +Date: Wed Apr 8 14:51:52 2020 +0800 + + net/mlx5e: Get the latest values from counters in switchdev mode + + In the switchdev mode, when running "cat + /sys/class/net/NIC/statistics/tx_packets", the ppcnt register is + accessed to get the latest values. But currently this command can + not get the correct values from ppcnt. + + From firmware manual, before getting the 802_3 counters, the 802_3 + data layout should be set to the ppcnt register. + + When the command "cat /sys/class/net/NIC/statistics/tx_packets" is + run, before updating 802_3 data layout with ppcnt register, the + monitor counters are tested. The test result will decide the + 802_3 data layout is updated or not. + + Actually the monitor counters do not support to monitor rx/tx + stats of 802_3 in switchdev mode. So the rx/tx counters change + will not trigger monitor counters. So the 802_3 data layout will + not be updated in ppcnt register. Finally this command can not get + the latest values from ppcnt register with 802_3 data layout. + + Fixes: 5c7e8bbb0257 ("net/mlx5e: Use monitor counters for update stats") + Signed-off-by: Zhu Yanjun + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 0d50ee4fd986..22298f67fbd2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -3584,7 +3584,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + +- if (!mlx5e_monitor_counter_supported(priv)) { ++ /* In switchdev mode, monitor counters doesn't monitor ++ * rx/tx stats of 802_3. 
The update stats mechanism ++ * should keep the 802_3 layout counters updated ++ */ ++ if (!mlx5e_monitor_counter_supported(priv) || ++ mlx5e_is_uplink_rep(priv)) { + /* update HW stats in background for next time */ + mlx5e_queue_update_stats(priv); + } +-- +2.13.6 + diff --git a/SOURCES/0203-netdrv-net-mlx5-DR-On-creation-set-CQ-s-arm_db-membe.patch b/SOURCES/0203-netdrv-net-mlx5-DR-On-creation-set-CQ-s-arm_db-membe.patch new file mode 100644 index 0000000..fbdda13 --- /dev/null +++ b/SOURCES/0203-netdrv-net-mlx5-DR-On-creation-set-CQ-s-arm_db-membe.patch @@ -0,0 +1,85 @@ +From c3ca971b1394fda6a35b50ea0af68cd66cd1e761 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:20 -0400 +Subject: [PATCH 203/312] [netdrv] net/mlx5: DR, On creation set CQ's arm_db + member to right value + +Message-id: <20200512105530.4207-115-ahleihel@redhat.com> +Patchwork-id: 306988 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 114/124] net/mlx5: DR, On creation set CQ's arm_db member to right value +Bugzilla: 1789384 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1789384 +Upstream: v5.7-rc5 + +commit 8075411d93b6efe143d9f606f6531077795b7fbf +Author: Erez Shitrit +Date: Wed Mar 25 17:19:43 2020 +0200 + + net/mlx5: DR, On creation set CQ's arm_db member to right value + + In polling mode, set arm_db member to a value that will avoid CQ + event recovery by the HW. + Otherwise we might get event without completion function. + In addition,empty completion function to was added to protect from + unexpected events. + + Fixes: 297cccebdc5a ("net/mlx5: DR, Expose an internal API to issue RDMA operations") + Signed-off-by: Erez Shitrit + Reviewed-by: Tariq Toukan + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 14 +++++++++++++- + 1 file changed, 13 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +index c0ab9cf74929..18719acb7e54 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +@@ -695,6 +695,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq, + pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn); + } + ++static void dr_cq_complete(struct mlx5_core_cq *mcq, ++ struct mlx5_eqe *eqe) ++{ ++ pr_err("CQ completion CQ: #%u\n", mcq->cqn); ++} ++ + static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + struct mlx5_uars_page *uar, + size_t ncqe) +@@ -756,6 +762,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); + + cq->mcq.event = dr_cq_event; ++ cq->mcq.comp = dr_cq_complete; + + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); + kvfree(in); +@@ -767,7 +774,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, + cq->mcq.set_ci_db = cq->wq_ctrl.db.db; + cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; + *cq->mcq.set_ci_db = 0; +- *cq->mcq.arm_db = 0; ++ ++ /* set no-zero value, in order to avoid the HW to run db-recovery on ++ * CQ that used in polling mode. 
++ */ ++ *cq->mcq.arm_db = cpu_to_be32(2 << 28); ++ + cq->mcq.vector = 0; + cq->mcq.irqn = irqn; + cq->mcq.uar = uar; +-- +2.13.6 + diff --git a/SOURCES/0204-netdrv-net-mlx5-Fix-forced-completion-access-non-ini.patch b/SOURCES/0204-netdrv-net-mlx5-Fix-forced-completion-access-non-ini.patch new file mode 100644 index 0000000..303836b --- /dev/null +++ b/SOURCES/0204-netdrv-net-mlx5-Fix-forced-completion-access-non-ini.patch @@ -0,0 +1,68 @@ +From 531a5454501692236d41d7fbf7932a4b66340b2f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:21 -0400 +Subject: [PATCH 204/312] [netdrv] net/mlx5: Fix forced completion access non + initialized command entry + +Message-id: <20200512105530.4207-116-ahleihel@redhat.com> +Patchwork-id: 306987 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 115/124] net/mlx5: Fix forced completion access non initialized command entry +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc5 + +commit f3cb3cebe26ed4c8036adbd9448b372129d3c371 +Author: Moshe Shemesh +Date: Sun Jul 21 08:40:13 2019 +0300 + + net/mlx5: Fix forced completion access non initialized command entry + + mlx5_cmd_flush() will trigger forced completions to all valid command + entries. Triggered by an asynch event such as fast teardown it can + happen at any stage of the command, including command initialization. + It will trigger forced completion and that can lead to completion on an + uninitialized command entry. + + Setting MLX5_CMD_ENT_STATE_PENDING_COMP only after command entry is + initialized will ensure force completion is treated only if command + entry is initialized. + + Fixes: 73dd3a4839c1 ("net/mlx5: Avoid using pending command interface slots") + Signed-off-by: Moshe Shemesh + Signed-off-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 71a52b890f38..59e38a6c4f52 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work) + } + + cmd->ent_arr[ent->idx] = ent; +- set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); +@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work) + + if (ent->callback) + schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); ++ set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); + + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || +-- +2.13.6 + diff --git a/SOURCES/0205-netdrv-net-mlx5-Fix-command-entry-leak-in-Internal-E.patch b/SOURCES/0205-netdrv-net-mlx5-Fix-command-entry-leak-in-Internal-E.patch new file mode 100644 index 0000000..e795e7f --- /dev/null +++ b/SOURCES/0205-netdrv-net-mlx5-Fix-command-entry-leak-in-Internal-E.patch @@ -0,0 +1,58 @@ +From 1d288fe788bd17f3e56e4bccc7f0ef448284a5e8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:22 -0400 +Subject: [PATCH 205/312] [netdrv] net/mlx5: Fix command entry leak in Internal + Error State + +Message-id: <20200512105530.4207-117-ahleihel@redhat.com> +Patchwork-id: 306989 
+Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 116/124] net/mlx5: Fix command entry leak in Internal Error State +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc5 + +commit cece6f432cca9f18900463ed01b97a152a03600a +Author: Moshe Shemesh +Date: Sun Feb 23 03:27:41 2020 +0200 + + net/mlx5: Fix command entry leak in Internal Error State + + Processing commands by cmd_work_handler() while already in Internal + Error State will result in entry leak, since the handler process force + completion without doorbell. Forced completion doesn't release the entry + and event completion will never arrive, so entry should be released. + + Fixes: 73dd3a4839c1 ("net/mlx5: Avoid using pending command interface slots") + Signed-off-by: Moshe Shemesh + Signed-off-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 59e38a6c4f52..23acec5a31d4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work) + MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); + + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); ++ /* no doorbell, no need to keep the entry */ ++ free_ent(cmd, ent->idx); ++ if (ent->callback) ++ free_cmd(ent); + return; + } + +-- +2.13.6 + diff --git a/SOURCES/0206-netdrv-net-mlx5e-Fix-q-counters-on-uplink-represento.patch b/SOURCES/0206-netdrv-net-mlx5e-Fix-q-counters-on-uplink-represento.patch new file mode 100644 index 0000000..c2f914e --- /dev/null +++ b/SOURCES/0206-netdrv-net-mlx5e-Fix-q-counters-on-uplink-represento.patch @@ -0,0 +1,67 @@ +From ab910fa64a2981ba829ba09faacecf273220aa45 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:23 -0400 +Subject: [PATCH 206/312] [netdrv] net/mlx5e: Fix q counters on uplink + representors + +Message-id: <20200512105530.4207-118-ahleihel@redhat.com> +Patchwork-id: 306990 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 117/124] net/mlx5e: Fix q counters on uplink representors +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: v5.7-rc5 + +commit 67b38de646894c9a94fe4d6d17719e70cc6028eb +Author: Roi Dayan +Date: Thu Apr 23 12:37:21 2020 +0300 + + net/mlx5e: Fix q counters on uplink representors + + Need to allocate the q counters before init_rx which needs them + when creating the rq. 
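
    The root cause described above is a plain init-ordering dependency: the counter
    object must exist before the RX side that references it, and teardown must run in
    the opposite order. A minimal standalone C sketch of that rule, with invented
    names rather than the driver's functions:

        #include <stdio.h>
        #include <stdlib.h>

        /* Hypothetical stand-ins for the q counter and the RX path that samples it. */
        struct q_counter { unsigned long out_of_buffer; };
        struct rx_path   { struct q_counter *cnt; };

        static int rx_init(struct rx_path *rx, struct q_counter *cnt)
        {
                if (!cnt)
                        return -1;      /* the case the reordering avoids */
                rx->cnt = cnt;
                return 0;
        }

        static void rx_cleanup(struct rx_path *rx)
        {
                rx->cnt = NULL;
        }

        int main(void)
        {
                struct q_counter *cnt = calloc(1, sizeof(*cnt));
                struct rx_path rx;

                if (!cnt || rx_init(&rx, cnt)) {   /* 1) create counter  2) init RX */
                        free(cnt);
                        return 1;
                }
                rx.cnt->out_of_buffer++;           /* datapath updates the counter */
                printf("drops: %lu\n", cnt->out_of_buffer);
                rx_cleanup(&rx);                   /* teardown mirrors init, reversed */
                free(cnt);
                return 0;
        }
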
+ + Fixes: 8520fa57a4e9 ("net/mlx5e: Create q counters on uplink representors") + Signed-off-by: Roi Dayan + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9 ++------- + 1 file changed, 2 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index e8845a6121dd..7ba784575a23 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1664,19 +1664,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) + + static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) + { +- int err = mlx5e_init_rep_rx(priv); +- +- if (err) +- return err; +- + mlx5e_create_q_counters(priv); +- return 0; ++ return mlx5e_init_rep_rx(priv); + } + + static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv) + { +- mlx5e_destroy_q_counters(priv); + mlx5e_cleanup_rep_rx(priv); ++ mlx5e_destroy_q_counters(priv); + } + + static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) +-- +2.13.6 + diff --git a/SOURCES/0207-netdrv-net-mlx5e-en_accel-Add-missing-net-geneve.h-i.patch b/SOURCES/0207-netdrv-net-mlx5e-en_accel-Add-missing-net-geneve.h-i.patch new file mode 100644 index 0000000..8c3ccee --- /dev/null +++ b/SOURCES/0207-netdrv-net-mlx5e-en_accel-Add-missing-net-geneve.h-i.patch @@ -0,0 +1,59 @@ +From 0882db22b0ab0fbf192529ef76969bb691360529 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:24 -0400 +Subject: [PATCH 207/312] [netdrv] net/mlx5e: en_accel, Add missing + net/geneve.h include + +Message-id: <20200512105530.4207-119-ahleihel@redhat.com> +Patchwork-id: 306991 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 118/124] net/mlx5e: en_accel, Add missing net/geneve.h include +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: net-next/master + +commit 9425c595bd513948537ef355c07a65595dd2c771 +Author: Raed Salem +Date: Mon Jan 20 15:03:00 2020 +0200 + + net/mlx5e: en_accel, Add missing net/geneve.h include + + The cited commit relies on include being included + implicitly prior to include "en_accel/en_accel.h". + This mandates that all files that needs to include en_accel.h + to redantantly include net/geneve.h. + + Include net/geneve.h explicitly at "en_accel/en_accel.h" to avoid + undesired constrain as above. 
+ + Fixes: e3cfc7e6b7bd ("net/mlx5e: TX, Add geneve tunnel stateless offload support") + Signed-off-by: Raed Salem + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +index 3022463f2284..a6f65d4b2f36 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +@@ -42,6 +42,8 @@ + #include "en/txrx.h" + + #if IS_ENABLED(CONFIG_GENEVE) ++#include ++ + static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev) + { + return mlx5_tx_swp_supported(mdev); +-- +2.13.6 + diff --git a/SOURCES/0208-netdrv-net-mlx5e-Set-of-completion-request-bit-shoul.patch b/SOURCES/0208-netdrv-net-mlx5e-Set-of-completion-request-bit-shoul.patch new file mode 100644 index 0000000..5fb8f7a --- /dev/null +++ b/SOURCES/0208-netdrv-net-mlx5e-Set-of-completion-request-bit-shoul.patch @@ -0,0 +1,58 @@ +From 736ea388a8658ad5e46c95416c687e66edd66bbc Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:25 -0400 +Subject: [PATCH 208/312] [netdrv] net/mlx5e: Set of completion request bit + should not clear other adjacent bits + +Message-id: <20200512105530.4207-120-ahleihel@redhat.com> +Patchwork-id: 306992 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 119/124] net/mlx5e: Set of completion request bit should not clear other adjacent bits +Bugzilla: 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Upstream: net-next/master + +commit 82fe2996419830b0bb2c7e1f2fed2d3a8a1a65cd +Author: Tariq Toukan +Date: Tue Feb 18 12:27:25 2020 +0200 + + net/mlx5e: Set of completion request bit should not clear other adjacent bits + + In notify HW (ring doorbell) flow, we set the bit to request a completion + on the TX descriptor. + When doing so, we should not unset other bits in the same byte. + Currently, this does not fix a real issue, as we still don't have a flow + where both MLX5_WQE_CTRL_CQ_UPDATE and any adjacent bit are set together. 
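
    The one-character change below (plain assignment replaced with a bitwise OR) is
    easy to demonstrate in isolation. The following standalone C snippet uses an
    invented flag layout, not the real WQE control segment, to show how '=' clears a
    previously set adjacent bit while '|=' preserves it:

        #include <stdio.h>
        #include <stdint.h>

        /* Invented bit positions, standing in for flags packed into one control byte. */
        #define FLAG_CQ_UPDATE  (1u << 3)
        #define FLAG_OTHER      (1u << 1)   /* an adjacent bit set earlier in the flow */

        int main(void)
        {
                uint8_t assigned = FLAG_OTHER;
                uint8_t ored     = FLAG_OTHER;

                assigned = FLAG_CQ_UPDATE;   /* '=' silently drops FLAG_OTHER         */
                ored    |= FLAG_CQ_UPDATE;   /* '|=' keeps it, as the fix above does  */

                printf("assign: 0x%02x, or: 0x%02x\n", assigned, ored); /* 0x08 vs 0x0a */
                return 0;
        }
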
+ + Fixes: 542578c67936 ("net/mlx5e: Move helper functions to a new txrx datapath header") + Fixes: 864b2d715300 ("net/mlx5e: Generalize tx helper functions for different SQ types") + Signed-off-by: Tariq Toukan + Reviewed-by: Aya Levin + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +index f07b1399744e..9f6967d76053 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +@@ -102,7 +102,7 @@ static inline void + mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, + struct mlx5_wqe_ctrl_seg *ctrl) + { +- ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; ++ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE; + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + +-- +2.13.6 + diff --git a/SOURCES/0209-netdrv-mlx5-Update-list-of-unsupported-devices.patch b/SOURCES/0209-netdrv-mlx5-Update-list-of-unsupported-devices.patch new file mode 100644 index 0000000..b6c76ea --- /dev/null +++ b/SOURCES/0209-netdrv-mlx5-Update-list-of-unsupported-devices.patch @@ -0,0 +1,53 @@ +From a06969b83a8515b2d32d6d4e9d65555b75229b5f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:28 -0400 +Subject: [PATCH 209/312] [netdrv] mlx5: Update list of unsupported devices + +Message-id: <20200512105530.4207-123-ahleihel@redhat.com> +Patchwork-id: 306995 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 122/124] mlx5: Update list of unsupported devices +Bugzilla: 1823685 1789382 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1823685 +Upstream: RHEL-only + +Mark the new devices as unsupported so customers know that these devices +have not passed full RHEL testing. +Marking the devices as unsupported doesn't taint the kernel, which marking +them as tech-preview would do. +This enables the devices to go through qualification & certification +testing post RHEL-8.3 GA. 
+ +The new devices are: + - ConnectX-6 LX + - ConnectX-7 + - BlueField-2 + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 05d66dd6791b..314d6d95cf76 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -738,6 +738,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) + static const struct pci_device_id mlx5_core_hw_unsupp_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ ++ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ ++ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ ++ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ + { 0, } + }; + +-- +2.13.6 + diff --git a/SOURCES/0210-netdrv-mlx5-Remove-the-unsupported-mark-from-Connect.patch b/SOURCES/0210-netdrv-mlx5-Remove-the-unsupported-mark-from-Connect.patch new file mode 100644 index 0000000..7acf676 --- /dev/null +++ b/SOURCES/0210-netdrv-mlx5-Remove-the-unsupported-mark-from-Connect.patch @@ -0,0 +1,44 @@ +From af84b7bad6e72a4f3f9647d5856b6f94277f35ba Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 12 May 2020 10:55:29 -0400 +Subject: [PATCH 210/312] [netdrv] mlx5: Remove the unsupported mark from + ConnectX-6 Dx device + +Message-id: <20200512105530.4207-124-ahleihel@redhat.com> +Patchwork-id: 306997 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1789382 123/124] mlx5: Remove the unsupported mark from ConnectX-6 Dx device +Bugzilla: 1789382 1782831 +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1789382 +Bugzilla: http://bugzilla.redhat.com/1782831 +Upstream: RHEL-only + +Now that ConnectX-6 Dx device is available and tested, we can remove the +unsupported mark from this devices. 
+ +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 314d6d95cf76..c835f029caf8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -736,8 +736,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) + + /* PCI table of mlx5 devices that are tech preview in RHEL */ + static const struct pci_device_id mlx5_core_hw_unsupp_pci_table[] = { +- { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ +- { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ +-- +2.13.6 + diff --git a/SOURCES/0211-netdrv-net-mlx5-TC-Offload-flow-table-rules.patch b/SOURCES/0211-netdrv-net-mlx5-TC-Offload-flow-table-rules.patch new file mode 100644 index 0000000..dec90b6 --- /dev/null +++ b/SOURCES/0211-netdrv-net-mlx5-TC-Offload-flow-table-rules.patch @@ -0,0 +1,215 @@ +From b48be0498952550c01fb5de0769c8f1be1ac612d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:32 -0400 +Subject: [PATCH 211/312] [netdrv] net/mlx5: TC: Offload flow table rules + +Message-id: <20200519074934.6303-2-ahleihel@redhat.com> +Patchwork-id: 310508 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 01/63] net/mlx5: TC: Offload flow table rules +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.5-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/en_tc.c + Small context diff due to already backported commit: + 554fe75c1b3f ("net/mlx5e: Avoid duplicating rule destinations") + ---> Different list of local variables in function parse_tc_fdb_actions. + +commit 84179981317fb4fb3e9df5acd42ea33cf6037793 +Author: Paul Blakey +Date: Tue Nov 12 00:34:30 2019 +0100 + + net/mlx5: TC: Offload flow table rules + + Since both tc rules and flow table rules are of the same format, + we can re-use tc parsing for that, and move the flow table rules + to their steering domain - In this case, the next chain after + max tc chain. 
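
    The key idea here — copy the classifier request, move it onto a chain reserved
    just past the tc range, and feed it to the unchanged tc handler — can be sketched
    without any driver types. The snippet below uses invented structures and limits,
    not the mlx5 ones:

        #include <stdio.h>
        #include <string.h>

        #define TC_MAX_CHAIN       3          /* invented limits, not the mlx5 values */
        #define RESERVED_FT_CHAIN  (TC_MAX_CHAIN + 1)

        struct cls_req {
                unsigned int chain_index;
                unsigned int prio;
                unsigned long stats_pkts;
        };

        /* Stand-in for the existing tc offload path. */
        static int tc_offload(struct cls_req *req)
        {
                printf("offloading rule on chain %u, prio %u\n",
                       req->chain_index, req->prio);
                req->stats_pkts = 42;          /* pretend hardware returned stats */
                return 0;
        }

        /* Flow-table rules reuse the tc path on a chain reserved past the tc range. */
        static int ft_offload(struct cls_req *f)
        {
                struct cls_req tmp;
                int err;

                if (f->chain_index)            /* only chain 0 of FT offload is allowed */
                        return -1;

                memcpy(&tmp, f, sizeof(tmp));
                tmp.chain_index = RESERVED_FT_CHAIN;
                err = tc_offload(&tmp);
                f->stats_pkts = tmp.stats_pkts; /* stats flow back to the original request */
                return err;
        }

        int main(void)
        {
                struct cls_req req = { .chain_index = 0, .prio = 1 };

                if (ft_offload(&req))
                        return 1;
                printf("stats: %lu packets\n", req.stats_pkts);
                return 0;
        }
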
+ + Signed-off-by: Paul Blakey + Reviewed-by: Mark Bloch + Acked-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 45 ++++++++++++++++++++++-- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 28 ++++++++++++++- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3 +- + 3 files changed, 71 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 7ba784575a23..d03d80e162df 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1220,21 +1220,60 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + } + } + +-static LIST_HEAD(mlx5e_rep_block_cb_list); ++static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, ++ void *cb_priv) ++{ ++ struct flow_cls_offload *f = type_data; ++ struct flow_cls_offload cls_flower; ++ struct mlx5e_priv *priv = cb_priv; ++ struct mlx5_eswitch *esw; ++ unsigned long flags; ++ int err; ++ ++ flags = MLX5_TC_FLAG(INGRESS) | ++ MLX5_TC_FLAG(ESW_OFFLOAD) | ++ MLX5_TC_FLAG(FT_OFFLOAD); ++ esw = priv->mdev->priv.eswitch; + ++ switch (type) { ++ case TC_SETUP_CLSFLOWER: ++ if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index) ++ return -EOPNOTSUPP; ++ ++ /* Re-use tc offload path by moving the ft flow to the ++ * reserved ft chain. ++ */ ++ memcpy(&cls_flower, f, sizeof(*f)); ++ cls_flower.common.chain_index = FDB_FT_CHAIN; ++ err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags); ++ memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats)); ++ return err; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static LIST_HEAD(mlx5e_rep_block_tc_cb_list); ++static LIST_HEAD(mlx5e_rep_block_ft_cb_list); + static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct flow_block_offload *f = type_data; + ++ f->unlocked_driver_cb = true; ++ + switch (type) { + case TC_SETUP_BLOCK: +- f->unlocked_driver_cb = true; + return flow_block_cb_setup_simple(type_data, +- &mlx5e_rep_block_cb_list, ++ &mlx5e_rep_block_tc_cb_list, + mlx5e_rep_setup_tc_cb, + priv, priv, true); ++ case TC_SETUP_FT: ++ return flow_block_cb_setup_simple(type_data, ++ &mlx5e_rep_block_ft_cb_list, ++ mlx5e_rep_setup_ft_cb, ++ priv, priv, true); + default: + return -EOPNOTSUPP; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 3461aec49d9e..3df69f8ed58a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -74,6 +74,7 @@ enum { + MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, + MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, ++ MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE, + MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1, +@@ -276,6 +277,11 @@ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow) + return flow_flag_test(flow, ESWITCH); + } + ++static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow) ++{ ++ return flow_flag_test(flow, FT); ++} ++ + static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) + { + return 
flow_flag_test(flow, OFFLOADED); +@@ -1171,7 +1177,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + return -EOPNOTSUPP; + } + +- if (attr->chain > max_chain) { ++ /* We check chain range only for tc flows. ++ * For ft flows, we checked attr->chain was originally 0 and set it to ++ * FDB_FT_CHAIN which is outside tc range. ++ * See mlx5e_rep_setup_ft_cb(). ++ */ ++ if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) { + NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); + return -EOPNOTSUPP; + } +@@ -3315,6 +3326,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct mlx5e_rep_priv *rpriv = priv->ppriv; + const struct ip_tunnel_info *info = NULL; + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; ++ bool ft_flow = mlx5e_is_ft_flow(flow); + const struct flow_action_entry *act; + int err, i, if_count = 0; + bool encap = false; +@@ -3362,6 +3374,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + return -EINVAL; + } + ++ if (ft_flow && out_dev == priv->netdev) { ++ /* Ignore forward to self rules generated ++ * by adding both mlx5 devs to the flow table ++ * block on a normal nft offload setup. ++ */ ++ return -EOPNOTSUPP; ++ } ++ + if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); +@@ -3518,6 +3538,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + u32 dest_chain = act->chain_index; + u32 max_chain = mlx5_eswitch_get_chain_range(esw); + ++ if (ft_flow) { ++ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); ++ return -EOPNOTSUPP; ++ } + if (dest_chain <= attr->chain) { + NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported"); + return -EOPNOTSUPP; +@@ -3608,6 +3632,8 @@ static void get_flags(int flags, unsigned long *flow_flags) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); + if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); ++ if (flags & MLX5_TC_FLAG(FT_OFFLOAD)) ++ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT); + + *flow_flags = __flow_flags; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 924c6ef86a14..262cdb7b69b1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -44,7 +44,8 @@ enum { + MLX5E_TC_FLAG_EGRESS_BIT, + MLX5E_TC_FLAG_NIC_OFFLOAD_BIT, + MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, +- MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT, ++ MLX5E_TC_FLAG_FT_OFFLOAD_BIT, ++ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT, + }; + + #define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT) +-- +2.13.6 + diff --git a/SOURCES/0212-netdrv-net-mlx5-ft-Use-getter-function-to-get-ft-cha.patch b/SOURCES/0212-netdrv-net-mlx5-ft-Use-getter-function-to-get-ft-cha.patch new file mode 100644 index 0000000..e92784f --- /dev/null +++ b/SOURCES/0212-netdrv-net-mlx5-ft-Use-getter-function-to-get-ft-cha.patch @@ -0,0 +1,97 @@ +From 23f752e697e5b1d68d0e2ccf5fc7679a62211fbd Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:33 -0400 +Subject: [PATCH 212/312] [netdrv] net/mlx5: ft: Use getter function to get ft + chain + +Message-id: <20200519074934.6303-3-ahleihel@redhat.com> +Patchwork-id: 310504 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 02/63] net/mlx5: ft: Use getter function to get ft chain +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John 
Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.6-rc1 + +commit e66cbc961cfe375f977b3bfae13abec4df8c8521 +Author: Paul Blakey +Date: Tue Nov 26 14:13:42 2019 +0200 + + net/mlx5: ft: Use getter function to get ft chain + + FT chain is defined as the next chain after tc. + + To prepare for next patches that will increase the number of tc + chains available at runtime, use a getter function to get this + value. + + The define is still used in static fs_core allocation, + to calculate the number of chains. This static allocation + will be used if the relevant capabilities won't be available + to support dynamic chains. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3 +++ + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5 +++++ + 3 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index d03d80e162df..406fb642f2d4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1244,7 +1244,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, + * reserved ft chain. + */ + memcpy(&cls_flower, f, sizeof(*f)); +- cls_flower.common.chain_index = FDB_FT_CHAIN; ++ cls_flower.common.chain_index = mlx5_eswitch_get_ft_chain(esw); + err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags); + memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats)); + return err; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 14814f41346e..90995ab7e1a8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -387,6 +387,9 @@ mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw); + u32 + mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw); + ++unsigned int ++mlx5_eswitch_get_ft_chain(struct mlx5_eswitch *esw); ++ + struct mlx5_flow_handle * + mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, + struct mlx5_flow_destination *dest); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 9837baba3b8b..bd750f069e6c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -80,6 +80,11 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) + return 0; + } + ++u32 mlx5_eswitch_get_ft_chain(struct mlx5_eswitch *esw) ++{ ++ return mlx5_eswitch_get_chain_range(esw) + 1; ++} ++ + u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) + { + if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) +-- +2.13.6 + diff --git a/SOURCES/0213-netdrv-net-mlx5-ft-Check-prio-and-chain-sanity-for-f.patch b/SOURCES/0213-netdrv-net-mlx5-ft-Check-prio-and-chain-sanity-for-f.patch new file mode 100644 index 0000000..0c8e4dd --- /dev/null +++ b/SOURCES/0213-netdrv-net-mlx5-ft-Check-prio-and-chain-sanity-for-f.patch @@ -0,0 +1,96 @@ +From 54d02b39369e78ccdc913277f36d16b337d08437 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 
07:48:34 -0400 +Subject: [PATCH 213/312] [netdrv] net/mlx5: ft: Check prio and chain sanity + for ft offload + +Message-id: <20200519074934.6303-4-ahleihel@redhat.com> +Patchwork-id: 310509 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 03/63] net/mlx5: ft: Check prio and chain sanity for ft offload +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.6-rc1 + +commit 82270e12544ee76ea9a3117a769a6d466a2e646b +Author: Paul Blakey +Date: Tue Nov 26 14:15:00 2019 +0200 + + net/mlx5: ft: Check prio and chain sanity for ft offload + + Before changing the chain from original chain to ft offload chain, + make sure user doesn't actually use chains. + + While here, normalize the prio range to that which we support. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 27 ++++++++++++++++++------ + 1 file changed, 20 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 406fb642f2d4..bde634ca85d0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1223,8 +1223,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) + { +- struct flow_cls_offload *f = type_data; +- struct flow_cls_offload cls_flower; ++ struct flow_cls_offload tmp, *f = type_data; + struct mlx5e_priv *priv = cb_priv; + struct mlx5_eswitch *esw; + unsigned long flags; +@@ -1237,16 +1236,30 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, + + switch (type) { + case TC_SETUP_CLSFLOWER: +- if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index) ++ memcpy(&tmp, f, sizeof(*f)); ++ ++ if (!mlx5_eswitch_prios_supported(esw) || ++ tmp.common.chain_index) + return -EOPNOTSUPP; + + /* Re-use tc offload path by moving the ft flow to the + * reserved ft chain. ++ * ++ * FT offload can use prio range [0, INT_MAX], so we ++ * normalize it to range [1, mlx5_eswitch_get_prio_range(esw)] ++ * as with tc, where prio 0 isn't supported. ++ * ++ * We only support chain 0 of FT offload. 
+ */ +- memcpy(&cls_flower, f, sizeof(*f)); +- cls_flower.common.chain_index = mlx5_eswitch_get_ft_chain(esw); +- err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags); +- memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats)); ++ if (tmp.common.prio >= mlx5_eswitch_get_prio_range(esw)) ++ return -EOPNOTSUPP; ++ if (tmp.common.chain_index != 0) ++ return -EOPNOTSUPP; ++ ++ tmp.common.chain_index = mlx5_eswitch_get_ft_chain(esw); ++ tmp.common.prio++; ++ err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags); ++ memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); + return err; + default: + return -EOPNOTSUPP; +-- +2.13.6 + diff --git a/SOURCES/0214-netdrv-net-mlx5-E-Switch-Refactor-chains-and-priorit.patch b/SOURCES/0214-netdrv-net-mlx5-E-Switch-Refactor-chains-and-priorit.patch new file mode 100644 index 0000000..2b38b7a --- /dev/null +++ b/SOURCES/0214-netdrv-net-mlx5-E-Switch-Refactor-chains-and-priorit.patch @@ -0,0 +1,1357 @@ +From 73d0c14afb1f4a952800aeef0d20a0227dfd3d28 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:35 -0400 +Subject: [PATCH 214/312] [netdrv] net/mlx5: E-Switch, Refactor chains and + priorities + +Message-id: <20200519074934.6303-5-ahleihel@redhat.com> +Patchwork-id: 310505 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 04/63] net/mlx5: E-Switch, Refactor chains and priorities +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.6-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c + Minor context diff due to code that was previously changed by + this commit: + 93b8a7ecb728 ("net/mlx5: Fix lowest FDB pool size") + The same conflict was seen upstream in merge commit: + 4d8773b68e83 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net") + + - drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c + The previous file had a conflict at the last value in array ESW_POOLS, + since the whole code moved to this file, make sure we don't lose the + correct value of that field. + +commit 39ac237ce00968545e7298faa9e07ecb7e440fb5 +Author: Paul Blakey +Date: Wed Jan 8 12:11:04 2020 +0200 + + net/mlx5: E-Switch, Refactor chains and priorities + + To support the entire chain and prio range (32bit + 16bit), + instead of a using a static array of chains/prios of limited size, create + them dynamically, and use a rhashtable to search for existing chains/prio + combinations. + + This will be used in next patch to actually increase the number using + unamanged tables support and ignore flow level capability. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 11 +- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 14 +- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 33 +- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 303 ++---------- + .../mellanox/mlx5/core/eswitch_offloads_chains.c | 542 +++++++++++++++++++++ + .../mellanox/mlx5/core/eswitch_offloads_chains.h | 27 + + 7 files changed, 646 insertions(+), 286 deletions(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index d14a13557c0c..eb3ce7912730 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -41,7 +41,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu + # Core extra + # + mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ +- ecpf.o rdma.o ++ ecpf.o rdma.o eswitch_offloads_chains.o + mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o + mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o + mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index bde634ca85d0..dcf97bd4fa49 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -41,6 +41,7 @@ + #include + + #include "eswitch.h" ++#include "eswitch_offloads_chains.h" + #include "en.h" + #include "en_rep.h" + #include "en_tc.h" +@@ -1238,25 +1239,25 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data, + case TC_SETUP_CLSFLOWER: + memcpy(&tmp, f, sizeof(*f)); + +- if (!mlx5_eswitch_prios_supported(esw) || ++ if (!mlx5_esw_chains_prios_supported(esw) || + tmp.common.chain_index) + return -EOPNOTSUPP; + + /* Re-use tc offload path by moving the ft flow to the + * reserved ft chain. + * +- * FT offload can use prio range [0, INT_MAX], so we +- * normalize it to range [1, mlx5_eswitch_get_prio_range(esw)] ++ * FT offload can use prio range [0, INT_MAX], so we normalize ++ * it to range [1, mlx5_esw_chains_get_prio_range(esw)] + * as with tc, where prio 0 isn't supported. + * + * We only support chain 0 of FT offload. 
+ */ +- if (tmp.common.prio >= mlx5_eswitch_get_prio_range(esw)) ++ if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw)) + return -EOPNOTSUPP; + if (tmp.common.chain_index != 0) + return -EOPNOTSUPP; + +- tmp.common.chain_index = mlx5_eswitch_get_ft_chain(esw); ++ tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw); + tmp.common.prio++; + err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags); + memcpy(&f->stats, &tmp.stats, sizeof(f->stats)); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 3df69f8ed58a..24f7af806da2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -51,6 +51,7 @@ + #include "en_rep.h" + #include "en_tc.h" + #include "eswitch.h" ++#include "eswitch_offloads_chains.h" + #include "fs_core.h" + #include "en/port.h" + #include "en/tc_tun.h" +@@ -1083,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); + slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + slow_attr->split_count = 0; +- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; ++ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; + + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); + if (!IS_ERR(rule)) +@@ -1100,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); + slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + slow_attr->split_count = 0; +- slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN; ++ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; + mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); + flow_flag_clear(flow, SLOW); + } +@@ -1160,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + struct netlink_ext_ack *extack) + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; +- u32 max_chain = mlx5_eswitch_get_chain_range(esw); + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; +- u16 max_prio = mlx5_eswitch_get_prio_range(esw); + struct net_device *out_dev, *encap_dev = NULL; + struct mlx5_fc *counter = NULL; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *out_priv; + bool encap_valid = true; ++ u32 max_prio, max_chain; + int err = 0; + int out_index; + +- if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) { ++ if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) { + NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW"); + return -EOPNOTSUPP; + } +@@ -1182,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + * FDB_FT_CHAIN which is outside tc range. + * See mlx5e_rep_setup_ft_cb(). 
+ */ ++ max_chain = mlx5_esw_chains_get_chain_range(esw); + if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) { + NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); + return -EOPNOTSUPP; + } + ++ max_prio = mlx5_esw_chains_get_prio_range(esw); + if (attr->prio > max_prio) { + NL_SET_ERR_MSG(extack, "Requested priority is out of supported range"); + return -EOPNOTSUPP; +@@ -3536,7 +3538,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + break; + case FLOW_ACTION_GOTO: { + u32 dest_chain = act->chain_index; +- u32 max_chain = mlx5_eswitch_get_chain_range(esw); ++ u32 max_chain = mlx5_esw_chains_get_chain_range(esw); + + if (ft_flow) { + NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 90995ab7e1a8..dd7b9a96045c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -176,7 +176,10 @@ enum offloads_fdb_flags { + ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0), + }; + ++#ifdef __GENKSYMS__ + extern const unsigned int ESW_POOLS[4]; ++#endif ++struct mlx5_esw_chains_priv; + + struct mlx5_eswitch_fdb { + union { +@@ -201,18 +204,18 @@ struct mlx5_eswitch_fdb { + struct mlx5_flow_handle *miss_rule_multi; + int vlan_push_pop_refcount; + ++#ifndef __GENKSYMS__ ++ struct mlx5_esw_chains_priv *esw_chains_priv; ++#else + struct { + struct mlx5_flow_table *fdb; + u32 num_rules; +-#ifndef __GENKSYMS__ +- } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; +-#else + } fdb_prio[FDB_TC_MAX_CHAIN + 1][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO]; +-#endif + /* Protects fdb_prio table */ + struct mutex fdb_prio_lock; + + int fdb_left[ARRAY_SIZE(ESW_POOLS)]; ++#endif + } offloads; + }; + u32 flags; +@@ -378,18 +381,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule, + struct mlx5_esw_flow_attr *attr); + +-bool +-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw); +- +-u16 +-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw); +- +-u32 +-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw); +- +-unsigned int +-mlx5_eswitch_get_ft_chain(struct mlx5_eswitch *esw); +- + struct mlx5_flow_handle * + mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, + struct mlx5_flow_destination *dest); +@@ -414,6 +405,11 @@ enum { + MLX5_ESW_DEST_ENCAP_VALID = BIT(1), + }; + ++enum { ++ MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0), ++ MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1), ++}; ++ + struct mlx5_esw_flow_attr { + struct mlx5_eswitch_rep *in_rep; + struct mlx5_core_dev *in_mdev; +@@ -427,7 +423,9 @@ struct mlx5_esw_flow_attr { + u16 vlan_vid[MLX5_FS_VLAN_DEPTH]; + u8 vlan_prio[MLX5_FS_VLAN_DEPTH]; + u8 total_vlan; ++#ifdef __GENKSYMS__ + bool vlan_handled; ++#endif + struct { + u32 flags; + struct mlx5_eswitch_rep *rep; +@@ -442,6 +440,9 @@ struct mlx5_esw_flow_attr { + u32 chain; + u16 prio; + u32 dest_chain; ++#ifndef __GENKSYMS__ ++ u32 flags; ++#endif + struct mlx5e_tc_flow_parse_attr *parse_attr; + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index bd750f069e6c..b8db12635730 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -37,6 +37,7 @@ + #include + #include "mlx5_core.h" + #include "eswitch.h" ++#include 
"eswitch_offloads_chains.h" + #include "rdma.h" + #include "en.h" + #include "fs_core.h" +@@ -47,10 +48,6 @@ + * one for multicast. + */ + #define MLX5_ESW_MISS_FLOWS (2) +- +-#define fdb_prio_table(esw, chain, prio, level) \ +- (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)] +- + #define UPLINK_REP_INDEX 0 + + static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, +@@ -62,37 +59,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, + return &esw->offloads.vport_reps[idx]; + } + +-static struct mlx5_flow_table * +-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); +-static void +-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); +- +-bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw) +-{ +- return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)); +-} +- +-u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) +-{ +- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) +- return FDB_TC_MAX_CHAIN; +- +- return 0; +-} +- +-u32 mlx5_eswitch_get_ft_chain(struct mlx5_eswitch *esw) +-{ +- return mlx5_eswitch_get_chain_range(esw) + 1; +-} +- +-u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) +-{ +- if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) +- return FDB_TC_MAX_PRIO; +- +- return 1; +-} +- + static bool + esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw, + const struct mlx5_vport *vport) +@@ -180,10 +146,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + } + + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { +- if (attr->dest_chain) { +- struct mlx5_flow_table *ft; ++ struct mlx5_flow_table *ft; + +- ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0); ++ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { ++ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; ++ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ++ dest[i].ft = esw->fdb_table.offloads.slow_fdb; ++ i++; ++ } else if (attr->dest_chain) { ++ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; ++ ft = mlx5_esw_chains_get_table(esw, attr->dest_chain, ++ 1, 0); + if (IS_ERR(ft)) { + rule = ERR_CAST(ft); + goto err_create_goto_table; +@@ -228,7 +201,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + flow_act.modify_hdr = attr->modify_hdr; + +- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split); ++ fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, ++ !!split); + if (IS_ERR(fdb)) { + rule = ERR_CAST(fdb); + goto err_esw_get; +@@ -247,10 +221,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + return rule; + + err_add_rule: +- esw_put_prio_table(esw, attr->chain, attr->prio, !!split); ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split); + err_esw_get: +- if (attr->dest_chain) +- esw_put_prio_table(esw, attr->dest_chain, 1, 0); ++ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain) ++ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0); + err_create_goto_table: + return rule; + } +@@ -267,13 +241,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_handle *rule; + int i; + +- fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0); ++ fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0); + if (IS_ERR(fast_fdb)) { + rule = ERR_CAST(fast_fdb); + goto err_get_fast; + } + +- fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 
1); ++ fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1); + if (IS_ERR(fwd_fdb)) { + rule = ERR_CAST(fwd_fdb); + goto err_get_fwd; +@@ -310,9 +284,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + + return rule; + add_err: +- esw_put_prio_table(esw, attr->chain, attr->prio, 1); ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1); + err_get_fwd: +- esw_put_prio_table(esw, attr->chain, attr->prio, 0); ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); + err_get_fast: + return rule; + } +@@ -337,12 +311,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, + atomic64_dec(&esw->offloads.num_flows); + + if (fwd_rule) { +- esw_put_prio_table(esw, attr->chain, attr->prio, 1); +- esw_put_prio_table(esw, attr->chain, attr->prio, 0); ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1); ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); + } else { +- esw_put_prio_table(esw, attr->chain, attr->prio, !!split); ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, ++ !!split); + if (attr->dest_chain) +- esw_put_prio_table(esw, attr->dest_chain, 1, 0); ++ mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0); + } + } + +@@ -456,7 +431,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, + if (err) + goto unlock; + +- attr->vlan_handled = false; ++ attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; + + vport = esw_vlan_action_get_vport(attr, push, pop); + +@@ -464,7 +439,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, + /* tracks VF --> wire rules without vlan push action */ + if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) { + vport->vlan_refcount++; +- attr->vlan_handled = true; ++ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; + } + + goto unlock; +@@ -495,7 +470,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, + } + out: + if (!err) +- attr->vlan_handled = true; ++ attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED; + unlock: + mutex_unlock(&esw->state_lock); + return err; +@@ -513,7 +488,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) + return 0; + +- if (!attr->vlan_handled) ++ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED)) + return 0; + + push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); +@@ -587,8 +562,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport, + dest.vport.num = vport; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + +- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, +- &flow_act, &dest, 1); ++ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, ++ spec, &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) + esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); + out: +@@ -829,8 +804,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) + dest.vport.num = esw->manager_vport; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + +- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, +- &flow_act, &dest, 1); ++ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, ++ spec, &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err); +@@ -844,8 +819,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) + dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, + outer_headers.dmac_47_16); + dmac_v[0] = 0x01; +- flow_rule = 
mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, +- &flow_act, &dest, 1); ++ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, ++ spec, &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err); +@@ -860,175 +835,6 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) + return err; + } + +-#define ESW_OFFLOADS_NUM_GROUPS 4 +- +-/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS), +- * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated +- * for each flow table pool. We can allocate up to 16M of each pool, +- * and we keep track of how much we used via put/get_sz_to_pool. +- * Firmware doesn't report any of this for now. +- * ESW_POOL is expected to be sorted from large to small +- */ +-#define ESW_SIZE (16 * 1024 * 1024) +-const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, +- 64 * 1024, 128 }; +- +-static int +-get_sz_from_pool(struct mlx5_eswitch *esw) +-{ +- int sz = 0, i; +- +- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { +- if (esw->fdb_table.offloads.fdb_left[i]) { +- --esw->fdb_table.offloads.fdb_left[i]; +- sz = ESW_POOLS[i]; +- break; +- } +- } +- +- return sz; +-} +- +-static void +-put_sz_to_pool(struct mlx5_eswitch *esw, int sz) +-{ +- int i; +- +- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { +- if (sz >= ESW_POOLS[i]) { +- ++esw->fdb_table.offloads.fdb_left[i]; +- break; +- } +- } +-} +- +-static struct mlx5_flow_table * +-create_next_size_table(struct mlx5_eswitch *esw, +- struct mlx5_flow_namespace *ns, +- u16 table_prio, +- int level, +- u32 flags) +-{ +- struct mlx5_flow_table_attr ft_attr = {}; +- struct mlx5_flow_table *fdb; +- int sz; +- +- sz = get_sz_from_pool(esw); +- if (!sz) +- return ERR_PTR(-ENOSPC); +- +- ft_attr.max_fte = sz; +- ft_attr.prio = table_prio; +- ft_attr.level = level; +- ft_attr.flags = flags; +- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; +- fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); +- if (IS_ERR(fdb)) { +- esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n", +- (int)PTR_ERR(fdb), table_prio, level, sz); +- put_sz_to_pool(esw, sz); +- } +- +- return fdb; +-} +- +-static struct mlx5_flow_table * +-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) +-{ +- struct mlx5_core_dev *dev = esw->dev; +- struct mlx5_flow_table *fdb = NULL; +- struct mlx5_flow_namespace *ns; +- int table_prio, l = 0; +- u32 flags = 0; +- +- if (chain == FDB_TC_SLOW_PATH_CHAIN) +- return esw->fdb_table.offloads.slow_fdb; +- +- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); +- +- fdb = fdb_prio_table(esw, chain, prio, level).fdb; +- if (fdb) { +- /* take ref on earlier levels as well */ +- while (level >= 0) +- fdb_prio_table(esw, chain, prio, level--).num_rules++; +- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); +- return fdb; +- } +- +- ns = mlx5_get_fdb_sub_ns(dev, chain); +- if (!ns) { +- esw_warn(dev, "Failed to get FDB sub namespace\n"); +- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); +- return ERR_PTR(-EOPNOTSUPP); +- } +- +- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) +- flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | +- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); +- +- table_prio = prio - 1; +- +- /* create earlier levels for correct fs_core lookup when +- * connecting tables +- */ +- for (l = 0; l <= level; l++) { +- if (fdb_prio_table(esw, 
chain, prio, l).fdb) { +- fdb_prio_table(esw, chain, prio, l).num_rules++; +- continue; +- } +- +- fdb = create_next_size_table(esw, ns, table_prio, l, flags); +- if (IS_ERR(fdb)) { +- l--; +- goto err_create_fdb; +- } +- +- fdb_prio_table(esw, chain, prio, l).fdb = fdb; +- fdb_prio_table(esw, chain, prio, l).num_rules = 1; +- } +- +- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); +- return fdb; +- +-err_create_fdb: +- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); +- if (l >= 0) +- esw_put_prio_table(esw, chain, prio, l); +- +- return fdb; +-} +- +-static void +-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) +-{ +- int l; +- +- if (chain == FDB_TC_SLOW_PATH_CHAIN) +- return; +- +- mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); +- +- for (l = level; l >= 0; l--) { +- if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0) +- continue; +- +- put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte); +- mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb); +- fdb_prio_table(esw, chain, prio, l).fdb = NULL; +- } +- +- mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); +-} +- +-static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw) +-{ +- /* If lazy creation isn't supported, deref the fast path tables */ +- if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) { +- esw_put_prio_table(esw, 0, 1, 1); +- esw_put_prio_table(esw, 0, 1, 0); +- } +-} +- + #define MAX_PF_SQ 256 + #define MAX_SQ_NVPORTS 32 + +@@ -1061,16 +867,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; +- u32 *flow_group_in, max_flow_counter; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb = NULL; +- int table_size, ix, err = 0, i; ++ u32 flags = 0, *flow_group_in; ++ int table_size, ix, err = 0; + struct mlx5_flow_group *g; +- u32 flags = 0, fdb_max; + void *match_criteria; + u8 *dmac; + + esw_debug(esw->dev, "Create offloads FDB Tables\n"); ++ + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; +@@ -1089,19 +895,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) + goto ns_err; + } + +- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | +- MLX5_CAP_GEN(dev, max_flow_counter_15_0); +- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); +- +- esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n", +- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), +- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, +- fdb_max); +- +- for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) +- esw->fdb_table.offloads.fdb_left[i] = +- ESW_POOLS[i] <= fdb_max ? 
ESW_SIZE / ESW_POOLS[i] : 0; +- + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + + MLX5_ESW_MISS_FLOWS + esw->total_vports; + +@@ -1124,16 +917,10 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) + } + esw->fdb_table.offloads.slow_fdb = fdb; + +- /* If lazy creation isn't supported, open the fast path tables now */ +- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) && +- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { +- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; +- esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n"); +- esw_get_prio_table(esw, 0, 1, 0); +- esw_get_prio_table(esw, 0, 1, 1); +- } else { +- esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n"); +- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; ++ err = mlx5_esw_chains_create(esw); ++ if (err) { ++ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err); ++ goto fdb_chains_err; + } + + /* create send-to-vport group */ +@@ -1225,7 +1012,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) + peer_miss_err: + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); + send_vport_err: +- esw_destroy_offloads_fast_fdb_tables(esw); ++ mlx5_esw_chains_destroy(esw); ++fdb_chains_err: + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); + slow_fdb_err: + /* Holds true only as long as DMFS is the default */ +@@ -1247,8 +1035,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); + ++ mlx5_esw_chains_destroy(esw); + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); +- esw_destroy_offloads_fast_fdb_tables(esw); + /* Holds true only as long as DMFS is the default */ + mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, + MLX5_FLOW_STEERING_MODE_DMFS); +@@ -2132,7 +1920,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev); + + memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); +- mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); + + err = esw_create_uplink_offloads_acl_tables(esw); + if (err) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +new file mode 100644 +index 000000000000..589b94df252a +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -0,0 +1,542 @@ ++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB ++// Copyright (c) 2020 Mellanox Technologies. ++ ++#include ++#include ++#include ++ ++#include "eswitch_offloads_chains.h" ++#include "mlx5_core.h" ++#include "fs_core.h" ++#include "eswitch.h" ++#include "en.h" ++ ++#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv) ++#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock) ++#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht) ++#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht) ++#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left) ++ ++#define ESW_OFFLOADS_NUM_GROUPS 4 ++ ++/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS), ++ * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated ++ * for each flow table pool. 
We can allocate up to 16M of each pool, ++ * and we keep track of how much we used via get_next_avail_sz_from_pool. ++ * Firmware doesn't report any of this for now. ++ * ESW_POOL is expected to be sorted from large to small and match firmware ++ * pools. ++ */ ++#define ESW_SIZE (16 * 1024 * 1024) ++const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, ++ 1 * 1024 * 1024, ++ 64 * 1024, ++ 128 }; ++ ++struct mlx5_esw_chains_priv { ++ struct rhashtable chains_ht; ++ struct rhashtable prios_ht; ++ /* Protects above chains_ht and prios_ht */ ++ struct mutex lock; ++ ++ int fdb_left[ARRAY_SIZE(ESW_POOLS)]; ++}; ++ ++struct fdb_chain { ++ struct rhash_head node; ++ ++ u32 chain; ++ ++ int ref; ++ ++ struct mlx5_eswitch *esw; ++}; ++ ++struct fdb_prio_key { ++ u32 chain; ++ u32 prio; ++ u32 level; ++}; ++ ++struct fdb_prio { ++ struct rhash_head node; ++ ++ struct fdb_prio_key key; ++ ++ int ref; ++ ++ struct fdb_chain *fdb_chain; ++ struct mlx5_flow_table *fdb; ++}; ++ ++static const struct rhashtable_params chain_params = { ++ .head_offset = offsetof(struct fdb_chain, node), ++ .key_offset = offsetof(struct fdb_chain, chain), ++ .key_len = sizeof_field(struct fdb_chain, chain), ++ .automatic_shrinking = true, ++}; ++ ++static const struct rhashtable_params prio_params = { ++ .head_offset = offsetof(struct fdb_prio, node), ++ .key_offset = offsetof(struct fdb_prio, key), ++ .key_len = sizeof_field(struct fdb_prio, key), ++ .automatic_shrinking = true, ++}; ++ ++bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw) ++{ ++ return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; ++} ++ ++u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw) ++{ ++ if (!mlx5_esw_chains_prios_supported(esw)) ++ return 1; ++ ++ return FDB_TC_MAX_CHAIN; ++} ++ ++u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw) ++{ ++ return mlx5_esw_chains_get_chain_range(esw) + 1; ++} ++ ++u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw) ++{ ++ if (!mlx5_esw_chains_prios_supported(esw)) ++ return 1; ++ ++ return FDB_TC_MAX_PRIO; ++} ++ ++static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw) ++{ ++ return FDB_TC_LEVELS_PER_PRIO; ++} ++ ++#define POOL_NEXT_SIZE 0 ++static int ++mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw, ++ int desired_size) ++{ ++ int i, found_i = -1; ++ ++ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) { ++ if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) { ++ found_i = i; ++ if (desired_size != POOL_NEXT_SIZE) ++ break; ++ } ++ } ++ ++ if (found_i != -1) { ++ --fdb_pool_left(esw)[found_i]; ++ return ESW_POOLS[found_i]; ++ } ++ ++ return 0; ++} ++ ++static void ++mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz) ++{ ++ int i; ++ ++ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) { ++ if (sz == ESW_POOLS[i]) { ++ ++fdb_pool_left(esw)[i]; ++ return; ++ } ++ } ++ ++ WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz); ++} ++ ++static void ++mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw) ++{ ++ u32 fdb_max; ++ int i; ++ ++ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size); ++ ++ for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) ++ fdb_pool_left(esw)[i] = ++ ESW_POOLS[i] <= fdb_max ? 
ESW_SIZE / ESW_POOLS[i] : 0; ++} ++ ++static struct mlx5_flow_table * ++mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw, ++ u32 chain, u32 prio, u32 level) ++{ ++ struct mlx5_flow_table_attr ft_attr = {}; ++ struct mlx5_flow_namespace *ns; ++ struct mlx5_flow_table *fdb; ++ int sz; ++ ++ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) ++ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | ++ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); ++ ++ sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE); ++ if (!sz) ++ return ERR_PTR(-ENOSPC); ++ ++ ft_attr.max_fte = sz; ++ ft_attr.level = level; ++ ft_attr.prio = prio - 1; ++ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; ++ ns = mlx5_get_fdb_sub_ns(esw->dev, chain); ++ ++ fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); ++ if (IS_ERR(fdb)) { ++ esw_warn(esw->dev, ++ "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n", ++ (int)PTR_ERR(fdb), chain, prio, level, sz); ++ mlx5_esw_chains_put_sz_to_pool(esw, sz); ++ return fdb; ++ } ++ ++ return fdb; ++} ++ ++static void ++mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw, ++ struct mlx5_flow_table *fdb) ++{ ++ mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte); ++ mlx5_destroy_flow_table(fdb); ++} ++ ++static struct fdb_chain * ++mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) ++{ ++ struct fdb_chain *fdb_chain = NULL; ++ int err; ++ ++ fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL); ++ if (!fdb_chain) ++ return ERR_PTR(-ENOMEM); ++ ++ fdb_chain->esw = esw; ++ fdb_chain->chain = chain; ++ ++ err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node, ++ chain_params); ++ if (err) ++ goto err_insert; ++ ++ return fdb_chain; ++ ++err_insert: ++ kvfree(fdb_chain); ++ return ERR_PTR(err); ++} ++ ++static void ++mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain) ++{ ++ struct mlx5_eswitch *esw = fdb_chain->esw; ++ ++ rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node, ++ chain_params); ++ kvfree(fdb_chain); ++} ++ ++static struct fdb_chain * ++mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain) ++{ ++ struct fdb_chain *fdb_chain; ++ ++ fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain, ++ chain_params); ++ if (!fdb_chain) { ++ fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain); ++ if (IS_ERR(fdb_chain)) ++ return fdb_chain; ++ } ++ ++ fdb_chain->ref++; ++ ++ return fdb_chain; ++} ++ ++static void ++mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain) ++{ ++ if (--fdb_chain->ref == 0) ++ mlx5_esw_chains_destroy_fdb_chain(fdb_chain); ++} ++ ++static struct fdb_prio * ++mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, ++ u32 chain, u32 prio, u32 level) ++{ ++ struct fdb_prio *fdb_prio = NULL; ++ struct fdb_chain *fdb_chain; ++ struct mlx5_flow_table *fdb; ++ int err; ++ ++ fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain); ++ if (IS_ERR(fdb_chain)) ++ return ERR_CAST(fdb_chain); ++ ++ fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL); ++ if (!fdb_prio) { ++ err = -ENOMEM; ++ goto err_alloc; ++ } ++ ++ fdb = mlx5_esw_chains_create_fdb_table(esw, fdb_chain->chain, prio, ++ level); ++ if (IS_ERR(fdb)) { ++ err = PTR_ERR(fdb); ++ goto err_create; ++ } ++ ++ fdb_prio->fdb_chain = fdb_chain; ++ fdb_prio->key.chain = chain; ++ fdb_prio->key.prio = prio; ++ fdb_prio->key.level = level; ++ fdb_prio->fdb = fdb; ++ ++ err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node, ++ prio_params); ++ if (err) ++ goto 
err_insert; ++ ++ return fdb_prio; ++ ++err_insert: ++ mlx5_esw_chains_destroy_fdb_table(esw, fdb); ++err_create: ++ kvfree(fdb_prio); ++err_alloc: ++ mlx5_esw_chains_put_fdb_chain(fdb_chain); ++ return ERR_PTR(err); ++} ++ ++static void ++mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw, ++ struct fdb_prio *fdb_prio) ++{ ++ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain; ++ ++ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node, ++ prio_params); ++ mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb); ++ mlx5_esw_chains_put_fdb_chain(fdb_chain); ++ kvfree(fdb_prio); ++} ++ ++struct mlx5_flow_table * ++mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, ++ u32 level) ++{ ++ struct mlx5_flow_table *prev_fts; ++ struct fdb_prio *fdb_prio; ++ struct fdb_prio_key key; ++ int l = 0; ++ ++ if ((chain > mlx5_esw_chains_get_chain_range(esw) && ++ chain != mlx5_esw_chains_get_ft_chain(esw)) || ++ prio > mlx5_esw_chains_get_prio_range(esw) || ++ level > mlx5_esw_chains_get_level_range(esw)) ++ return ERR_PTR(-EOPNOTSUPP); ++ ++ /* create earlier levels for correct fs_core lookup when ++ * connecting tables. ++ */ ++ for (l = 0; l < level; l++) { ++ prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l); ++ if (IS_ERR(prev_fts)) { ++ fdb_prio = ERR_CAST(prev_fts); ++ goto err_get_prevs; ++ } ++ } ++ ++ key.chain = chain; ++ key.prio = prio; ++ key.level = level; ++ ++ mutex_lock(&esw_chains_lock(esw)); ++ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key, ++ prio_params); ++ if (!fdb_prio) { ++ fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain, ++ prio, level); ++ if (IS_ERR(fdb_prio)) ++ goto err_create_prio; ++ } ++ ++ ++fdb_prio->ref; ++ mutex_unlock(&esw_chains_lock(esw)); ++ ++ return fdb_prio->fdb; ++ ++err_create_prio: ++ mutex_unlock(&esw_chains_lock(esw)); ++err_get_prevs: ++ while (--l >= 0) ++ mlx5_esw_chains_put_table(esw, chain, prio, l); ++ return ERR_CAST(fdb_prio); ++} ++ ++void ++mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, ++ u32 level) ++{ ++ struct fdb_prio *fdb_prio; ++ struct fdb_prio_key key; ++ ++ key.chain = chain; ++ key.prio = prio; ++ key.level = level; ++ ++ mutex_lock(&esw_chains_lock(esw)); ++ fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key, ++ prio_params); ++ if (!fdb_prio) ++ goto err_get_prio; ++ ++ if (--fdb_prio->ref == 0) ++ mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio); ++ mutex_unlock(&esw_chains_lock(esw)); ++ ++ while (level-- > 0) ++ mlx5_esw_chains_put_table(esw, chain, prio, level); ++ ++ return; ++ ++err_get_prio: ++ mutex_unlock(&esw_chains_lock(esw)); ++ WARN_ONCE(1, ++ "Couldn't find table: (chain: %d prio: %d level: %d)", ++ chain, prio, level); ++} ++ ++static int ++mlx5_esw_chains_init(struct mlx5_eswitch *esw) ++{ ++ struct mlx5_esw_chains_priv *chains_priv; ++ struct mlx5_core_dev *dev = esw->dev; ++ u32 max_flow_counter, fdb_max; ++ int err; ++ ++ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL); ++ if (!chains_priv) ++ return -ENOMEM; ++ esw_chains_priv(esw) = chains_priv; ++ ++ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | ++ MLX5_CAP_GEN(dev, max_flow_counter_15_0); ++ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); ++ ++ esw_debug(dev, ++ "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n", ++ max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max); ++ ++ mlx5_esw_chains_init_sz_pool(esw); ++ ++ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) && ++ 
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { ++ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; ++ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n"); ++ } else { ++ esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; ++ esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n", ++ mlx5_esw_chains_get_chain_range(esw), ++ mlx5_esw_chains_get_prio_range(esw)); ++ } ++ ++ err = rhashtable_init(&esw_chains_ht(esw), &chain_params); ++ if (err) ++ goto init_chains_ht_err; ++ ++ err = rhashtable_init(&esw_prios_ht(esw), &prio_params); ++ if (err) ++ goto init_prios_ht_err; ++ ++ mutex_init(&esw_chains_lock(esw)); ++ ++ return 0; ++ ++init_prios_ht_err: ++ rhashtable_destroy(&esw_chains_ht(esw)); ++init_chains_ht_err: ++ kfree(chains_priv); ++ return err; ++} ++ ++static void ++mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw) ++{ ++ mutex_destroy(&esw_chains_lock(esw)); ++ rhashtable_destroy(&esw_prios_ht(esw)); ++ rhashtable_destroy(&esw_chains_ht(esw)); ++ ++ kfree(esw_chains_priv(esw)); ++} ++ ++static int ++mlx5_esw_chains_open(struct mlx5_eswitch *esw) ++{ ++ struct mlx5_flow_table *ft; ++ int err; ++ ++ /* Always open the root for fast path */ ++ ft = mlx5_esw_chains_get_table(esw, 0, 1, 0); ++ if (IS_ERR(ft)) ++ return PTR_ERR(ft); ++ ++ /* Open level 1 for split rules now if prios isn't supported */ ++ if (!mlx5_esw_chains_prios_supported(esw)) { ++ ft = mlx5_esw_chains_get_table(esw, 0, 1, 1); ++ ++ if (IS_ERR(ft)) { ++ err = PTR_ERR(ft); ++ goto level_1_err; ++ } ++ } ++ ++ return 0; ++ ++level_1_err: ++ mlx5_esw_chains_put_table(esw, 0, 1, 0); ++ return err; ++} ++ ++static void ++mlx5_esw_chains_close(struct mlx5_eswitch *esw) ++{ ++ if (!mlx5_esw_chains_prios_supported(esw)) ++ mlx5_esw_chains_put_table(esw, 0, 1, 1); ++ mlx5_esw_chains_put_table(esw, 0, 1, 0); ++} ++ ++int ++mlx5_esw_chains_create(struct mlx5_eswitch *esw) ++{ ++ int err; ++ ++ err = mlx5_esw_chains_init(esw); ++ if (err) ++ return err; ++ ++ err = mlx5_esw_chains_open(esw); ++ if (err) ++ goto err_open; ++ ++ return 0; ++ ++err_open: ++ mlx5_esw_chains_cleanup(esw); ++ return err; ++} ++ ++void ++mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) ++{ ++ mlx5_esw_chains_close(esw); ++ mlx5_esw_chains_cleanup(esw); ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +new file mode 100644 +index 000000000000..52fadacab84d +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +@@ -0,0 +1,27 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2020 Mellanox Technologies. 
*/ ++ ++#ifndef __ML5_ESW_CHAINS_H__ ++#define __ML5_ESW_CHAINS_H__ ++ ++bool ++mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw); ++u32 ++mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw); ++u32 ++mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw); ++u32 ++mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw); ++ ++struct mlx5_flow_table * ++mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, ++ u32 level); ++void ++mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, ++ u32 level); ++ ++int mlx5_esw_chains_create(struct mlx5_eswitch *esw); ++void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); ++ ++#endif /* __ML5_ESW_CHAINS_H__ */ ++ +-- +2.13.6 + diff --git a/SOURCES/0215-netdrv-net-mlx5-E-Switch-Increase-number-of-chains-a.patch b/SOURCES/0215-netdrv-net-mlx5-E-Switch-Increase-number-of-chains-a.patch new file mode 100644 index 0000000..2e484a9 --- /dev/null +++ b/SOURCES/0215-netdrv-net-mlx5-E-Switch-Increase-number-of-chains-a.patch @@ -0,0 +1,488 @@ +From e4bdcb77308e795a58ba030a7c27d11a62e98515 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:36 -0400 +Subject: [PATCH 215/312] [netdrv] net/mlx5: E-Switch, Increase number of + chains and priorities + +Message-id: <20200519074934.6303-6-ahleihel@redhat.com> +Patchwork-id: 310507 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 05/63] net/mlx5: E-Switch, Increase number of chains and priorities +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.6-rc1 + +commit 278d51f24330718aefd7fe86996a6da66fd345e7 +Author: Paul Blakey +Date: Wed Nov 20 15:06:19 2019 +0200 + + net/mlx5: E-Switch, Increase number of chains and priorities + + Increase the number of chains and priorities to support + the whole range available in tc. + + We use unmanaged tables and ignore flow level to create more + tables than what we declared to fs_core steering, and we manage + the connections between the tables themselves. + + To support that we need FW with ignore_flow_level capability. + Otherwise the old behaviour will be used, where we are limited + by the number of levels we declared (4 chains, 16 prios). 
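    As a stand-alone illustration (not the driver code itself), the range
    expansion above reduces to the sketch below. The constants mirror the
    4-chain/16-prio layout mentioned in the message, and the two booleans
    stand in for the ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED flag and the
    firmware's ignore_flow_level bit; the helper names are made up for the
    example.

        #include <limits.h>
        #include <stdbool.h>

        #define FDB_TC_MAX_CHAIN 3   /* "4 chains" counting chain 0 */
        #define FDB_TC_MAX_PRIO  16

        struct esw_caps {
                bool chains_prios_supported;   /* chains/prios offload at all */
                bool ignore_flow_level;        /* FW bit this patch relies on */
        };

        /* With ignore_flow_level the tables are created unmanaged and linked
         * by explicit miss rules, so the advertised range is no longer bound
         * by the levels declared to fs_core.
         */
        unsigned int chain_range(const struct esw_caps *caps)
        {
                if (!caps->chains_prios_supported)
                        return 1;
                return caps->ignore_flow_level ? UINT_MAX - 1 : FDB_TC_MAX_CHAIN;
        }

        unsigned int prio_range(const struct esw_caps *caps)
        {
                if (!caps->chains_prios_supported)
                        return 1;
                return caps->ignore_flow_level ? UINT_MAX : FDB_TC_MAX_PRIO;
        }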
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3 +- + .../mellanox/mlx5/core/eswitch_offloads_chains.c | 238 ++++++++++++++++++++- + .../mellanox/mlx5/core/eswitch_offloads_chains.h | 3 + + 3 files changed, 232 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index b8db12635730..7c33ce7ec074 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -151,7 +151,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; +- dest[i].ft = esw->fdb_table.offloads.slow_fdb; ++ dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw); + i++; + } else if (attr->dest_chain) { + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; +@@ -275,6 +275,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + if (attr->outer_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + ++ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); + + if (IS_ERR(rule)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 589b94df252a..d569969afd9d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -16,6 +16,10 @@ + #define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht) + #define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht) + #define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left) ++#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb) ++#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb) ++#define fdb_ignore_flow_level_supported(esw) \ ++ (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level)) + + #define ESW_OFFLOADS_NUM_GROUPS 4 + +@@ -39,6 +43,8 @@ struct mlx5_esw_chains_priv { + /* Protects above chains_ht and prios_ht */ + struct mutex lock; + ++ struct mlx5_flow_table *tc_end_fdb; ++ + int fdb_left[ARRAY_SIZE(ESW_POOLS)]; + }; + +@@ -50,6 +56,7 @@ struct fdb_chain { + int ref; + + struct mlx5_eswitch *esw; ++ struct list_head prios_list; + }; + + struct fdb_prio_key { +@@ -60,6 +67,7 @@ struct fdb_prio_key { + + struct fdb_prio { + struct rhash_head node; ++ struct list_head list; + + struct fdb_prio_key key; + +@@ -67,6 +75,9 @@ struct fdb_prio { + + struct fdb_chain *fdb_chain; + struct mlx5_flow_table *fdb; ++ struct mlx5_flow_table *next_fdb; ++ struct mlx5_flow_group *miss_group; ++ struct mlx5_flow_handle *miss_rule; + }; + + static const struct rhashtable_params chain_params = { +@@ -93,6 +104,9 @@ u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw) + if (!mlx5_esw_chains_prios_supported(esw)) + return 1; + ++ if (fdb_ignore_flow_level_supported(esw)) ++ return UINT_MAX - 1; ++ + return FDB_TC_MAX_CHAIN; + } + +@@ -106,11 +120,17 @@ u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw) + if (!mlx5_esw_chains_prios_supported(esw)) + return 1; + ++ if (fdb_ignore_flow_level_supported(esw)) ++ return 
UINT_MAX; ++ + return FDB_TC_MAX_PRIO; + } + + static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw) + { ++ if (fdb_ignore_flow_level_supported(esw)) ++ return UINT_MAX; ++ + return FDB_TC_LEVELS_PER_PRIO; + } + +@@ -181,13 +201,40 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw, + sz = mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE); + if (!sz) + return ERR_PTR(-ENOSPC); +- + ft_attr.max_fte = sz; +- ft_attr.level = level; +- ft_attr.prio = prio - 1; +- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; +- ns = mlx5_get_fdb_sub_ns(esw->dev, chain); + ++ /* We use tc_slow_fdb(esw) as the table's next_ft till ++ * ignore_flow_level is allowed on FT creation and not just for FTEs. ++ * Instead caller should add an explicit miss rule if needed. ++ */ ++ ft_attr.next_ft = tc_slow_fdb(esw); ++ ++ /* The root table(chain 0, prio 1, level 0) is required to be ++ * connected to the previous prio (FDB_BYPASS_PATH if exists). ++ * We always create it, as a managed table, in order to align with ++ * fs_core logic. ++ */ ++ if (!fdb_ignore_flow_level_supported(esw) || ++ (chain == 0 && prio == 1 && level == 0)) { ++ ft_attr.level = level; ++ ft_attr.prio = prio - 1; ++ ns = mlx5_get_fdb_sub_ns(esw->dev, chain); ++ } else { ++ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED; ++ ft_attr.prio = FDB_TC_OFFLOAD; ++ /* Firmware doesn't allow us to create another level 0 table, ++ * so we create all unmanaged tables as level 1. ++ * ++ * To connect them, we use explicit miss rules with ++ * ignore_flow_level. Caller is responsible to create ++ * these rules (if needed). ++ */ ++ ft_attr.level = 1; ++ ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB); ++ } ++ ++ ft_attr.autogroup.num_reserved_entries = 2; ++ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; + fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); + if (IS_ERR(fdb)) { + esw_warn(esw->dev, +@@ -220,6 +267,7 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) + + fdb_chain->esw = esw; + fdb_chain->chain = chain; ++ INIT_LIST_HEAD(&fdb_chain->prios_list); + + err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node, + chain_params); +@@ -261,6 +309,79 @@ mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain) + return fdb_chain; + } + ++static struct mlx5_flow_handle * ++mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb, ++ struct mlx5_flow_table *next_fdb) ++{ ++ static const struct mlx5_flow_spec spec = {}; ++ struct mlx5_flow_destination dest = {}; ++ struct mlx5_flow_act act = {}; ++ ++ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND; ++ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; ++ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ++ dest.ft = next_fdb; ++ ++ return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1); ++} ++ ++static int ++mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio, ++ struct mlx5_flow_table *next_fdb) ++{ ++ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {}; ++ struct fdb_chain *fdb_chain = fdb_prio->fdb_chain; ++ struct fdb_prio *pos; ++ int n = 0, err; ++ ++ if (fdb_prio->key.level) ++ return 0; ++ ++ /* Iterate in reverse order until reaching the level 0 rule of ++ * the previous priority, adding all the miss rules first, so we can ++ * revert them if any of them fails. 
++ */ ++ pos = fdb_prio; ++ list_for_each_entry_continue_reverse(pos, ++ &fdb_chain->prios_list, ++ list) { ++ miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb, ++ next_fdb); ++ if (IS_ERR(miss_rules[n])) { ++ err = PTR_ERR(miss_rules[n]); ++ goto err_prev_rule; ++ } ++ ++ n++; ++ if (!pos->key.level) ++ break; ++ } ++ ++ /* Success, delete old miss rules, and update the pointers. */ ++ n = 0; ++ pos = fdb_prio; ++ list_for_each_entry_continue_reverse(pos, ++ &fdb_chain->prios_list, ++ list) { ++ mlx5_del_flow_rules(pos->miss_rule); ++ ++ pos->miss_rule = miss_rules[n]; ++ pos->next_fdb = next_fdb; ++ ++ n++; ++ if (!pos->key.level) ++ break; ++ } ++ ++ return 0; ++ ++err_prev_rule: ++ while (--n >= 0) ++ mlx5_del_flow_rules(miss_rules[n]); ++ ++ return err; ++} ++ + static void + mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain) + { +@@ -272,9 +393,15 @@ static struct fdb_prio * + mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, + u32 chain, u32 prio, u32 level) + { ++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); ++ struct mlx5_flow_handle *miss_rule = NULL; ++ struct mlx5_flow_group *miss_group; + struct fdb_prio *fdb_prio = NULL; ++ struct mlx5_flow_table *next_fdb; + struct fdb_chain *fdb_chain; + struct mlx5_flow_table *fdb; ++ struct list_head *pos; ++ u32 *flow_group_in; + int err; + + fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain); +@@ -282,18 +409,65 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, + return ERR_CAST(fdb_chain); + + fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL); +- if (!fdb_prio) { ++ flow_group_in = kvzalloc(inlen, GFP_KERNEL); ++ if (!fdb_prio || !flow_group_in) { + err = -ENOMEM; + goto err_alloc; + } + +- fdb = mlx5_esw_chains_create_fdb_table(esw, fdb_chain->chain, prio, +- level); ++ /* Chain's prio list is sorted by prio and level. ++ * And all levels of some prio point to the next prio's level 0. ++ * Example list (prio, level): ++ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0) ++ * In hardware, we will we have the following pointers: ++ * (3,0) -> (5,0) -> (7,0) -> Slow path ++ * (3,1) -> (5,0) ++ * (5,1) -> (7,0) ++ * (6,1) -> (7,0) ++ */ ++ ++ /* Default miss for each chain: */ ++ next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ? ++ tc_slow_fdb(esw) : ++ tc_end_fdb(esw); ++ list_for_each(pos, &fdb_chain->prios_list) { ++ struct fdb_prio *p = list_entry(pos, struct fdb_prio, list); ++ ++ /* exit on first pos that is larger */ ++ if (prio < p->key.prio || (prio == p->key.prio && ++ level < p->key.level)) { ++ /* Get next level 0 table */ ++ next_fdb = p->key.level == 0 ? 
p->fdb : p->next_fdb; ++ break; ++ } ++ } ++ ++ fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + goto err_create; + } + ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ++ fdb->max_fte - 2); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ++ fdb->max_fte - 1); ++ miss_group = mlx5_create_flow_group(fdb, flow_group_in); ++ if (IS_ERR(miss_group)) { ++ err = PTR_ERR(miss_group); ++ goto err_group; ++ } ++ ++ /* Add miss rule to next_fdb */ ++ miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb); ++ if (IS_ERR(miss_rule)) { ++ err = PTR_ERR(miss_rule); ++ goto err_miss_rule; ++ } ++ ++ fdb_prio->miss_group = miss_group; ++ fdb_prio->miss_rule = miss_rule; ++ fdb_prio->next_fdb = next_fdb; + fdb_prio->fdb_chain = fdb_chain; + fdb_prio->key.chain = chain; + fdb_prio->key.prio = prio; +@@ -305,13 +479,30 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, + if (err) + goto err_insert; + ++ list_add(&fdb_prio->list, pos->prev); ++ ++ /* Table is ready, connect it */ ++ err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb); ++ if (err) ++ goto err_update; ++ ++ kvfree(flow_group_in); + return fdb_prio; + ++err_update: ++ list_del(&fdb_prio->list); ++ rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node, ++ prio_params); + err_insert: ++ mlx5_del_flow_rules(miss_rule); ++err_miss_rule: ++ mlx5_destroy_flow_group(miss_group); ++err_group: + mlx5_esw_chains_destroy_fdb_table(esw, fdb); + err_create: +- kvfree(fdb_prio); + err_alloc: ++ kvfree(fdb_prio); ++ kvfree(flow_group_in); + mlx5_esw_chains_put_fdb_chain(fdb_chain); + return ERR_PTR(err); + } +@@ -322,8 +513,14 @@ mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw, + { + struct fdb_chain *fdb_chain = fdb_prio->fdb_chain; + ++ WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio, ++ fdb_prio->next_fdb)); ++ ++ list_del(&fdb_prio->list); + rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node, + prio_params); ++ mlx5_del_flow_rules(fdb_prio->miss_rule); ++ mlx5_destroy_flow_group(fdb_prio->miss_group); + mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb); + mlx5_esw_chains_put_fdb_chain(fdb_chain); + kvfree(fdb_prio); +@@ -415,6 +612,12 @@ mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, + chain, prio, level); + } + ++struct mlx5_flow_table * ++mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) ++{ ++ return tc_end_fdb(esw); ++} ++ + static int + mlx5_esw_chains_init(struct mlx5_eswitch *esw) + { +@@ -484,11 +687,21 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw) + struct mlx5_flow_table *ft; + int err; + +- /* Always open the root for fast path */ +- ft = mlx5_esw_chains_get_table(esw, 0, 1, 0); ++ /* Create tc_end_fdb(esw) which is the always created ft chain */ ++ ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw), ++ 1, 0); + if (IS_ERR(ft)) + return PTR_ERR(ft); + ++ tc_end_fdb(esw) = ft; ++ ++ /* Always open the root for fast path */ ++ ft = mlx5_esw_chains_get_table(esw, 0, 1, 0); ++ if (IS_ERR(ft)) { ++ err = PTR_ERR(ft); ++ goto level_0_err; ++ } ++ + /* Open level 1 for split rules now if prios isn't supported */ + if (!mlx5_esw_chains_prios_supported(esw)) { + ft = mlx5_esw_chains_get_table(esw, 0, 1, 1); +@@ -503,6 +716,8 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw) + + level_1_err: + mlx5_esw_chains_put_table(esw, 0, 1, 0); ++level_0_err: ++ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0); + return err; + } + +@@ -512,6 +727,7 @@ 
mlx5_esw_chains_close(struct mlx5_eswitch *esw) + if (!mlx5_esw_chains_prios_supported(esw)) + mlx5_esw_chains_put_table(esw, 0, 1, 1); + mlx5_esw_chains_put_table(esw, 0, 1, 0); ++ mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0); + } + + int +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +index 52fadacab84d..2e13097fe348 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +@@ -20,6 +20,9 @@ void + mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, + u32 level); + ++struct mlx5_flow_table * ++mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw); ++ + int mlx5_esw_chains_create(struct mlx5_eswitch *esw); + void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); + +-- +2.13.6 + diff --git a/SOURCES/0216-netdrv-net-mlx5-make-the-symbol-ESW_POOLS-static.patch b/SOURCES/0216-netdrv-net-mlx5-make-the-symbol-ESW_POOLS-static.patch new file mode 100644 index 0000000..74f7ec5 --- /dev/null +++ b/SOURCES/0216-netdrv-net-mlx5-make-the-symbol-ESW_POOLS-static.patch @@ -0,0 +1,66 @@ +From c20834d8c21e3c5a00840d9865dfec8a6acce15a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:37 -0400 +Subject: [PATCH 216/312] [netdrv] net/mlx5: make the symbol 'ESW_POOLS' static + +Message-id: <20200519074934.6303-7-ahleihel@redhat.com> +Patchwork-id: 310506 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 06/63] net/mlx5: make the symbol 'ESW_POOLS' static +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.6-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c + Context diff due to resolution of previous merge conflict at the + declaration of ESW_POOLS array. + +commit e15cf98ee8a76472144a19a24ca73d26fefa5237 +Author: Chen Wandun +Date: Mon Jan 20 20:41:53 2020 +0800 + + net/mlx5: make the symbol 'ESW_POOLS' static + + Fix the following sparse warning: + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c:35:20: warning: symbol 'ESW_POOLS' was not declared. Should it be static? + + Fixes: 39ac237ce009 ("net/mlx5: E-Switch, Refactor chains and priorities") + Signed-off-by: Chen Wandun + Acked-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index d569969afd9d..cdf435cd08fb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -32,10 +32,10 @@ + * pools. 
+ */ + #define ESW_SIZE (16 * 1024 * 1024) +-const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, +- 1 * 1024 * 1024, +- 64 * 1024, +- 128 }; ++static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, ++ 1 * 1024 * 1024, ++ 64 * 1024, ++ 128 }; + + struct mlx5_esw_chains_priv { + struct rhashtable chains_ht; +-- +2.13.6 + diff --git a/SOURCES/0217-netdrv-net-mlx5e-Eswitch-Use-per-vport-tables-for-mi.patch b/SOURCES/0217-netdrv-net-mlx5e-Eswitch-Use-per-vport-tables-for-mi.patch new file mode 100644 index 0000000..e051ac8 --- /dev/null +++ b/SOURCES/0217-netdrv-net-mlx5e-Eswitch-Use-per-vport-tables-for-mi.patch @@ -0,0 +1,435 @@ +From 1dc76baa2155965c13e6d9c5def7a34fe5d91430 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:38 -0400 +Subject: [PATCH 217/312] [netdrv] net/mlx5e: Eswitch, Use per vport tables for + mirroring + +Message-id: <20200519074934.6303-8-ahleihel@redhat.com> +Patchwork-id: 310511 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 07/63] net/mlx5e: Eswitch, Use per vport tables for mirroring +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 96e326878fa5e2727d14e9a23644119374619010 +Author: Eli Cohen +Date: Tue Jan 14 17:30:41 2020 +0200 + + net/mlx5e: Eswitch, Use per vport tables for mirroring + + When using port mirroring, we forward the traffic to another table and + use that table to forward to the mirrored vport. Since the hardware + loses the values of reg c, and in particular reg c0, we fail the match + on the input vport which previously existed in reg c0. To overcome this + situation, we use a set of per vport tables, positioned at the lowest + priority, and forward traffic to those tables. Since these tables are + per vport, we can avoid matching on reg c0. 
+ + Fixes: c01cfd0f1115 ("net/mlx5: E-Switch, Add match on vport metadata for rule in fast path") + Signed-off-by: Eli Cohen + Reviewed-by: Mark Bloch + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10 + + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 206 ++++++++++++++++++++- + .../mellanox/mlx5/core/eswitch_offloads_chains.c | 11 +- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 11 ++ + include/linux/mlx5/fs.h | 1 + + 5 files changed, 221 insertions(+), 18 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index dd7b9a96045c..255838c9ae5d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -49,6 +49,7 @@ + + /* The index of the last real chain (FT) + 1 as chain zero is valid as well */ + #define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) ++#define ESW_OFFLOADS_NUM_GROUPS 4 + + #define FDB_TC_MAX_PRIO 16 + #define FDB_TC_LEVELS_PER_PRIO 2 +@@ -206,6 +207,12 @@ struct mlx5_eswitch_fdb { + + #ifndef __GENKSYMS__ + struct mlx5_esw_chains_priv *esw_chains_priv; ++ struct { ++ DECLARE_HASHTABLE(table, 8); ++ /* Protects vports.table */ ++ struct mutex lock; ++ } vports; ++ + #else + struct { + struct mlx5_flow_table *fdb; +@@ -661,6 +668,9 @@ void + esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + struct mlx5_vport *vport); + ++int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw); ++void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw); ++ + #else /* CONFIG_MLX5_ESWITCH */ + /* eswitch API stubs */ + static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 7c33ce7ec074..8b7a2b095ec3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -50,6 +50,179 @@ + #define MLX5_ESW_MISS_FLOWS (2) + #define UPLINK_REP_INDEX 0 + ++/* Per vport tables */ ++ ++#define MLX5_ESW_VPORT_TABLE_SIZE 128 ++ ++/* This struct is used as a key to the hash table and we need it to be packed ++ * so hash result is consistent ++ */ ++struct mlx5_vport_key { ++ u32 chain; ++ u16 prio; ++ u16 vport; ++ u16 vhca_id; ++} __packed; ++ ++struct mlx5_vport_table { ++ struct hlist_node hlist; ++ struct mlx5_flow_table *fdb; ++ u32 num_rules; ++ struct mlx5_vport_key key; ++}; ++ ++static struct mlx5_flow_table * ++esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns) ++{ ++ struct mlx5_flow_table_attr ft_attr = {}; ++ struct mlx5_flow_table *fdb; ++ ++ ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; ++ ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE; ++ ft_attr.prio = FDB_PER_VPORT; ++ fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); ++ if (IS_ERR(fdb)) { ++ esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n", ++ PTR_ERR(fdb)); ++ } ++ ++ return fdb; ++} ++ ++static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw, ++ struct mlx5_esw_flow_attr *attr, ++ struct mlx5_vport_key *key) ++{ ++ key->vport = attr->in_rep->vport; ++ key->chain = attr->chain; ++ key->prio = attr->prio; ++ key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); ++ return jhash(key, sizeof(*key), 0); ++} ++ ++/* caller must hold vports.lock */ ++static 
struct mlx5_vport_table * ++esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key) ++{ ++ struct mlx5_vport_table *e; ++ ++ hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key) ++ if (!memcmp(&e->key, skey, sizeof(*skey))) ++ return e; ++ ++ return NULL; ++} ++ ++static void ++esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr) ++{ ++ struct mlx5_vport_table *e; ++ struct mlx5_vport_key key; ++ u32 hkey; ++ ++ mutex_lock(&esw->fdb_table.offloads.vports.lock); ++ hkey = flow_attr_to_vport_key(esw, attr, &key); ++ e = esw_vport_tbl_lookup(esw, &key, hkey); ++ if (!e || --e->num_rules) ++ goto out; ++ ++ hash_del(&e->hlist); ++ mlx5_destroy_flow_table(e->fdb); ++ kfree(e); ++out: ++ mutex_unlock(&esw->fdb_table.offloads.vports.lock); ++} ++ ++static struct mlx5_flow_table * ++esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr) ++{ ++ struct mlx5_core_dev *dev = esw->dev; ++ struct mlx5_flow_namespace *ns; ++ struct mlx5_flow_table *fdb; ++ struct mlx5_vport_table *e; ++ struct mlx5_vport_key skey; ++ u32 hkey; ++ ++ mutex_lock(&esw->fdb_table.offloads.vports.lock); ++ hkey = flow_attr_to_vport_key(esw, attr, &skey); ++ e = esw_vport_tbl_lookup(esw, &skey, hkey); ++ if (e) { ++ e->num_rules++; ++ goto out; ++ } ++ ++ e = kzalloc(sizeof(*e), GFP_KERNEL); ++ if (!e) { ++ fdb = ERR_PTR(-ENOMEM); ++ goto err_alloc; ++ } ++ ++ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); ++ if (!ns) { ++ esw_warn(dev, "Failed to get FDB namespace\n"); ++ fdb = ERR_PTR(-ENOENT); ++ goto err_ns; ++ } ++ ++ fdb = esw_vport_tbl_create(esw, ns); ++ if (IS_ERR(fdb)) ++ goto err_ns; ++ ++ e->fdb = fdb; ++ e->num_rules = 1; ++ e->key = skey; ++ hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey); ++out: ++ mutex_unlock(&esw->fdb_table.offloads.vports.lock); ++ return e->fdb; ++ ++err_ns: ++ kfree(e); ++err_alloc: ++ mutex_unlock(&esw->fdb_table.offloads.vports.lock); ++ return fdb; ++} ++ ++int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw) ++{ ++ struct mlx5_esw_flow_attr attr = {}; ++ struct mlx5_eswitch_rep rep = {}; ++ struct mlx5_flow_table *fdb; ++ struct mlx5_vport *vport; ++ int i; ++ ++ attr.prio = 1; ++ attr.in_rep = &rep; ++ mlx5_esw_for_all_vports(esw, i, vport) { ++ attr.in_rep->vport = vport->vport; ++ fdb = esw_vport_tbl_get(esw, &attr); ++ if (!fdb) ++ goto out; ++ } ++ return 0; ++ ++out: ++ mlx5_esw_vport_tbl_put(esw); ++ return PTR_ERR(fdb); ++} ++ ++void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw) ++{ ++ struct mlx5_esw_flow_attr attr = {}; ++ struct mlx5_eswitch_rep rep = {}; ++ struct mlx5_vport *vport; ++ int i; ++ ++ attr.prio = 1; ++ attr.in_rep = &rep; ++ mlx5_esw_for_all_vports(esw, i, vport) { ++ attr.in_rep->vport = vport->vport; ++ esw_vport_tbl_put(esw, &attr); ++ } ++} ++ ++/* End: Per vport tables */ ++ + static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, + u16 vport_num) + { +@@ -191,8 +364,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + i++; + } + +- mlx5_eswitch_set_rule_source_port(esw, spec, attr); +- + if (attr->outer_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + if (attr->inner_match_level != MLX5_MATCH_NONE) +@@ -201,8 +372,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + flow_act.modify_hdr = attr->modify_hdr; + +- fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, +- 
!!split); ++ if (split) { ++ fdb = esw_vport_tbl_get(esw, attr); ++ } else { ++ fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, ++ 0); ++ mlx5_eswitch_set_rule_source_port(esw, spec, attr); ++ } + if (IS_ERR(fdb)) { + rule = ERR_CAST(fdb); + goto err_esw_get; +@@ -221,7 +397,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + return rule; + + err_add_rule: +- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split); ++ if (split) ++ esw_vport_tbl_put(esw, attr); ++ else ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); + err_esw_get: + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain) + mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0); +@@ -247,7 +426,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + goto err_get_fast; + } + +- fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1); ++ fwd_fdb = esw_vport_tbl_get(esw, attr); + if (IS_ERR(fwd_fdb)) { + rule = ERR_CAST(fwd_fdb); + goto err_get_fwd; +@@ -285,7 +464,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + + return rule; + add_err: +- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1); ++ esw_vport_tbl_put(esw, attr); + err_get_fwd: + mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); + err_get_fast: +@@ -312,11 +491,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, + atomic64_dec(&esw->offloads.num_flows); + + if (fwd_rule) { +- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1); ++ esw_vport_tbl_put(esw, attr); + mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); + } else { +- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, +- !!split); ++ if (split) ++ esw_vport_tbl_put(esw, attr); ++ else ++ mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, ++ 0); + if (attr->dest_chain) + mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0); + } +@@ -1938,6 +2120,9 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + if (err) + goto create_fg_err; + ++ mutex_init(&esw->fdb_table.offloads.vports.lock); ++ hash_init(esw->fdb_table.offloads.vports.table); ++ + return 0; + + create_fg_err: +@@ -1954,6 +2139,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + + static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) + { ++ mutex_destroy(&esw->fdb_table.offloads.vports.lock); + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_table(esw); + esw_destroy_offloads_fdb_tables(esw); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index cdf435cd08fb..483186883ac4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -21,8 +21,6 @@ + #define fdb_ignore_flow_level_supported(esw) \ + (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level)) + +-#define ESW_OFFLOADS_NUM_GROUPS 4 +- + /* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS), + * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated + * for each flow table pool. 
We can allocate up to 16M of each pool, +@@ -704,12 +702,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw) + + /* Open level 1 for split rules now if prios isn't supported */ + if (!mlx5_esw_chains_prios_supported(esw)) { +- ft = mlx5_esw_chains_get_table(esw, 0, 1, 1); +- +- if (IS_ERR(ft)) { +- err = PTR_ERR(ft); ++ err = mlx5_esw_vport_tbl_get(esw); ++ if (err) + goto level_1_err; +- } + } + + return 0; +@@ -725,7 +720,7 @@ static void + mlx5_esw_chains_close(struct mlx5_eswitch *esw) + { + if (!mlx5_esw_chains_prios_supported(esw)) +- mlx5_esw_chains_put_table(esw, 0, 1, 1); ++ mlx5_esw_vport_tbl_put(esw); + mlx5_esw_chains_put_table(esw, 0, 1, 0); + mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 7cc21f08cbcc..344e5470a81c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2720,6 +2720,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) + goto out_err; + } + ++ /* We put this priority last, knowing that nothing will get here ++ * unless explicitly forwarded to. This is possible because the ++ * slow path tables have catch all rules and nothing gets passed ++ * those tables. ++ */ ++ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1); ++ if (IS_ERR(maj_prio)) { ++ err = PTR_ERR(maj_prio); ++ goto out_err; ++ } ++ + set_prio_attrs(steering->fdb_root_ns); + return 0; + +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index b918d9724fc2..b63d3a8b502e 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -86,6 +86,7 @@ enum { + FDB_TC_OFFLOAD, + FDB_FT_OFFLOAD, + FDB_SLOW_PATH, ++ FDB_PER_VPORT, + }; + + struct mlx5_pkt_reformat; +-- +2.13.6 + diff --git a/SOURCES/0218-netdrv-net-mlx5-E-Switch-Allow-goto-earlier-chain-if.patch b/SOURCES/0218-netdrv-net-mlx5-E-Switch-Allow-goto-earlier-chain-if.patch new file mode 100644 index 0000000..a21e19c --- /dev/null +++ b/SOURCES/0218-netdrv-net-mlx5-E-Switch-Allow-goto-earlier-chain-if.patch @@ -0,0 +1,88 @@ +From 1c159f98cdc32812823723ae87b853a0bfdb9e9e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:39 -0400 +Subject: [PATCH 218/312] [netdrv] net/mlx5: E-Switch, Allow goto earlier chain + if FW supports it + +Message-id: <20200519074934.6303-9-ahleihel@redhat.com> +Patchwork-id: 310510 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 08/63] net/mlx5: E-Switch, Allow goto earlier chain if FW supports it +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 297eaf5b952bcda4678ebc55177074d79263847f +Author: Roi Dayan +Date: Thu Feb 6 16:06:58 2020 +0200 + + net/mlx5: E-Switch, Allow goto earlier chain if FW supports it + + Mellanox FW can support this if ignore_flow_level capability exists. 
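    The corresponding check in the TC action parser reduces to the small
    sketch below; the function name and arguments are illustrative, while the
    condition matches the one added to parse_tc_fdb_actions(), where
    mlx5_esw_chains_backwards_supported() simply reports that firmware bit.

        #include <errno.h>
        #include <stdbool.h>

        /* Reject a goto to the same or an earlier chain unless the firmware
         * can ignore flow levels, in which case backward jumps are legal.
         */
        int validate_goto_chain(unsigned int cur_chain, unsigned int dest_chain,
                                bool backwards_supported)
        {
                if (!backwards_supported && dest_chain <= cur_chain)
                        return -EOPNOTSUPP;
                return 0;
        }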
+ + Signed-off-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 ++- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 5 +++++ + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h | 2 ++ + 3 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 24f7af806da2..c5b6081a55d6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3544,7 +3544,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); + return -EOPNOTSUPP; + } +- if (dest_chain <= attr->chain) { ++ if (!mlx5_esw_chains_backwards_supported(esw) && ++ dest_chain <= attr->chain) { + NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported"); + return -EOPNOTSUPP; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 483186883ac4..726d28ff0a65 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -97,6 +97,11 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw) + return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + } + ++bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw) ++{ ++ return fdb_ignore_flow_level_supported(esw); ++} ++ + u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw) + { + if (!mlx5_esw_chains_prios_supported(esw)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +index 2e13097fe348..4ae2baf2a7a1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +@@ -6,6 +6,8 @@ + + bool + mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw); ++bool ++mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw); + u32 + mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw); + u32 +-- +2.13.6 + diff --git a/SOURCES/0219-netdrv-net-mlx5e-Use-NL_SET_ERR_MSG_MOD-extack-for-e.patch b/SOURCES/0219-netdrv-net-mlx5e-Use-NL_SET_ERR_MSG_MOD-extack-for-e.patch new file mode 100644 index 0000000..8ec0f84 --- /dev/null +++ b/SOURCES/0219-netdrv-net-mlx5e-Use-NL_SET_ERR_MSG_MOD-extack-for-e.patch @@ -0,0 +1,110 @@ +From a7e18b503e4432e57b868bc0a9a359f2c0ab2d58 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:40 -0400 +Subject: [PATCH 219/312] [netdrv] net/mlx5e: Use NL_SET_ERR_MSG_MOD() extack + for errors + +Message-id: <20200519074934.6303-10-ahleihel@redhat.com> +Patchwork-id: 310512 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 09/63] net/mlx5e: Use NL_SET_ERR_MSG_MOD() extack for errors +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 61644c3de8a30245c1d4aae7f164175a0498ca76 +Author: Roi Dayan +Date: Tue Feb 18 15:30:58 2020 +0200 + + net/mlx5e: Use NL_SET_ERR_MSG_MOD() extack for errors 
+ + This to be consistent and adds the module name to the error message. + + Signed-off-by: Roi Dayan + Reviewed-by: Eli Cohen + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 21 ++++++++++++++------- + 1 file changed, 14 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index c5b6081a55d6..456d6e05388c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1173,7 +1173,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + int out_index; + + if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) { +- NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "E-switch priorities unsupported, upgrade FW"); + return -EOPNOTSUPP; + } + +@@ -1184,13 +1185,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + */ + max_chain = mlx5_esw_chains_get_chain_range(esw); + if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) { +- NL_SET_ERR_MSG(extack, "Requested chain is out of supported range"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "Requested chain is out of supported range"); + return -EOPNOTSUPP; + } + + max_prio = mlx5_esw_chains_get_prio_range(esw); + if (attr->prio > max_prio) { +- NL_SET_ERR_MSG(extack, "Requested priority is out of supported range"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "Requested priority is out of supported range"); + return -EOPNOTSUPP; + } + +@@ -3546,11 +3549,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + } + if (!mlx5_esw_chains_backwards_supported(esw) && + dest_chain <= attr->chain) { +- NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "Goto earlier chain isn't supported"); + return -EOPNOTSUPP; + } + if (dest_chain > max_chain) { +- NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "Requested destination chain is out of supported range"); + return -EOPNOTSUPP; + } + action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; +@@ -3600,7 +3605,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + + if (attr->dest_chain) { + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { +- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "Mirroring goto chain rules isn't supported"); + return -EOPNOTSUPP; + } + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; +@@ -3608,7 +3614,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + + if (!(attr->action & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { +- NL_SET_ERR_MSG(extack, "Rule must have at least one forward/drop action"); ++ NL_SET_ERR_MSG_MOD(extack, ++ "Rule must have at least one forward/drop action"); + return -EOPNOTSUPP; + } + +-- +2.13.6 + diff --git a/SOURCES/0220-netdrv-net-mlx5e-Reduce-number-of-arguments-in-slow-.patch b/SOURCES/0220-netdrv-net-mlx5e-Reduce-number-of-arguments-in-slow-.patch new file mode 100644 index 0000000..3a8dccd --- /dev/null +++ b/SOURCES/0220-netdrv-net-mlx5e-Reduce-number-of-arguments-in-slow-.patch @@ -0,0 +1,160 @@ +From 5f19fa2b27c6a54497dea02a5a1e09b05b63ffdf Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:41 -0400 +Subject: [PATCH 220/312] [netdrv] net/mlx5e: Reduce number of arguments in + slow path handling + +Message-id: 
<20200519074934.6303-11-ahleihel@redhat.com> +Patchwork-id: 310516 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 10/63] net/mlx5e: Reduce number of arguments in slow path handling +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 178f69b4776ea5e6c1dc1240d447d9c76e32c839 +Author: Eli Cohen +Date: Thu Feb 13 11:12:16 2020 +0200 + + net/mlx5e: Reduce number of arguments in slow path handling + + mlx5e_tc_offload_to_slow_path() and mlx5e_tc_unoffload_from_slow_path() + take an extra argument allocated on the stack of the caller but not used + by the caller. Avoid the extra argument and use local variable in the + function itself. + + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 43 ++++++++++++------------- + 1 file changed, 20 insertions(+), 23 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 456d6e05388c..c1619157e590 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1076,17 +1076,17 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, + static struct mlx5_flow_handle * + mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, +- struct mlx5_flow_spec *spec, +- struct mlx5_esw_flow_attr *slow_attr) ++ struct mlx5_flow_spec *spec) + { ++ struct mlx5_esw_flow_attr slow_attr; + struct mlx5_flow_handle *rule; + +- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); +- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; +- slow_attr->split_count = 0; +- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; ++ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr)); ++ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; ++ slow_attr.split_count = 0; ++ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; + +- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); ++ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr); + if (!IS_ERR(rule)) + flow_flag_set(flow, SLOW); + +@@ -1095,14 +1095,15 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + + static void + mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, +- struct mlx5e_tc_flow *flow, +- struct mlx5_esw_flow_attr *slow_attr) ++ struct mlx5e_tc_flow *flow) + { +- memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); +- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; +- slow_attr->split_count = 0; +- slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; +- mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); ++ struct mlx5_esw_flow_attr slow_attr; ++ ++ memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr)); ++ slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; ++ slow_attr.split_count = 0; ++ slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH; ++ mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr); + flow_flag_clear(flow, SLOW); + } + +@@ -1242,9 +1243,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + */ + if (!encap_valid) { + /* continue with goto slow path rule instead */ +- struct mlx5_esw_flow_attr slow_attr; +- +- flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr); ++ flow->rule[0] = 
mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec); + } else { + flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); + } +@@ -1275,7 +1274,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *attr = flow->esw_attr; +- struct mlx5_esw_flow_attr slow_attr; + int out_index; + + if (flow_flag_test(flow, NOT_READY)) { +@@ -1286,7 +1284,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, + + if (mlx5e_is_offloaded_flow(flow)) { + if (flow_flag_test(flow, SLOW)) +- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); ++ mlx5e_tc_unoffload_from_slow_path(esw, flow); + else + mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); + } +@@ -1315,7 +1313,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + struct list_head *flow_list) + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; +- struct mlx5_esw_flow_attr slow_attr, *esw_attr; ++ struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + struct mlx5e_tc_flow *flow; +@@ -1368,7 +1366,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + continue; + } + +- mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); ++ mlx5e_tc_unoffload_from_slow_path(esw, flow); + flow->rule[0] = rule; + /* was unset when slow path rule removed */ + flow_flag_set(flow, OFFLOADED); +@@ -1380,7 +1378,6 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, + struct list_head *flow_list) + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; +- struct mlx5_esw_flow_attr slow_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + struct mlx5e_tc_flow *flow; +@@ -1392,7 +1389,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, + spec = &flow->esw_attr->parse_attr->spec; + + /* update from encap rule to slow path rule */ +- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr); ++ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); + /* mark the flow's encap dest as non-valid */ + flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; + +-- +2.13.6 + diff --git a/SOURCES/0221-netdrv-net-mlx5e-Remove-redundant-comment-about-goto.patch b/SOURCES/0221-netdrv-net-mlx5e-Remove-redundant-comment-about-goto.patch new file mode 100644 index 0000000..a75cb7d --- /dev/null +++ b/SOURCES/0221-netdrv-net-mlx5e-Remove-redundant-comment-about-goto.patch @@ -0,0 +1,61 @@ +From c34e908bd20b47356100940fca3b4f76eb4156de Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:42 -0400 +Subject: [PATCH 221/312] [netdrv] net/mlx5e: Remove redundant comment about + goto slow path + +Message-id: <20200519074934.6303-12-ahleihel@redhat.com> +Patchwork-id: 310514 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 11/63] net/mlx5e: Remove redundant comment about goto slow path +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit bc1d75fa79860ec9d065cd3de041f86811d48563 +Author: Roi Dayan +Date: Thu Feb 13 14:19:50 2020 +0200 + + net/mlx5e: Remove redundant comment about goto slow path + + The code is self explanatory and makes the comment redundant. 
+ + Signed-off-by: Roi Dayan + Reviewed-by: Eli Cohen + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index c1619157e590..12773c35d261 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1241,12 +1241,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + * (1) there's no error + * (2) there's an encap action and we don't have valid neigh + */ +- if (!encap_valid) { +- /* continue with goto slow path rule instead */ ++ if (!encap_valid) + flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec); +- } else { ++ else + flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr); +- } + + if (IS_ERR(flow->rule[0])) + return PTR_ERR(flow->rule[0]); +-- +2.13.6 + diff --git a/SOURCES/0222-netdrv-net-mlx5-Verify-goto-chain-offload-support.patch b/SOURCES/0222-netdrv-net-mlx5-Verify-goto-chain-offload-support.patch new file mode 100644 index 0000000..cbef26d --- /dev/null +++ b/SOURCES/0222-netdrv-net-mlx5-Verify-goto-chain-offload-support.patch @@ -0,0 +1,135 @@ +From 0fb08035db113fc3f97b196e4191fbc32abf3313 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:44 -0400 +Subject: [PATCH 222/312] [netdrv] net/mlx5: Verify goto chain offload support + +Message-id: <20200519074934.6303-14-ahleihel@redhat.com> +Patchwork-id: 310515 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 13/63] net/mlx5: Verify goto chain offload support +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 2fbbc30da05d9bd32d7fefeef445db3edd28d0bd +Author: Eli Cohen +Date: Tue Feb 18 11:59:53 2020 +0200 + + net/mlx5: Verify goto chain offload support + + According to PRM, forward to flow table along with either packet + reformat or decap is supported only if reformat_and_fwd_to_table + capability is set for the flow table. + + Add dependency on the capability and pack all the conditions for "goto + chain" in a single function. + + Fix language in error message in case of not supporting forward to a + lower numbered flow table. 
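Distilled from the hunk below for readers skimming the message, the new PRM condition amounts to a check of this shape (rough sketch only; the in-tree logic lives in the new mlx5_validate_goto_chain()):

    static bool goto_with_reformat_allowed(struct mlx5_eswitch *esw, u32 actions)
    {
            /* Forward-to-table combined with packet reformat or decap is
             * valid only if the FDB reports reformat_and_fwd_to_table.
             */
            if (!(actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
                             MLX5_FLOW_CONTEXT_ACTION_DECAP)))
                    return true;
            return MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
                                              reformat_and_fwd_to_table);
    }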
+ + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 65 +++++++++++++++++-------- + 1 file changed, 45 insertions(+), 20 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 12773c35d261..9d6ac9a1461b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3314,6 +3314,45 @@ static bool is_duplicated_output_device(struct net_device *dev, + return false; + } + ++static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw, ++ struct mlx5e_tc_flow *flow, ++ const struct flow_action_entry *act, ++ u32 actions, ++ struct netlink_ext_ack *extack) ++{ ++ u32 max_chain = mlx5_esw_chains_get_chain_range(esw); ++ struct mlx5_esw_flow_attr *attr = flow->esw_attr; ++ bool ft_flow = mlx5e_is_ft_flow(flow); ++ u32 dest_chain = act->chain_index; ++ ++ if (ft_flow) { ++ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (!mlx5_esw_chains_backwards_supported(esw) && ++ dest_chain <= attr->chain) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Goto lower numbered chain isn't supported"); ++ return -EOPNOTSUPP; ++ } ++ if (dest_chain > max_chain) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Requested destination chain is out of supported range"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | ++ MLX5_FLOW_CONTEXT_ACTION_DECAP) && ++ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Goto chain is not allowed if action has reformat or decap"); ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ + static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct mlx5e_tc_flow *flow, +@@ -3534,29 +3573,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + case FLOW_ACTION_TUNNEL_DECAP: + action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; + break; +- case FLOW_ACTION_GOTO: { +- u32 dest_chain = act->chain_index; +- u32 max_chain = mlx5_esw_chains_get_chain_range(esw); ++ case FLOW_ACTION_GOTO: ++ err = mlx5_validate_goto_chain(esw, flow, act, action, ++ extack); ++ if (err) ++ return err; + +- if (ft_flow) { +- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported"); +- return -EOPNOTSUPP; +- } +- if (!mlx5_esw_chains_backwards_supported(esw) && +- dest_chain <= attr->chain) { +- NL_SET_ERR_MSG_MOD(extack, +- "Goto earlier chain isn't supported"); +- return -EOPNOTSUPP; +- } +- if (dest_chain > max_chain) { +- NL_SET_ERR_MSG_MOD(extack, +- "Requested destination chain is out of supported range"); +- return -EOPNOTSUPP; +- } + action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; +- attr->dest_chain = dest_chain; ++ attr->dest_chain = act->chain_index; + break; +- } + default: + NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); + return -EOPNOTSUPP; +-- +2.13.6 + diff --git a/SOURCES/0223-netdrv-net-mlx5e-Fix-an-IS_ERR-vs-NULL-check.patch b/SOURCES/0223-netdrv-net-mlx5e-Fix-an-IS_ERR-vs-NULL-check.patch new file mode 100644 index 0000000..f15a1db --- /dev/null +++ b/SOURCES/0223-netdrv-net-mlx5e-Fix-an-IS_ERR-vs-NULL-check.patch @@ -0,0 +1,54 @@ +From 05970587bb399a2746dd10cb62e460e7ef1ff528 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:45 -0400 +Subject: [PATCH 223/312] [netdrv] net/mlx5e: Fix an IS_ERR() vs NULL check + 
+Message-id: <20200519074934.6303-15-ahleihel@redhat.com> +Patchwork-id: 310520 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 14/63] net/mlx5e: Fix an IS_ERR() vs NULL check +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit d9fb932fde217b15eab2111605b05a05b47ea593 +Author: Dan Carpenter +Date: Wed Mar 4 17:22:24 2020 +0300 + + net/mlx5e: Fix an IS_ERR() vs NULL check + + The esw_vport_tbl_get() function returns error pointers on error. + + Fixes: 96e326878fa5 ("net/mlx5e: Eswitch, Use per vport tables for mirroring") + Signed-off-by: Dan Carpenter + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 8b7a2b095ec3..25665ff7e9c5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -196,7 +196,7 @@ int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw) + mlx5_esw_for_all_vports(esw, i, vport) { + attr.in_rep->vport = vport->vport; + fdb = esw_vport_tbl_get(esw, &attr); +- if (!fdb) ++ if (IS_ERR(fdb)) + goto out; + } + return 0; +-- +2.13.6 + diff --git a/SOURCES/0224-netdrv-net-mlx5-Change-the-name-of-steering-mode-par.patch b/SOURCES/0224-netdrv-net-mlx5-Change-the-name-of-steering-mode-par.patch new file mode 100644 index 0000000..8572591 --- /dev/null +++ b/SOURCES/0224-netdrv-net-mlx5-Change-the-name-of-steering-mode-par.patch @@ -0,0 +1,71 @@ +From 7ee3c323d2f6bdc73bab1ac8550386df8e1e0088 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:46 -0400 +Subject: [PATCH 224/312] [netdrv] net/mlx5: Change the name of steering mode + param id + +Message-id: <20200519074934.6303-16-ahleihel@redhat.com> +Patchwork-id: 310518 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 15/63] net/mlx5: Change the name of steering mode param id +Bugzilla: 1790203 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790203 +Upstream: v5.7-rc1 + +commit 8aa9f3be7369184d4bc23f804668c370515d7d0f +Author: Jianbo Liu +Date: Tue Jan 7 08:48:05 2020 +0000 + + net/mlx5: Change the name of steering mode param id + + The prefix should be "MLX5_DEVLINK_PARAM_ID_" for all in + mlx5_devlink_param_id enum. 
+ + Signed-off-by: Jianbo Liu + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +index d63ce3feb65c..a9e9027422d4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +@@ -177,11 +177,11 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id, + + enum mlx5_devlink_param_id { + MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, +- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, ++ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, + }; + + static const struct devlink_param mlx5_devlink_params[] = { +- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, ++ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, + "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set, +@@ -198,7 +198,7 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) + else + strcpy(value.vstr, "smfs"); + devlink_param_driverinit_value_set(devlink, +- MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE, ++ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, + value); + } + +-- +2.13.6 + diff --git a/SOURCES/0225-netdrv-net-mlx5e-Add-devlink-fdb_large_groups-parame.patch b/SOURCES/0225-netdrv-net-mlx5e-Add-devlink-fdb_large_groups-parame.patch new file mode 100644 index 0000000..5fdaaaa --- /dev/null +++ b/SOURCES/0225-netdrv-net-mlx5e-Add-devlink-fdb_large_groups-parame.patch @@ -0,0 +1,271 @@ +From 4db35731707a40fdf2915135cee66e4aaf1f2e5a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:47 -0400 +Subject: [PATCH 225/312] [netdrv] net/mlx5e: Add devlink fdb_large_groups + parameter + +Message-id: <20200519074934.6303-17-ahleihel@redhat.com> +Patchwork-id: 310513 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 16/63] net/mlx5e: Add devlink fdb_large_groups parameter +Bugzilla: 1790203 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790203 +Upstream: v5.7-rc1 +Conflicts: + - Documentation/networking/devlink/mlx5.rst + Drop changes to missing doc file. + +commit 87dac697a05a730d878f703a3c3dd78ac6c5bff4 +Author: Jianbo Liu +Date: Fri Dec 27 06:37:07 2019 +0000 + + net/mlx5e: Add devlink fdb_large_groups parameter + + Add a devlink parameter to control the number of large groups in a + autogrouped flow table. The default value is 15, and the range is between 1 + and 1024. + + The size of each large group can be calculated according to the following + formula: size = 4M / (fdb_large_groups + 1). + + Examples: + - Set the number of large groups to 20. + $ devlink dev param set pci/0000:82:00.0 name fdb_large_groups \ + cmode driverinit value 20 + + Then run devlink reload command to apply the new value. + $ devlink dev reload pci/0000:82:00.0 + + - Read the number of large groups in flow table. 
+ $ devlink dev param show pci/0000:82:00.0 name fdb_large_groups + pci/0000:82:00.0: + name fdb_large_groups type driver-specific + values: + cmode driverinit value 20 + + Signed-off-by: Jianbo Liu + Reviewed-by: Vlad Buslov + Reviewed-by: Roi Dayan + Acked-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 34 +++++++++++++++++++--- + drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 6 ++++ + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 22 ++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8 ++++- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 ++- + .../mellanox/mlx5/core/eswitch_offloads_chains.c | 4 +-- + 6 files changed, 70 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +index a9e9027422d4..757e7b91a394 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +@@ -175,10 +175,22 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id, + return 0; + } + +-enum mlx5_devlink_param_id { +- MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, +- MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, +-}; ++#ifdef CONFIG_MLX5_ESWITCH ++static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id, ++ union devlink_param_value val, ++ struct netlink_ext_ack *extack) ++{ ++ int group_num = val.vu32; ++ ++ if (group_num < 1 || group_num > 1024) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Unsupported group number, supported range is 1-1024"); ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++#endif + + static const struct devlink_param mlx5_devlink_params[] = { + DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, +@@ -186,6 +198,13 @@ static const struct devlink_param mlx5_devlink_params[] = { + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set, + mlx5_devlink_fs_mode_validate), ++#ifdef CONFIG_MLX5_ESWITCH ++ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, ++ "fdb_large_groups", DEVLINK_PARAM_TYPE_U32, ++ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), ++ NULL, NULL, ++ mlx5_devlink_large_group_num_validate), ++#endif + }; + + static void mlx5_devlink_set_params_init_values(struct devlink *devlink) +@@ -200,6 +219,13 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) + devlink_param_driverinit_value_set(devlink, + MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, + value); ++ ++#ifdef CONFIG_MLX5_ESWITCH ++ value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS; ++ devlink_param_driverinit_value_set(devlink, ++ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, ++ value); ++#endif + } + + int mlx5_devlink_register(struct devlink *devlink, struct device *dev) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +index d0ba03774ddf..f0de327a59be 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +@@ -6,6 +6,12 @@ + + #include + ++enum mlx5_devlink_param_id { ++ MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, ++ MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, ++ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, ++}; ++ + struct devlink *mlx5_devlink_alloc(void); + void mlx5_devlink_free(struct devlink *devlink); + int mlx5_devlink_register(struct devlink *devlink, struct device 
*dev); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 1541cdf877d2..6ae084b0e612 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -39,6 +39,7 @@ + #include "lib/eq.h" + #include "eswitch.h" + #include "fs_core.h" ++#include "devlink.h" + #include "ecpf.h" + + enum { +@@ -2006,6 +2007,25 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) + esw_disable_vport(esw, vport); + } + ++static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) ++{ ++ struct devlink *devlink = priv_to_devlink(esw->dev); ++ union devlink_param_value val; ++ int err; ++ ++ err = devlink_param_driverinit_value_get(devlink, ++ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, ++ &val); ++ if (!err) { ++ esw->params.large_group_num = val.vu32; ++ } else { ++ esw_warn(esw->dev, ++ "Devlink can't get param fdb_large_groups, uses default (%d).\n", ++ ESW_OFFLOADS_DEFAULT_NUM_GROUPS); ++ esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS; ++ } ++} ++ + int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) + { + int err; +@@ -2022,6 +2042,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) + if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) + esw_warn(esw->dev, "engress ACL is not supported by FW\n"); + ++ mlx5_eswitch_get_devlink_param(esw); ++ + esw_create_tsar(esw); + + esw->mode = mode; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 255838c9ae5d..bd229bda630e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -49,13 +49,14 @@ + + /* The index of the last real chain (FT) + 1 as chain zero is valid as well */ + #define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) +-#define ESW_OFFLOADS_NUM_GROUPS 4 + + #define FDB_TC_MAX_PRIO 16 + #define FDB_TC_LEVELS_PER_PRIO 2 + + #ifdef CONFIG_MLX5_ESWITCH + ++#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15 ++ + #define MLX5_MAX_UC_PER_VPORT(dev) \ + (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) + +@@ -295,6 +296,11 @@ struct mlx5_eswitch { + u16 manager_vport; + u16 first_host_vport; + struct mlx5_esw_functions esw_funcs; ++#ifndef __GENKSYMS__ ++ struct { ++ u32 large_group_num; ++ } params; ++#endif + }; + + void esw_offloads_disable(struct mlx5_eswitch *esw); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 25665ff7e9c5..4cb90c865ff7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -71,13 +71,15 @@ struct mlx5_vport_table { + struct mlx5_vport_key key; + }; + ++#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4 ++ + static struct mlx5_flow_table * + esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns) + { + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *fdb; + +- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; ++ ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS; + ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE; + ft_attr.prio = FDB_PER_VPORT; + fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 726d28ff0a65..6ffc4f041b6c 100644 +--- 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -237,7 +237,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw, + } + + ft_attr.autogroup.num_reserved_entries = 2; +- ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS; ++ ft_attr.autogroup.max_num_groups = esw->params.large_group_num; + fdb = mlx5_create_auto_grouped_flow_table_attr_(ns, &ft_attr); + if (IS_ERR(fdb)) { + esw_warn(esw->dev, +@@ -640,7 +640,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw) + + esw_debug(dev, + "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n", +- max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max); ++ max_flow_counter, esw->params.large_group_num, fdb_max); + + mlx5_esw_chains_init_sz_pool(esw); + +-- +2.13.6 + diff --git a/SOURCES/0226-netdrv-net-mlx5-Introduce-mapping-infra-for-mapping-.patch b/SOURCES/0226-netdrv-net-mlx5-Introduce-mapping-infra-for-mapping-.patch new file mode 100644 index 0000000..f7f1e3d --- /dev/null +++ b/SOURCES/0226-netdrv-net-mlx5-Introduce-mapping-infra-for-mapping-.patch @@ -0,0 +1,328 @@ +From f0c1b1467bd007fea4a5df57ef2527b22780e97e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:48 -0400 +Subject: [PATCH 226/312] [netdrv] net/mlx5: Introduce mapping infra for + mapping unique ids to data + +Message-id: <20200519074934.6303-18-ahleihel@redhat.com> +Patchwork-id: 310519 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 17/63] net/mlx5: Introduce mapping infra for mapping unique ids to data +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 7f30db1ed80db6053a818a2722f92d6c5d1073ee +Author: Paul Blakey +Date: Sun Feb 16 12:01:25 2020 +0200 + + net/mlx5: Introduce mapping infra for mapping unique ids to data + + Add a new interface for mapping data to a given id range (max_id), + and back again. It uses xarray as the id allocator and for finding a + given id. For locking it uses xa_lock (spin_lock) for add()/del(), + and rcu_read_lock for find(). + + This mapping interface also supports delaying the mapping removal via + a workqueue. This is for cases where we need the mapping to have + some grace period in regards to finding it back again, for example + for packets arriving from hardware that were marked with by a rule + with an old mapping that no longer exists. 
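A usage sketch of the new interface (illustrative only; the in-tree consumer arrives later in the series). The signatures are the ones declared in en/mapping.h below; the chain-id payload, the 16-bit id limit and the include paths are assumptions made for the example:

    #include <linux/err.h>
    #include "en/mapping.h"

    static int example_chain_mapping(void)
    {
            u32 chain = 3000, restored = 0, tag;
            struct mapping_ctx *ctx;
            int err;

            /* ids allocated in [1, 0xFFFF], removal delayed by the grace period */
            ctx = mapping_create(sizeof(u32), 0xFFFF, true);
            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            err = mapping_add(ctx, &chain, &tag);            /* data -> id */
            if (!err)
                    err = mapping_find(ctx, tag, &restored); /* id -> data */
            if (!err)
                    err = mapping_remove(ctx, tag);
            mapping_destroy(ctx);
            return err;
    }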
+ + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- + .../net/ethernet/mellanox/mlx5/core/en/mapping.c | 218 +++++++++++++++++++++ + .../net/ethernet/mellanox/mlx5/core/en/mapping.h | 27 +++ + 3 files changed, 246 insertions(+), 1 deletion(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index eb3ce7912730..be4e7470830f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -34,7 +34,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o + mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o + mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o + mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \ +- lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ ++ lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ + en/tc_tun_geneve.o diag/en_tc_tracepoint.o + + # +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c +new file mode 100644 +index 000000000000..ea321e528749 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c +@@ -0,0 +1,218 @@ ++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB ++/* Copyright (c) 2018 Mellanox Technologies */ ++ ++#include ++#include ++#include ++#include ++ ++#include "mapping.h" ++ ++#define MAPPING_GRACE_PERIOD 2000 ++ ++struct mapping_ctx { ++ struct xarray xarray; ++ DECLARE_HASHTABLE(ht, 8); ++ struct mutex lock; /* Guards hashtable and xarray */ ++ unsigned long max_id; ++ size_t data_size; ++ bool delayed_removal; ++ struct delayed_work dwork; ++ struct list_head pending_list; ++ spinlock_t pending_list_lock; /* Guards pending list */ ++}; ++ ++struct mapping_item { ++ struct rcu_head rcu; ++ struct list_head list; ++ unsigned long timeout; ++ struct hlist_node node; ++ int cnt; ++ u32 id; ++ char data[]; ++}; ++ ++int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id) ++{ ++ struct mapping_item *mi; ++ int err = -ENOMEM; ++ u32 hash_key; ++ ++ mutex_lock(&ctx->lock); ++ ++ hash_key = jhash(data, ctx->data_size, 0); ++ hash_for_each_possible(ctx->ht, mi, node, hash_key) { ++ if (!memcmp(data, mi->data, ctx->data_size)) ++ goto attach; ++ } ++ ++ mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL); ++ if (!mi) ++ goto err_alloc; ++ ++ memcpy(mi->data, data, ctx->data_size); ++ hash_add(ctx->ht, &mi->node, hash_key); ++ ++ err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id), ++ GFP_KERNEL); ++ if (err) ++ goto err_assign; ++attach: ++ ++mi->cnt; ++ *id = mi->id; ++ ++ mutex_unlock(&ctx->lock); ++ ++ return 0; ++ ++err_assign: ++ hash_del(&mi->node); ++ kfree(mi); ++err_alloc: ++ mutex_unlock(&ctx->lock); ++ ++ return err; ++} ++ ++static void mapping_remove_and_free(struct mapping_ctx *ctx, ++ struct mapping_item *mi) ++{ ++ xa_erase(&ctx->xarray, mi->id); ++ kfree_rcu(mi, rcu); ++} ++ ++static void mapping_free_item(struct mapping_ctx *ctx, ++ struct mapping_item *mi) ++{ ++ if (!ctx->delayed_removal) { ++ mapping_remove_and_free(ctx, mi); ++ return; ++ } ++ ++ mi->timeout = jiffies + 
msecs_to_jiffies(MAPPING_GRACE_PERIOD); ++ ++ spin_lock(&ctx->pending_list_lock); ++ list_add_tail(&mi->list, &ctx->pending_list); ++ spin_unlock(&ctx->pending_list_lock); ++ ++ schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD); ++} ++ ++int mapping_remove(struct mapping_ctx *ctx, u32 id) ++{ ++ unsigned long index = id; ++ struct mapping_item *mi; ++ int err = -ENOENT; ++ ++ mutex_lock(&ctx->lock); ++ mi = xa_load(&ctx->xarray, index); ++ if (!mi) ++ goto out; ++ err = 0; ++ ++ if (--mi->cnt > 0) ++ goto out; ++ ++ hash_del(&mi->node); ++ mapping_free_item(ctx, mi); ++out: ++ mutex_unlock(&ctx->lock); ++ ++ return err; ++} ++ ++int mapping_find(struct mapping_ctx *ctx, u32 id, void *data) ++{ ++ unsigned long index = id; ++ struct mapping_item *mi; ++ int err = -ENOENT; ++ ++ rcu_read_lock(); ++ mi = xa_load(&ctx->xarray, index); ++ if (!mi) ++ goto err_find; ++ ++ memcpy(data, mi->data, ctx->data_size); ++ err = 0; ++ ++err_find: ++ rcu_read_unlock(); ++ return err; ++} ++ ++static void ++mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list) ++{ ++ struct mapping_item *mi; ++ ++ list_for_each_entry(mi, list, list) ++ mapping_remove_and_free(ctx, mi); ++} ++ ++static void mapping_work_handler(struct work_struct *work) ++{ ++ unsigned long min_timeout = 0, now = jiffies; ++ struct mapping_item *mi, *next; ++ LIST_HEAD(pending_items); ++ struct mapping_ctx *ctx; ++ ++ ctx = container_of(work, struct mapping_ctx, dwork.work); ++ ++ spin_lock(&ctx->pending_list_lock); ++ list_for_each_entry_safe(mi, next, &ctx->pending_list, list) { ++ if (time_after(now, mi->timeout)) ++ list_move(&mi->list, &pending_items); ++ else if (!min_timeout || ++ time_before(mi->timeout, min_timeout)) ++ min_timeout = mi->timeout; ++ } ++ spin_unlock(&ctx->pending_list_lock); ++ ++ mapping_remove_and_free_list(ctx, &pending_items); ++ ++ if (min_timeout) ++ schedule_delayed_work(&ctx->dwork, abs(min_timeout - now)); ++} ++ ++static void mapping_flush_work(struct mapping_ctx *ctx) ++{ ++ if (!ctx->delayed_removal) ++ return; ++ ++ cancel_delayed_work_sync(&ctx->dwork); ++ mapping_remove_and_free_list(ctx, &ctx->pending_list); ++} ++ ++struct mapping_ctx * ++mapping_create(size_t data_size, u32 max_id, bool delayed_removal) ++{ ++ struct mapping_ctx *ctx; ++ ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ++ if (!ctx) ++ return ERR_PTR(-ENOMEM); ++ ++ ctx->max_id = max_id ? 
max_id : UINT_MAX; ++ ctx->data_size = data_size; ++ ++ if (delayed_removal) { ++ INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler); ++ INIT_LIST_HEAD(&ctx->pending_list); ++ spin_lock_init(&ctx->pending_list_lock); ++ ctx->delayed_removal = true; ++ } ++ ++ mutex_init(&ctx->lock); ++ xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1); ++ ++ return ctx; ++} ++ ++void mapping_destroy(struct mapping_ctx *ctx) ++{ ++ mapping_flush_work(ctx); ++ xa_destroy(&ctx->xarray); ++ mutex_destroy(&ctx->lock); ++ ++ kfree(ctx); ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h +new file mode 100644 +index 000000000000..285525cc5470 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h +@@ -0,0 +1,27 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2019 Mellanox Technologies */ ++ ++#ifndef __MLX5_MAPPING_H__ ++#define __MLX5_MAPPING_H__ ++ ++struct mapping_ctx; ++ ++int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id); ++int mapping_remove(struct mapping_ctx *ctx, u32 id); ++int mapping_find(struct mapping_ctx *ctx, u32 id, void *data); ++ ++/* mapping uses an xarray to map data to ids in add(), and for find(). ++ * For locking, it uses a internal xarray spin lock for add()/remove(), ++ * find() uses rcu_read_lock(). ++ * Choosing delayed_removal postpones the removal of a previously mapped ++ * id by MAPPING_GRACE_PERIOD milliseconds. ++ * This is to avoid races against hardware, where we mark the packet in ++ * hardware with a previous id, and quick remove() and add() reusing the same ++ * previous id. Then find() will get the new mapping instead of the old ++ * which was used to mark the packet. ++ */ ++struct mapping_ctx *mapping_create(size_t data_size, u32 max_id, ++ bool delayed_removal); ++void mapping_destroy(struct mapping_ctx *ctx); ++ ++#endif /* __MLX5_MAPPING_H__ */ +-- +2.13.6 + diff --git a/SOURCES/0227-infiniband-net-mlx5-E-Switch-Move-source-port-on-reg.patch b/SOURCES/0227-infiniband-net-mlx5-E-Switch-Move-source-port-on-reg.patch new file mode 100644 index 0000000..5c82e4f --- /dev/null +++ b/SOURCES/0227-infiniband-net-mlx5-E-Switch-Move-source-port-on-reg.patch @@ -0,0 +1,229 @@ +From 1d36a371eef28ade02138f1079eeecbcd8eb0741 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:49 -0400 +Subject: [PATCH 227/312] [infiniband] net/mlx5: E-Switch, Move source port on + reg_c0 to the upper 16 bits + +Message-id: <20200519074934.6303-19-ahleihel@redhat.com> +Patchwork-id: 310517 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 18/63] net/mlx5: E-Switch, Move source port on reg_c0 to the upper 16 bits +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 0f0d3827c0b4d6c3d219a73ea103077dc5bc17aa +Author: Paul Blakey +Date: Sun Feb 16 12:01:26 2020 +0200 + + net/mlx5: E-Switch, Move source port on reg_c0 to the upper 16 bits + + Multi chain support requires the miss path to continue the processing + from the last chain id, and for that we need to save the chain + miss tag (a mapping for 32bit chain id) on reg_c0 which will + come in a next patch. 
+ + Currently reg_c0 is exclusively used to store the source port + metadata, giving it 32bit, it is created from 16bits of vcha_id, + and 16bits of vport number. + + We will move this source port metadata to upper 16bits, and leave the + lower bits for the chain miss tag. We compress the reg_c0 source port + metadata to 16bits by taking 8 bits from vhca_id, and 8bits from + the vport number. + + Since we compress the vport number to 8bits statically, and leave two + top ids for special PF/ECPF numbers, we will only support a max of 254 + vports with this strategy. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/infiniband/hw/mlx5/main.c | 3 +- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 64 ++++++++++++++++++---- + include/linux/mlx5/eswitch.h | 32 ++++++++++- + 3 files changed, 87 insertions(+), 12 deletions(-) + +Index: src/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 2020-10-06 17:42:12.209236893 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 2020-10-06 17:42:12.226236747 +0200 +@@ -260,7 +260,8 @@ + attr->in_rep->vport)); + + misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); +- MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0); ++ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, ++ mlx5_eswitch_get_vport_metadata_mask()); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); +@@ -805,7 +806,8 @@ + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_2); +- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); ++ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, ++ mlx5_eswitch_get_vport_metadata_mask()); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + } else { +@@ -1035,8 +1037,9 @@ + match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS_2); + +- MLX5_SET_TO_ONES(fte_match_param, match_criteria, +- misc_parameters_2.metadata_reg_c_0); ++ MLX5_SET(fte_match_param, match_criteria, ++ misc_parameters_2.metadata_reg_c_0, ++ mlx5_eswitch_get_vport_metadata_mask()); + } else { + MLX5_SET(create_flow_group_in, flow_group_in, + match_criteria_enable, +@@ -1321,7 +1324,8 @@ + mlx5_eswitch_get_vport_metadata_for_match(esw, vport)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2); +- MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); ++ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, ++ mlx5_eswitch_get_vport_metadata_mask()); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + } else { +@@ -1791,11 +1795,19 @@ + static const struct mlx5_flow_spec spec = {}; + struct mlx5_flow_act flow_act = {}; + int err = 0; ++ u32 key; ++ ++ key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport); ++ key >>= ESW_SOURCE_PORT_METADATA_OFFSET; + + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); +- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); +- MLX5_SET(set_action_in, action, data, +- mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport)); ++ MLX5_SET(set_action_in, action, field, ++ 
MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); ++ MLX5_SET(set_action_in, action, data, key); ++ MLX5_SET(set_action_in, action, offset, ++ ESW_SOURCE_PORT_METADATA_OFFSET); ++ MLX5_SET(set_action_in, action, length, ++ ESW_SOURCE_PORT_METADATA_BITS); + + vport->ingress.offloads.modify_metadata = + mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, +@@ -2673,9 +2685,41 @@ + } + EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled); + +-u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, ++u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, + u16 vport_num) + { +- return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num; ++ u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0); ++ u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0); ++ u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); ++ u32 val; ++ ++ /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */ ++ WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS)); ++ ++ /* Trim vhca_id to ESW_VHCA_ID_BITS */ ++ vhca_id &= vhca_id_mask; ++ ++ /* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so they ++ * don't overlap with VF numbers, and themselves, after trimming. ++ */ ++ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) < ++ vport_num_mask - 1); ++ WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) < ++ vport_num_mask - 1); ++ WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) == ++ (MLX5_VPORT_ECPF & vport_num_mask)); ++ ++ /* Make sure that the VF vport_num fits ESW_VPORT_BITS and don't ++ * overlap with pf and ecpf. ++ */ ++ if (vport_num != MLX5_VPORT_UPLINK && ++ vport_num != MLX5_VPORT_ECPF) ++ WARN_ON_ONCE(vport_num >= vport_num_mask - 1); ++ ++ /* We can now trim vport_num to ESW_VPORT_BITS */ ++ vport_num &= vport_num_mask; ++ ++ val = (vhca_id << ESW_VPORT_BITS) | vport_num; ++ return val << (32 - ESW_SOURCE_PORT_METADATA_BITS); + } + EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); +Index: src/include/linux/mlx5/eswitch.h +=================================================================== +--- src.orig/include/linux/mlx5/eswitch.h 2020-10-06 17:41:30.578594215 +0200 ++++ src/include/linux/mlx5/eswitch.h 2020-10-06 17:42:12.226236747 +0200 +@@ -71,8 +71,32 @@ + mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); + + bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); ++ ++/* Reg C0 usage: ++ * Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) > ++ * ++ * Highest 8 bits of the reg c0 is the vhca_id, next 8 bits is vport_num, ++ * the rest (lowest 16 bits) is left for tc chain tag restoration. ++ * VHCA_ID + VPORT comprise the SOURCE_PORT matching. 
++ */ ++#define ESW_VHCA_ID_BITS 8 ++#define ESW_VPORT_BITS 8 ++#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS) ++#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS) ++#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS) ++ ++static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) ++{ ++ return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS); ++} ++ ++# ifndef __GENKSYMS__ ++u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, ++ u16 vport_num); ++# else + u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, + u16 vport_num); ++# endif + u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ + +@@ -94,11 +118,17 @@ + }; + + static inline u32 +-mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, ++mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, + int vport_num) + { + return 0; + }; ++ ++static inline u32 ++mlx5_eswitch_get_vport_metadata_mask(void) ++{ ++ return 0; ++} + #endif /* CONFIG_MLX5_ESWITCH */ + + #endif diff --git a/SOURCES/0228-netdrv-net-mlx5-E-Switch-Get-reg_c0-value-on-CQE.patch b/SOURCES/0228-netdrv-net-mlx5-E-Switch-Get-reg_c0-value-on-CQE.patch new file mode 100644 index 0000000..da2f51d --- /dev/null +++ b/SOURCES/0228-netdrv-net-mlx5-E-Switch-Get-reg_c0-value-on-CQE.patch @@ -0,0 +1,330 @@ +From 5090598d1fb3b4c5e13188a61519c1a76d07d0be Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:50 -0400 +Subject: [PATCH 228/312] [netdrv] net/mlx5: E-Switch, Get reg_c0 value on CQE + +Message-id: <20200519074934.6303-20-ahleihel@redhat.com> +Patchwork-id: 310522 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 19/63] net/mlx5: E-Switch, Get reg_c0 value on CQE +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/fs_core.c + Apply same conflict resolution as done in upstream merge commit: + bf3347c4d15e ("Merge branch 'ct-offload' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux") + ---> Keep OFFLOADS_MAX_FT defined to 2. + +commit 11b717d6152699623fb1133759f9b8f235935a51 +Author: Paul Blakey +Date: Sun Feb 16 12:01:27 2020 +0200 + + net/mlx5: E-Switch, Get reg_c0 value on CQE + + On RX side create a restore table in OFFLOADS namespace. + This table will match on all values for reg_c0 we will use, + and set it to the flow_tag. This flow tag can then be read on the CQE. + + As there is no copy action from reg c0 to flow tag, instead we have to + set the flow tag explictily. We add an API so callers can add all the used + reg_c0 values (tags) and for each of those we add a restore rule. + + This will be used in a following patch to save the miss chain mapping + tag on reg_c0 and from it restore the tc chain on the skb. 
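A caller-side sketch of the API added here (illustrative only; the consumer that installs a restore rule per used tag comes later in the series). Both helpers are the ones exported from eswitch_offloads.c in this patch:

    static int example_install_restore_rule(struct mlx5_eswitch *esw, u32 tag)
    {
            struct mlx5_flow_handle *rule;

            /* tags above the restore range cannot be matched back on miss */
            if (tag > esw_get_max_restore_tag(esw))
                    return -EINVAL;

            rule = esw_add_restore_rule(esw, tag);
            return PTR_ERR_OR_ZERO(rule);
    }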
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 16 +++ + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 147 +++++++++++++++++++-- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 +- + include/linux/mlx5/eswitch.h | 2 + + 4 files changed, 158 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index bd229bda630e..a384cab195c1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -230,6 +230,11 @@ struct mlx5_eswitch_fdb { + }; + + struct mlx5_esw_offload { ++#ifndef __GENKSYMS__ ++ struct mlx5_flow_table *ft_offloads_restore; ++ struct mlx5_flow_group *restore_group; ++#endif ++ + struct mlx5_flow_table *ft_offloads; + struct mlx5_flow_group *vport_rx_group; + struct mlx5_eswitch_rep *vport_reps; +@@ -677,6 +682,11 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw, + int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw); + void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw); + ++struct mlx5_flow_handle * ++esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag); ++u32 ++esw_get_max_restore_tag(struct mlx5_eswitch *esw); ++ + #else /* CONFIG_MLX5_ESWITCH */ + /* eswitch API stubs */ + static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } +@@ -692,6 +702,12 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) + + static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} + ++static struct mlx5_flow_handle * ++esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ + #endif /* CONFIG_MLX5_ESWITCH */ + + #endif /* __MLX5_ESWITCH_H__ */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 0db21244ca31..eab876a2208a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1022,6 +1022,54 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) + return err; + } + ++struct mlx5_flow_handle * ++esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) ++{ ++ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; ++ struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore; ++ struct mlx5_flow_context *flow_context; ++ struct mlx5_flow_handle *flow_rule; ++ struct mlx5_flow_destination dest; ++ struct mlx5_flow_spec *spec; ++ void *misc; ++ ++ spec = kzalloc(sizeof(*spec), GFP_KERNEL); ++ if (!spec) ++ return ERR_PTR(-ENOMEM); ++ ++ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ++ misc_parameters_2); ++ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, ++ ESW_CHAIN_TAG_METADATA_MASK); ++ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, ++ misc_parameters_2); ++ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag); ++ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; ++ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; ++ ++ flow_context = &spec->flow_context; ++ flow_context->flags |= FLOW_CONTEXT_HAS_TAG; ++ flow_context->flow_tag = tag; ++ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ++ dest.ft = esw->offloads.ft_offloads; 
++ ++ flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); ++ kfree(spec); ++ ++ if (IS_ERR(flow_rule)) ++ esw_warn(esw->dev, ++ "Failed to create restore rule for tag: %d, err(%d)\n", ++ tag, (int)PTR_ERR(flow_rule)); ++ ++ return flow_rule; ++} ++ ++u32 ++esw_get_max_restore_tag(struct mlx5_eswitch *esw) ++{ ++ return ESW_CHAIN_TAG_METADATA_MASK; ++} ++ + #define MAX_PF_SQ 256 + #define MAX_SQ_NVPORTS 32 + +@@ -1245,6 +1293,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) + } + + ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; ++ ft_attr.prio = 1; + + ft_offloads = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft_offloads)) { +@@ -1351,6 +1400,81 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, + return flow_rule; + } + ++static void esw_destroy_restore_table(struct mlx5_eswitch *esw) ++{ ++ struct mlx5_esw_offload *offloads = &esw->offloads; ++ ++ mlx5_destroy_flow_group(offloads->restore_group); ++ mlx5_destroy_flow_table(offloads->ft_offloads_restore); ++} ++ ++static int esw_create_restore_table(struct mlx5_eswitch *esw) ++{ ++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); ++ struct mlx5_flow_table_attr ft_attr = {}; ++ struct mlx5_core_dev *dev = esw->dev; ++ struct mlx5_flow_namespace *ns; ++ void *match_criteria, *misc; ++ struct mlx5_flow_table *ft; ++ struct mlx5_flow_group *g; ++ u32 *flow_group_in; ++ int err = 0; ++ ++ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); ++ if (!ns) { ++ esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ flow_group_in = kvzalloc(inlen, GFP_KERNEL); ++ if (!flow_group_in) { ++ err = -ENOMEM; ++ goto out_free; ++ } ++ ++ ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS; ++ ft = mlx5_create_flow_table(ns, &ft_attr); ++ if (IS_ERR(ft)) { ++ err = PTR_ERR(ft); ++ esw_warn(esw->dev, "Failed to create restore table, err %d\n", ++ err); ++ goto out_free; ++ } ++ ++ memset(flow_group_in, 0, inlen); ++ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, ++ match_criteria); ++ misc = MLX5_ADDR_OF(fte_match_param, match_criteria, ++ misc_parameters_2); ++ ++ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, ++ ESW_CHAIN_TAG_METADATA_MASK); ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ++ ft_attr.max_fte - 1); ++ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, ++ MLX5_MATCH_MISC_PARAMETERS_2); ++ g = mlx5_create_flow_group(ft, flow_group_in); ++ if (IS_ERR(g)) { ++ err = PTR_ERR(g); ++ esw_warn(dev, "Failed to create restore flow group, err: %d\n", ++ err); ++ goto err_group; ++ } ++ ++ esw->offloads.ft_offloads_restore = ft; ++ esw->offloads.restore_group = g; ++ ++ return 0; ++ ++err_group: ++ mlx5_destroy_flow_table(ft); ++out_free: ++ kvfree(flow_group_in); ++ ++ return err; ++} ++ + static int esw_offloads_start(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) + { +@@ -2122,13 +2246,17 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + if (err) + return err; + +- err = esw_create_offloads_fdb_tables(esw, total_vports); ++ err = esw_create_offloads_table(esw, total_vports); + if (err) +- goto create_fdb_err; ++ goto create_offloads_err; + +- err = esw_create_offloads_table(esw, total_vports); ++ err = esw_create_restore_table(esw); + if (err) +- goto create_ft_err; ++ goto create_restore_err; ++ ++ err = esw_create_offloads_fdb_tables(esw, total_vports); ++ 
if (err) ++ goto create_fdb_err; + + err = esw_create_vport_rx_group(esw, total_vports); + if (err) +@@ -2140,12 +2268,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + return 0; + + create_fg_err: +- esw_destroy_offloads_table(esw); +- +-create_ft_err: + esw_destroy_offloads_fdb_tables(esw); +- + create_fdb_err: ++ esw_destroy_restore_table(esw); ++create_restore_err: ++ esw_destroy_offloads_table(esw); ++create_offloads_err: + esw_destroy_uplink_offloads_acl_tables(esw); + + return err; +@@ -2155,8 +2283,9 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) + { + mutex_destroy(&esw->fdb_table.offloads.vports.lock); + esw_destroy_vport_rx_group(esw); +- esw_destroy_offloads_table(esw); + esw_destroy_offloads_fdb_tables(esw); ++ esw_destroy_restore_table(esw); ++ esw_destroy_offloads_table(esw); + esw_destroy_uplink_offloads_acl_tables(esw); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 344e5470a81c..f44e366ecfa8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -111,8 +111,8 @@ + #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) + + #define OFFLOADS_MAX_FT 2 +-#define OFFLOADS_NUM_PRIOS 1 +-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1) ++#define OFFLOADS_NUM_PRIOS 2 ++#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS) + + #define LAG_PRIO_NUM_LEVELS 1 + #define LAG_NUM_PRIOS 1 +diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h +index 9338c8cc6092..92e39e8a30f0 100644 +--- a/include/linux/mlx5/eswitch.h ++++ b/include/linux/mlx5/eswitch.h +@@ -84,6 +84,8 @@ bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); + #define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS) + #define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS) + #define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS) ++#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\ ++ 0) + + static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) + { +-- +2.13.6 + diff --git a/SOURCES/0229-netdrv-net-mlx5-E-Switch-Mark-miss-packets-with-new-.patch b/SOURCES/0229-netdrv-net-mlx5-E-Switch-Mark-miss-packets-with-new-.patch new file mode 100644 index 0000000..b1b09c2 --- /dev/null +++ b/SOURCES/0229-netdrv-net-mlx5-E-Switch-Mark-miss-packets-with-new-.patch @@ -0,0 +1,375 @@ +From d1efc329df288e6249c8652723b9e120b71cccec Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:51 -0400 +Subject: [PATCH 229/312] [netdrv] net/mlx5: E-Switch, Mark miss packets with + new chain id mapping + +Message-id: <20200519074934.6303-21-ahleihel@redhat.com> +Patchwork-id: 310523 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 20/63] net/mlx5: E-Switch, Mark miss packets with new chain id mapping +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 8f1e0b97cc708aa6a4d84b0431afc410feba00b6 +Author: Paul Blakey +Date: Sun Feb 16 12:01:28 2020 +0200 + + net/mlx5: E-Switch, Mark miss packets with new chain id mapping + + Currently, if we miss in hardware after jumping to 
some chain, + we continue in chain 0 in software. This is wrong, and with the new + tc skb extension we can now restore the chain id on the skb, so + tc can continue with in the correct chain. + + To restore the chain id in software after a miss in hardware, we create + a register mapping from 32bit chain ids to 16bit of reg_c0 (that + survives loopback), to 32bit chain ids. We then mark packets that + miss on some chain with the current chain id mapping on their reg_c0 + field. Using this mapping, we will support up to 64K concurrent + chains. + + This register survives loopback and gets to the CQE on flow_tag + via the eswitch restore rules. + + In next commit, we will reverse the mapping we got on the CQE + to a chain id and tell tc to continue in the sw chain where we + left off via the tc skb extension. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8 ++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 12 ++ + .../mellanox/mlx5/core/eswitch_offloads_chains.c | 130 ++++++++++++++++++++- + .../mellanox/mlx5/core/eswitch_offloads_chains.h | 4 +- + 4 files changed, 150 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 9d6ac9a1461b..a9142bde2dc6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -153,6 +153,14 @@ struct mlx5e_tc_flow_parse_attr { + #define MLX5E_TC_TABLE_NUM_GROUPS 4 + #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) + ++struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { ++ [CHAIN_TO_REG] = { ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, ++ .moffset = 0, ++ .mlen = 2, ++ }, ++}; ++ + struct mlx5e_hairpin { + struct mlx5_hairpin *pair; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 262cdb7b69b1..e2dbbae6d4d7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -91,6 +91,18 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); + + void mlx5e_tc_reoffload_flows_work(struct work_struct *work); + ++enum mlx5e_tc_attr_to_reg { ++ CHAIN_TO_REG, ++}; ++ ++struct mlx5e_tc_attr_to_reg_mapping { ++ int mfield; /* rewrite field */ ++ int moffset; /* offset of mfield */ ++ int mlen; /* bytes to rewrite/match */ ++}; ++ ++extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; ++ + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, + struct net_device *out_dev); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 6ffc4f041b6c..12ca184cd795 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -6,14 +6,17 @@ + #include + + #include "eswitch_offloads_chains.h" ++#include "en/mapping.h" + #include "mlx5_core.h" + #include "fs_core.h" + #include "eswitch.h" + #include "en.h" ++#include "en_tc.h" + + #define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv) + #define esw_chains_lock(esw) (esw_chains_priv(esw)->lock) + #define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht) 
++#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping) + #define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht) + #define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left) + #define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb) +@@ -42,6 +45,7 @@ struct mlx5_esw_chains_priv { + struct mutex lock; + + struct mlx5_flow_table *tc_end_fdb; ++ struct mapping_ctx *chains_mapping; + + int fdb_left[ARRAY_SIZE(ESW_POOLS)]; + }; +@@ -52,9 +56,12 @@ struct fdb_chain { + u32 chain; + + int ref; ++ int id; + + struct mlx5_eswitch *esw; + struct list_head prios_list; ++ struct mlx5_flow_handle *restore_rule; ++ struct mlx5_modify_hdr *miss_modify_hdr; + }; + + struct fdb_prio_key { +@@ -258,6 +265,70 @@ mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw, + mlx5_destroy_flow_table(fdb); + } + ++static int ++create_fdb_chain_restore(struct fdb_chain *fdb_chain) ++{ ++ char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)]; ++ struct mlx5_eswitch *esw = fdb_chain->esw; ++ struct mlx5_modify_hdr *mod_hdr; ++ u32 index; ++ int err; ++ ++ if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw)) ++ return 0; ++ ++ err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index); ++ if (err) ++ return err; ++ if (index == MLX5_FS_DEFAULT_FLOW_TAG) { ++ /* we got the special default flow tag id, so we won't know ++ * if we actually marked the packet with the restore rule ++ * we create. ++ * ++ * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0. ++ */ ++ err = mapping_add(esw_chains_mapping(esw), ++ &fdb_chain->chain, &index); ++ mapping_remove(esw_chains_mapping(esw), ++ MLX5_FS_DEFAULT_FLOW_TAG); ++ if (err) ++ return err; ++ } ++ ++ fdb_chain->id = index; ++ ++ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); ++ MLX5_SET(set_action_in, modact, field, ++ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield); ++ MLX5_SET(set_action_in, modact, offset, ++ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8); ++ MLX5_SET(set_action_in, modact, length, ++ mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8); ++ MLX5_SET(set_action_in, modact, data, fdb_chain->id); ++ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, ++ 1, modact); ++ if (IS_ERR(mod_hdr)) { ++ err = PTR_ERR(mod_hdr); ++ goto err_mod_hdr; ++ } ++ fdb_chain->miss_modify_hdr = mod_hdr; ++ ++ fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id); ++ if (IS_ERR(fdb_chain->restore_rule)) { ++ err = PTR_ERR(fdb_chain->restore_rule); ++ goto err_rule; ++ } ++ ++ return 0; ++ ++err_rule: ++ mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr); ++err_mod_hdr: ++ /* Datapath can't find this mapping, so we can safely remove it */ ++ mapping_remove(esw_chains_mapping(esw), fdb_chain->id); ++ return err; ++} ++ + static struct fdb_chain * + mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) + { +@@ -272,6 +343,10 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) + fdb_chain->chain = chain; + INIT_LIST_HEAD(&fdb_chain->prios_list); + ++ err = create_fdb_chain_restore(fdb_chain); ++ if (err) ++ goto err_restore; ++ + err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node, + chain_params); + if (err) +@@ -280,6 +355,12 @@ mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain) + return fdb_chain; + + err_insert: ++ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { ++ mlx5_del_flow_rules(fdb_chain->restore_rule); ++ mlx5_modify_header_dealloc(esw->dev, ++ 
fdb_chain->miss_modify_hdr); ++ } ++err_restore: + kvfree(fdb_chain); + return ERR_PTR(err); + } +@@ -291,6 +372,15 @@ mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain) + + rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node, + chain_params); ++ ++ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { ++ mlx5_del_flow_rules(fdb_chain->restore_rule); ++ mlx5_modify_header_dealloc(esw->dev, ++ fdb_chain->miss_modify_hdr); ++ ++ mapping_remove(esw_chains_mapping(esw), fdb_chain->id); ++ } ++ + kvfree(fdb_chain); + } + +@@ -313,10 +403,12 @@ mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain) + } + + static struct mlx5_flow_handle * +-mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb, ++mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain, ++ struct mlx5_flow_table *fdb, + struct mlx5_flow_table *next_fdb) + { + static const struct mlx5_flow_spec spec = {}; ++ struct mlx5_eswitch *esw = fdb_chain->esw; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act act = {}; + +@@ -325,6 +417,11 @@ mlx5_esw_chains_add_miss_rule(struct mlx5_flow_table *fdb, + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = next_fdb; + ++ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { ++ act.modify_hdr = fdb_chain->miss_modify_hdr; ++ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; ++ } ++ + return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1); + } + +@@ -348,7 +445,8 @@ mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio, + list_for_each_entry_continue_reverse(pos, + &fdb_chain->prios_list, + list) { +- miss_rules[n] = mlx5_esw_chains_add_miss_rule(pos->fdb, ++ miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain, ++ pos->fdb, + next_fdb); + if (IS_ERR(miss_rules[n])) { + err = PTR_ERR(miss_rules[n]); +@@ -462,7 +560,7 @@ mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw, + } + + /* Add miss rule to next_fdb */ +- miss_rule = mlx5_esw_chains_add_miss_rule(fdb, next_fdb); ++ miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb); + if (IS_ERR(miss_rule)) { + err = PTR_ERR(miss_rule); + goto err_miss_rule; +@@ -627,6 +725,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw) + struct mlx5_esw_chains_priv *chains_priv; + struct mlx5_core_dev *dev = esw->dev; + u32 max_flow_counter, fdb_max; ++ struct mapping_ctx *mapping; + int err; + + chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL); +@@ -663,10 +762,20 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw) + if (err) + goto init_prios_ht_err; + ++ mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw), ++ true); ++ if (IS_ERR(mapping)) { ++ err = PTR_ERR(mapping); ++ goto mapping_err; ++ } ++ esw_chains_mapping(esw) = mapping; ++ + mutex_init(&esw_chains_lock(esw)); + + return 0; + ++mapping_err: ++ rhashtable_destroy(&esw_prios_ht(esw)); + init_prios_ht_err: + rhashtable_destroy(&esw_chains_ht(esw)); + init_chains_ht_err: +@@ -678,6 +787,7 @@ static void + mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw) + { + mutex_destroy(&esw_chains_lock(esw)); ++ mapping_destroy(esw_chains_mapping(esw)); + rhashtable_destroy(&esw_prios_ht(esw)); + rhashtable_destroy(&esw_chains_ht(esw)); + +@@ -756,3 +866,17 @@ mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) + mlx5_esw_chains_close(esw); + mlx5_esw_chains_cleanup(esw); + } ++ ++int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, ++ u32 *chain) ++{ ++ int err; ++ ++ err = mapping_find(esw_chains_mapping(esw), tag, chain); ++ if (err) { ++ esw_warn(esw->dev, "Can't find 
chain for tag: %d\n", tag); ++ return -ENOENT; ++ } ++ ++ return 0; ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +index 4ae2baf2a7a1..e806d8de868e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +@@ -28,5 +28,7 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw); + int mlx5_esw_chains_create(struct mlx5_eswitch *esw); + void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); + +-#endif /* __ML5_ESW_CHAINS_H__ */ ++int ++mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain); + ++#endif /* __ML5_ESW_CHAINS_H__ */ +-- +2.13.6 + diff --git a/SOURCES/0230-netdrv-net-mlx5e-Rx-Split-rep-rx-mpwqe-handler-from-.patch b/SOURCES/0230-netdrv-net-mlx5e-Rx-Split-rep-rx-mpwqe-handler-from-.patch new file mode 100644 index 0000000..4cf648b --- /dev/null +++ b/SOURCES/0230-netdrv-net-mlx5e-Rx-Split-rep-rx-mpwqe-handler-from-.patch @@ -0,0 +1,149 @@ +From 3c41a6893c6cf8a0874f5cb36edf495b2d352733 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:52 -0400 +Subject: [PATCH 230/312] [netdrv] net/mlx5e: Rx, Split rep rx mpwqe handler + from nic + +Message-id: <20200519074934.6303-22-ahleihel@redhat.com> +Patchwork-id: 310524 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 21/63] net/mlx5e: Rx, Split rep rx mpwqe handler from nic +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit dfd9e7500cd4b21b61d65907e02880b20de929aa +Author: Paul Blakey +Date: Sun Feb 16 12:01:29 2020 +0200 + + net/mlx5e: Rx, Split rep rx mpwqe handler from nic + + Copy the current rep mpwqe rx handler which is also used by nic + profile. In the next patch, we will add rep specific logic, just + for the rep profile rx handler. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 +- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 2 + + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 54 ++++++++++++++++++++++++ + 3 files changed, 58 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index dcf97bd4fa49..1cb47297285e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1934,7 +1934,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { + .update_rx = mlx5e_update_rep_rx, + .update_stats = mlx5e_update_ndo_stats, + .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, +- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, ++ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, + .max_tc = 1, + .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), + .stats_grps = mlx5e_rep_stats_grps, +@@ -1954,7 +1954,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { + .update_stats = mlx5e_update_ndo_stats, + .update_carrier = mlx5e_update_carrier, + .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, +- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, ++ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, + .max_tc = MLX5E_MAX_NUM_TC, + .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), + .stats_grps = mlx5e_ul_rep_stats_grps, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +index 4bc5d5cd071c..9f44293ff153 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +@@ -191,6 +191,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); + void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); + + void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); ++void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, ++ struct mlx5_cqe64 *cqe); + + int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 1d606e13a336..85c2428c2f3e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1233,6 +1233,60 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + wq_cyc_pop: + mlx5_wq_cyc_pop(wq); + } ++ ++void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, ++ struct mlx5_cqe64 *cqe) ++{ ++ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); ++ u16 wqe_id = be16_to_cpu(cqe->wqe_id); ++ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id]; ++ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); ++ u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; ++ u32 head_offset = wqe_offset & (PAGE_SIZE - 1); ++ u32 page_idx = wqe_offset >> PAGE_SHIFT; ++ struct mlx5e_rx_wqe_ll *wqe; ++ struct mlx5_wq_ll *wq; ++ struct sk_buff *skb; ++ u16 cqe_bcnt; ++ ++ wi->consumed_strides += cstrides; ++ ++ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { ++ trigger_report(rq, cqe); ++ rq->stats->wqe_err++; ++ goto mpwrq_cqe_out; ++ } ++ ++ if (unlikely(mpwrq_is_filler_cqe(cqe))) { ++ struct mlx5e_rq_stats *stats = rq->stats; ++ ++ stats->mpwqe_filler_cqes++; ++ 
stats->mpwqe_filler_strides += cstrides; ++ goto mpwrq_cqe_out; ++ } ++ ++ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); ++ ++ skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, ++ mlx5e_skb_from_cqe_mpwrq_linear, ++ mlx5e_skb_from_cqe_mpwrq_nonlinear, ++ rq, wi, cqe_bcnt, head_offset, page_idx); ++ if (!skb) ++ goto mpwrq_cqe_out; ++ ++ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); ++ ++ napi_gro_receive(rq->cq.napi, skb); ++ ++mpwrq_cqe_out: ++ if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) ++ return; ++ ++ wq = &rq->mpwqe.wq; ++ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); ++ mlx5e_free_rx_mpwqe(rq, wi, true); ++ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); ++} + #endif + + struct sk_buff * +-- +2.13.6 + diff --git a/SOURCES/0231-netdrv-net-mlx5-E-Switch-Restore-chain-id-on-miss.patch b/SOURCES/0231-netdrv-net-mlx5-E-Switch-Restore-chain-id-on-miss.patch new file mode 100644 index 0000000..206b5db --- /dev/null +++ b/SOURCES/0231-netdrv-net-mlx5-E-Switch-Restore-chain-id-on-miss.patch @@ -0,0 +1,142 @@ +From 08538a7288a5d470ad96476b5aa2c9c4f995e06f Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:53 -0400 +Subject: [PATCH 231/312] [netdrv] net/mlx5: E-Switch, Restore chain id on miss + +Message-id: <20200519074934.6303-23-ahleihel@redhat.com> +Patchwork-id: 310525 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 22/63] net/mlx5: E-Switch, Restore chain id on miss +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit d6d27782864f7dd5584fefe050c030283cc40d71 +Author: Paul Blakey +Date: Sun Feb 16 12:01:30 2020 +0200 + + net/mlx5: E-Switch, Restore chain id on miss + + Chain ids are mapped to the lower part of reg C, and after loopback + are copied to to CQE via a restore rule's flow_tag. + + To let tc continue in the correct chain, we find the corresponding + chain id in the eswitch chain id <-> reg C mapping, and set the SKB's + tc extension chain to it. + + That tells tc to continue processing from this set chain. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 ++++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 43 +++++++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 2 ++ + 3 files changed, 51 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 85c2428c2f3e..1baeba194794 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1226,6 +1226,9 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + if (rep->vlan && skb_vlan_tag_present(skb)) + skb_vlan_pop(skb); + ++ if (!mlx5e_tc_rep_update_skb(cqe, skb)) ++ goto free_wqe; ++ + napi_gro_receive(rq->cq.napi, skb); + + free_wqe: +@@ -1276,6 +1279,9 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + ++ if (!mlx5e_tc_rep_update_skb(cqe, skb)) ++ goto mpwrq_cqe_out; ++ + napi_gro_receive(rq->cq.napi, skb); + + mpwrq_cqe_out: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index a9142bde2dc6..8523dedd8065 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -4393,3 +4393,46 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work) + } + mutex_unlock(&rpriv->unready_flows_lock); + } ++ ++bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, ++ struct sk_buff *skb) ++{ ++#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) ++ struct tc_skb_ext *tc_skb_ext; ++ struct mlx5_eswitch *esw; ++ struct mlx5e_priv *priv; ++ u32 chain = 0, reg_c0; ++ int err; ++ ++ reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); ++ if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) ++ reg_c0 = 0; ++ ++ if (!reg_c0) ++ return true; ++ ++ priv = netdev_priv(skb->dev); ++ esw = priv->mdev->priv.eswitch; ++ ++ err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain); ++ if (err) { ++ netdev_dbg(priv->netdev, ++ "Couldn't find chain for chain tag: %d, err: %d\n", ++ reg_c0, err); ++ return false; ++ } ++ ++ if (!chain) ++ return true; ++ ++ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); ++ if (!tc_skb_ext) { ++ WARN_ON_ONCE(1); ++ return false; ++ } ++ ++ tc_skb_ext->chain = chain; ++#endif /* CONFIG_NET_TC_SKB_EXT */ ++ ++ return true; ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index e2dbbae6d4d7..9d5fcf61650c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -106,6 +106,8 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, + struct net_device *out_dev); + ++bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb); ++ + #else /* CONFIG_MLX5_ESWITCH */ + static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } + static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} +-- +2.13.6 + diff --git a/SOURCES/0232-netdrv-net-mlx5e-Allow-re-allocating-mod-header-acti.patch b/SOURCES/0232-netdrv-net-mlx5e-Allow-re-allocating-mod-header-acti.patch new file mode 100644 index 0000000..4b428b5 --- /dev/null +++ 
b/SOURCES/0232-netdrv-net-mlx5e-Allow-re-allocating-mod-header-acti.patch @@ -0,0 +1,323 @@ +From 9d1ff6442c66e7ebecd88d00e2b7e4a35842c077 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:54 -0400 +Subject: [PATCH 232/312] [netdrv] net/mlx5e: Allow re-allocating mod header + actions + +Message-id: <20200519074934.6303-24-ahleihel@redhat.com> +Patchwork-id: 310527 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 23/63] net/mlx5e: Allow re-allocating mod header actions +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 6ae4a6a594b8f642906922b86d4c920c68e09404 +Author: Paul Blakey +Date: Sun Feb 16 12:01:31 2020 +0200 + + net/mlx5e: Allow re-allocating mod header actions + + Currently the size of the mod header actions array is deduced from the + number of parsed TC header rewrite actions. However, mod header actions + are also used for setting HW register values. Support the dynamic + reallocation of the mod header array as a pre-step for adding HW + registers mod actions. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 120 +++++++++++++----------- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 11 +++ + 2 files changed, 76 insertions(+), 55 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 8523dedd8065..747979c6601a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -144,9 +144,7 @@ struct mlx5e_tc_flow_parse_attr { + const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; + struct net_device *filter_dev; + struct mlx5_flow_spec spec; +- int num_mod_hdr_actions; +- int max_mod_hdr_actions; +- void *mod_hdr_actions; ++ struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; + int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; + }; + +@@ -369,10 +367,10 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, + struct mod_hdr_key key; + u32 hash_key; + +- num_actions = parse_attr->num_mod_hdr_actions; ++ num_actions = parse_attr->mod_hdr_acts.num_actions; + actions_size = MLX5_MH_ACT_SZ * num_actions; + +- key.actions = parse_attr->mod_hdr_actions; ++ key.actions = parse_attr->mod_hdr_acts.actions; + key.num_actions = num_actions; + + hash_key = hash_mod_hdr_info(&key); +@@ -962,7 +960,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); + flow_act.modify_hdr = attr->modify_hdr; +- kfree(parse_attr->mod_hdr_actions); ++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + if (err) + return err; + } +@@ -1232,7 +1230,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { + err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); +- kfree(parse_attr->mod_hdr_actions); ++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + if (err) + return err; + } +@@ -2389,25 +2387,26 @@ static struct mlx5_fields fields[] = { + 
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport), + }; + +-/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at +- * max from the SW pedit action. On success, attr->num_mod_hdr_actions +- * says how many HW actions were actually parsed. +- */ +-static int offload_pedit_fields(struct pedit_headers_action *hdrs, ++static int offload_pedit_fields(struct mlx5e_priv *priv, ++ int namespace, ++ struct pedit_headers_action *hdrs, + struct mlx5e_tc_flow_parse_attr *parse_attr, + u32 *action_flags, + struct netlink_ext_ack *extack) + { + struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; +- int i, action_size, nactions, max_actions, first, last, next_z; ++ int i, action_size, first, last, next_z; + void *headers_c, *headers_v, *action, *vals_p; + u32 *s_masks_p, *a_masks_p, s_mask, a_mask; ++ struct mlx5e_tc_mod_hdr_acts *mod_acts; + struct mlx5_fields *f; + unsigned long mask; + __be32 mask_be32; + __be16 mask_be16; ++ int err; + u8 cmd; + ++ mod_acts = &parse_attr->mod_hdr_acts; + headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec); + headers_v = get_match_headers_value(*action_flags, &parse_attr->spec); + +@@ -2417,11 +2416,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + add_vals = &hdrs[1].vals; + + action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); +- action = parse_attr->mod_hdr_actions + +- parse_attr->num_mod_hdr_actions * action_size; +- +- max_actions = parse_attr->max_mod_hdr_actions; +- nactions = parse_attr->num_mod_hdr_actions; + + for (i = 0; i < ARRAY_SIZE(fields); i++) { + bool skip; +@@ -2447,13 +2441,6 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + return -EOPNOTSUPP; + } + +- if (nactions == max_actions) { +- NL_SET_ERR_MSG_MOD(extack, +- "too many pedit actions, can't offload"); +- printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); +- return -EOPNOTSUPP; +- } +- + skip = false; + if (s_mask) { + void *match_mask = headers_c + f->match_offset; +@@ -2501,6 +2488,18 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + return -EOPNOTSUPP; + } + ++ err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts); ++ if (err) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "too many pedit actions, can't offload"); ++ mlx5_core_warn(priv->mdev, ++ "mlx5: parsed %d pedit actions, can't do more\n", ++ mod_acts->num_actions); ++ return err; ++ } ++ ++ action = mod_acts->actions + ++ (mod_acts->num_actions * action_size); + MLX5_SET(set_action_in, action, action_type, cmd); + MLX5_SET(set_action_in, action, field, f->field); + +@@ -2523,11 +2522,9 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs, + else if (f->field_bsize == 8) + MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first); + +- action += action_size; +- nactions++; ++ ++mod_acts->num_actions; + } + +- parse_attr->num_mod_hdr_actions = nactions; + return 0; + } + +@@ -2540,29 +2537,48 @@ static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev, + return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions); + } + +-static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, +- struct pedit_headers_action *hdrs, +- int namespace, +- struct mlx5e_tc_flow_parse_attr *parse_attr) ++int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, ++ int namespace, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) + { +- int nkeys, action_size, max_actions; ++ int action_size, new_num_actions, max_hw_actions; ++ size_t new_sz, 
old_sz; ++ void *ret; + +- nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits + +- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits; +- action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); ++ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions) ++ return 0; + +- max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace); +- /* can get up to crazingly 16 HW actions in 32 bits pedit SW key */ +- max_actions = min(max_actions, nkeys * 16); ++ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); + +- parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL); +- if (!parse_attr->mod_hdr_actions) ++ max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev, ++ namespace); ++ new_num_actions = min(max_hw_actions, ++ mod_hdr_acts->actions ? ++ mod_hdr_acts->max_actions * 2 : 1); ++ if (mod_hdr_acts->max_actions == new_num_actions) ++ return -ENOSPC; ++ ++ new_sz = action_size * new_num_actions; ++ old_sz = mod_hdr_acts->max_actions * action_size; ++ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL); ++ if (!ret) + return -ENOMEM; + +- parse_attr->max_mod_hdr_actions = max_actions; ++ memset(ret + old_sz, 0, new_sz - old_sz); ++ mod_hdr_acts->actions = ret; ++ mod_hdr_acts->max_actions = new_num_actions; ++ + return 0; + } + ++void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) ++{ ++ kfree(mod_hdr_acts->actions); ++ mod_hdr_acts->actions = NULL; ++ mod_hdr_acts->num_actions = 0; ++ mod_hdr_acts->max_actions = 0; ++} ++ + static const struct pedit_headers zero_masks = {}; + + static int parse_tc_pedit_action(struct mlx5e_priv *priv, +@@ -2614,13 +2630,8 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, + int err; + u8 cmd; + +- if (!parse_attr->mod_hdr_actions) { +- err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr); +- if (err) +- goto out_err; +- } +- +- err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack); ++ err = offload_pedit_fields(priv, namespace, hdrs, parse_attr, ++ action_flags, extack); + if (err < 0) + goto out_dealloc_parsed_actions; + +@@ -2640,8 +2651,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, + return 0; + + out_dealloc_parsed_actions: +- kfree(parse_attr->mod_hdr_actions); +-out_err: ++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + return err; + } + +@@ -2976,9 +2986,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, + /* in case all pedit actions are skipped, remove the MOD_HDR + * flag. + */ +- if (parse_attr->num_mod_hdr_actions == 0) { ++ if (parse_attr->mod_hdr_acts.num_actions == 0) { + action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; +- kfree(parse_attr->mod_hdr_actions); ++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + } + } + +@@ -3618,9 +3628,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + * flag. we might have set split_count either by pedit or + * pop/push. if there is no pop/push either, reset it too. 
+ */ +- if (parse_attr->num_mod_hdr_actions == 0) { ++ if (parse_attr->mod_hdr_acts.num_actions == 0) { + action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; +- kfree(parse_attr->mod_hdr_actions); ++ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) + attr->split_count = 0; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 9d5fcf61650c..3848ec7b6c1e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -108,6 +108,17 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, + + bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb); + ++struct mlx5e_tc_mod_hdr_acts { ++ int num_actions; ++ int max_actions; ++ void *actions; ++}; ++ ++int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, ++ int namespace, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); ++void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); ++ + #else /* CONFIG_MLX5_ESWITCH */ + static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } + static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} +-- +2.13.6 + diff --git a/SOURCES/0233-netdrv-net-mlx5e-Move-tc-tunnel-parsing-logic-with-t.patch b/SOURCES/0233-netdrv-net-mlx5e-Move-tc-tunnel-parsing-logic-with-t.patch new file mode 100644 index 0000000..69c2b79 --- /dev/null +++ b/SOURCES/0233-netdrv-net-mlx5e-Move-tc-tunnel-parsing-logic-with-t.patch @@ -0,0 +1,327 @@ +From af5a918bec18ded5235fe365baf92c6978e36369 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:55 -0400 +Subject: [PATCH 233/312] [netdrv] net/mlx5e: Move tc tunnel parsing logic with + the rest at tc_tun module + +Message-id: <20200519074934.6303-25-ahleihel@redhat.com> +Patchwork-id: 310526 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 24/63] net/mlx5e: Move tc tunnel parsing logic with the rest at tc_tun module +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit ea4cd837b99df6693c3aa067ade528f62544c18d +Author: Paul Blakey +Date: Sun Feb 16 12:01:32 2020 +0200 + + net/mlx5e: Move tc tunnel parsing logic with the rest at tc_tun module + + Currently, tunnel parsing is split between en_tc and tc_tun. The next + patch will replace the tunnel fields matching with a register match, + and will not need this parsing. + + Move the tunnel parsing logic to tc_tun as a pre-step for skipping + it in the next patch. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 112 ++++++++++++++++++++- + .../net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 3 +- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 109 +------------------- + 3 files changed, 112 insertions(+), 112 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index b855933f6bec..ae497981ce58 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -469,10 +469,15 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, +- void *headers_c, +- void *headers_v, u8 *match_level) ++ u8 *match_level) + { + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev); ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ++ outer_headers); ++ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, ++ outer_headers); ++ struct netlink_ext_ack *extack = f->common.extack; + int err = 0; + + if (!tunnel) { +@@ -499,6 +504,109 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, + goto out; + } + ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { ++ struct flow_match_control match; ++ u16 addr_type; ++ ++ flow_rule_match_enc_control(rule, &match); ++ addr_type = match.key->addr_type; ++ ++ /* For tunnel addr_type used same key id`s as for non-tunnel */ ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { ++ struct flow_match_ipv4_addrs match; ++ ++ flow_rule_match_enc_ipv4_addrs(rule, &match); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ++ src_ipv4_src_ipv6.ipv4_layout.ipv4, ++ ntohl(match.mask->src)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ++ src_ipv4_src_ipv6.ipv4_layout.ipv4, ++ ntohl(match.key->src)); ++ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ++ dst_ipv4_dst_ipv6.ipv4_layout.ipv4, ++ ntohl(match.mask->dst)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ++ dst_ipv4_dst_ipv6.ipv4_layout.ipv4, ++ ntohl(match.key->dst)); ++ ++ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ++ ethertype); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ++ ETH_P_IP); ++ } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { ++ struct flow_match_ipv6_addrs match; ++ ++ flow_rule_match_enc_ipv6_addrs(rule, &match); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, ++ src_ipv4_src_ipv6.ipv6_layout.ipv6), ++ &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ++ ipv6)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ src_ipv4_src_ipv6.ipv6_layout.ipv6), ++ &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ++ ipv6)); ++ ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, ++ dst_ipv4_dst_ipv6.ipv6_layout.ipv6), ++ &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ++ ipv6)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ dst_ipv4_dst_ipv6.ipv6_layout.ipv6), ++ &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ++ ipv6)); ++ ++ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ++ ethertype); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ++ ETH_P_IPV6); ++ } ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { ++ struct flow_match_ip match; ++ 
++ flow_rule_match_enc_ip(rule, &match); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, ++ match.mask->tos & 0x3); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, ++ match.key->tos & 0x3); ++ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, ++ match.mask->tos >> 2); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, ++ match.key->tos >> 2); ++ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, ++ match.mask->ttl); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, ++ match.key->ttl); ++ ++ if (match.mask->ttl && ++ !MLX5_CAP_ESW_FLOWTABLE_FDB ++ (priv->mdev, ++ ft_field_support.outer_ipv4_ttl)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Matching on TTL is not supported"); ++ err = -EOPNOTSUPP; ++ goto out; ++ } ++ } ++ ++ /* Enforce DMAC when offloading incoming tunneled flows. ++ * Flow counters require a match on the DMAC. ++ */ ++ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); ++ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); ++ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ dmac_47_16), priv->netdev->dev_addr); ++ ++ /* let software handle IP fragments */ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); ++ ++ return 0; ++ + out: + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +index 6f9a78c85ffd..1630f0ec3ad7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +@@ -76,8 +76,7 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, +- void *headers_c, +- void *headers_v, u8 *match_level); ++ u8 *match_level); + + int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 747979c6601a..3d0a5c63d083 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1675,122 +1675,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, + struct net_device *filter_dev, u8 *match_level) + { + struct netlink_ext_ack *extack = f->common.extack; +- void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, +- outer_headers); +- void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, +- outer_headers); +- struct flow_rule *rule = flow_cls_offload_flow_rule(f); + int err; + +- err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, +- headers_c, headers_v, match_level); ++ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, match_level); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "failed to parse tunnel attributes"); + return err; + } + +- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { +- struct flow_match_control match; +- u16 addr_type; +- +- flow_rule_match_enc_control(rule, &match); +- addr_type = match.key->addr_type; +- +- /* For tunnel addr_type used same key id`s as for non-tunnel */ +- if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { +- struct flow_match_ipv4_addrs match; +- +- flow_rule_match_enc_ipv4_addrs(rule, &match); +- MLX5_SET(fte_match_set_lyr_2_4, headers_c, +- src_ipv4_src_ipv6.ipv4_layout.ipv4, +- ntohl(match.mask->src)); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, +- src_ipv4_src_ipv6.ipv4_layout.ipv4, +- ntohl(match.key->src)); +- +- 
MLX5_SET(fte_match_set_lyr_2_4, headers_c, +- dst_ipv4_dst_ipv6.ipv4_layout.ipv4, +- ntohl(match.mask->dst)); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, +- dst_ipv4_dst_ipv6.ipv4_layout.ipv4, +- ntohl(match.key->dst)); +- +- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, +- ethertype); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, +- ETH_P_IP); +- } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { +- struct flow_match_ipv6_addrs match; +- +- flow_rule_match_enc_ipv6_addrs(rule, &match); +- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, +- src_ipv4_src_ipv6.ipv6_layout.ipv6), +- &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, +- ipv6)); +- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, +- src_ipv4_src_ipv6.ipv6_layout.ipv6), +- &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, +- ipv6)); +- +- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, +- dst_ipv4_dst_ipv6.ipv6_layout.ipv6), +- &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, +- ipv6)); +- memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, +- dst_ipv4_dst_ipv6.ipv6_layout.ipv6), +- &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, +- ipv6)); +- +- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, +- ethertype); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, +- ETH_P_IPV6); +- } +- } +- +- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { +- struct flow_match_ip match; +- +- flow_rule_match_enc_ip(rule, &match); +- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, +- match.mask->tos & 0x3); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, +- match.key->tos & 0x3); +- +- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, +- match.mask->tos >> 2); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, +- match.key->tos >> 2); +- +- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, +- match.mask->ttl); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, +- match.key->ttl); +- +- if (match.mask->ttl && +- !MLX5_CAP_ESW_FLOWTABLE_FDB +- (priv->mdev, +- ft_field_support.outer_ipv4_ttl)) { +- NL_SET_ERR_MSG_MOD(extack, +- "Matching on TTL is not supported"); +- return -EOPNOTSUPP; +- } +- +- } +- +- /* Enforce DMAC when offloading incoming tunneled flows. +- * Flow counters require a match on the DMAC. 
+- */ +- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16); +- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0); +- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, +- dmac_47_16), priv->netdev->dev_addr); +- +- /* let software handle IP fragments */ +- MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); +- MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0); +- + return 0; + } + +-- +2.13.6 + diff --git a/SOURCES/0234-netdrv-net-mlx5e-Disallow-inserting-vxlan-vlan-egres.patch b/SOURCES/0234-netdrv-net-mlx5e-Disallow-inserting-vxlan-vlan-egres.patch new file mode 100644 index 0000000..6aa370b --- /dev/null +++ b/SOURCES/0234-netdrv-net-mlx5e-Disallow-inserting-vxlan-vlan-egres.patch @@ -0,0 +1,88 @@ +From ac5bd9adb75cdce1a63983928583ca614781f712 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:56 -0400 +Subject: [PATCH 234/312] [netdrv] net/mlx5e: Disallow inserting vxlan/vlan + egress rules without decap/pop + +Message-id: <20200519074934.6303-26-ahleihel@redhat.com> +Patchwork-id: 310528 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 25/63] net/mlx5e: Disallow inserting vxlan/vlan egress rules without decap/pop +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 7f2fd0a5f8d859d71e710a664a113c4a2620dc4f +Author: Paul Blakey +Date: Sun Feb 16 12:01:33 2020 +0200 + + net/mlx5e: Disallow inserting vxlan/vlan egress rules without decap/pop + + Currently, rules on tunnel devices can be offloaded without decap action + when a vlan pop action exists. Similarly, the driver will offload rules + on vlan interfaces with no pop action when a decap action exists. + + Disallow the faulty behavior by checking that vlan egress rules do pop or + drop and vxlan egress rules do decap, as intended. 
+ + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 3d0a5c63d083..b3b006230b89 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2673,6 +2673,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) + { ++ struct net_device *filter_dev = parse_attr->filter_dev; ++ bool drop_action, decap_action, pop_action; + u32 actions; + + if (mlx5e_is_eswitch_flow(flow)) +@@ -2680,11 +2682,19 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + else + actions = flow->nic_attr->action; + +- if (flow_flag_test(flow, EGRESS) && +- !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || +- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || +- (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) +- return false; ++ drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP; ++ decap_action = actions & MLX5_FLOW_CONTEXT_ACTION_DECAP; ++ pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; ++ ++ if (flow_flag_test(flow, EGRESS) && !drop_action) { ++ /* If no drop, we must decap (vxlan) or pop (vlan) */ ++ if (mlx5e_get_tc_tun(filter_dev) && !decap_action) ++ return false; ++ else if (is_vlan_dev(filter_dev) && !pop_action) ++ return false; ++ else ++ return false; /* Sanity */ ++ } + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + return modify_header_match_supported(&parse_attr->spec, +-- +2.13.6 + diff --git a/SOURCES/0235-netdrv-net-mlx5e-Support-inner-header-rewrite-with-g.patch b/SOURCES/0235-netdrv-net-mlx5e-Support-inner-header-rewrite-with-g.patch new file mode 100644 index 0000000..daacab3 --- /dev/null +++ b/SOURCES/0235-netdrv-net-mlx5e-Support-inner-header-rewrite-with-g.patch @@ -0,0 +1,740 @@ +From 8b777eb2b9193201f58579233ba22c5ea715b995 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:57 -0400 +Subject: [PATCH 235/312] [netdrv] net/mlx5e: Support inner header rewrite with + goto action + +Message-id: <20200519074934.6303-27-ahleihel@redhat.com> +Patchwork-id: 310529 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 26/63] net/mlx5e: Support inner header rewrite with goto action +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 0a7fcb78cc21d339c4eba2827df846e69cec1d07 +Author: Paul Blakey +Date: Sun Feb 16 12:01:34 2020 +0200 + + net/mlx5e: Support inner header rewrite with goto action + + The hardware supports header rewrite of outer headers only. + To perform header rewrite on inner headers, we must first + decapsulate the packet. + + Currently, the hardware decap action is explicitly set by the tc + tunnel_key unset action. However, with goto action the user won't + use the tunnel_key unset action. In addition, header rewrites actions + will not apply to the inner header as done by the software model. 
+ + To support this, we will map each tunnel matches seen on a tc rule to + a unique tunnel id, implicity add a decap action on tc chain 0 flows, + and mark the packets with this unique tunnel id. Tunnel matches on + the decapsulated tunnel on later chains will match on this unique id + instead of the actual packet. + + We will also use this mapping to restore the tunnel info metadata + on miss. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 5 + + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 472 ++++++++++++++++++++--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 13 + + 3 files changed, 445 insertions(+), 45 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +index 9f44293ff153..100b9a2d3ea6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +@@ -80,6 +80,11 @@ struct mlx5_rep_uplink_priv { + struct mutex unready_flows_lock; + struct list_head unready_flows; + struct work_struct reoffload_flows_work; ++ ++ /* maps tun_info to a unique id*/ ++ struct mapping_ctx *tunnel_mapping; ++ /* maps tun_enc_opts to a unique id*/ ++ struct mapping_ctx *tunnel_enc_opts_mapping; + }; + + struct mlx5e_rep_priv { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index b3b006230b89..a7d3cca88718 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -55,10 +55,13 @@ + #include "fs_core.h" + #include "en/port.h" + #include "en/tc_tun.h" ++#include "en/mapping.h" + #include "lib/devcom.h" + #include "lib/geneve.h" + #include "diag/en_tc_tracepoint.h" + ++#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) ++ + struct mlx5_nic_flow_attr { + u32 action; + u32 flow_tag; +@@ -134,6 +137,8 @@ struct mlx5e_tc_flow { + refcount_t refcnt; + struct rcu_head rcu_head; + struct completion init_done; ++ int tunnel_id; /* the mapped tunnel id of this flow */ ++ + union { + struct mlx5_esw_flow_attr esw_attr[0]; + struct mlx5_nic_flow_attr nic_attr[0]; +@@ -151,14 +156,105 @@ struct mlx5e_tc_flow_parse_attr { + #define MLX5E_TC_TABLE_NUM_GROUPS 4 + #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) + ++struct tunnel_match_key { ++ struct flow_dissector_key_control enc_control; ++ struct flow_dissector_key_keyid enc_key_id; ++ struct flow_dissector_key_ports enc_tp; ++ struct flow_dissector_key_ip enc_ip; ++ union { ++ struct flow_dissector_key_ipv4_addrs enc_ipv4; ++ struct flow_dissector_key_ipv6_addrs enc_ipv6; ++ }; ++ ++ int filter_ifindex; ++}; ++ ++/* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. ++ * Upper TUNNEL_INFO_BITS for general tunnel info. ++ * Lower ENC_OPTS_BITS bits for enc_opts. 
++ */ ++#define TUNNEL_INFO_BITS 6 ++#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) ++#define ENC_OPTS_BITS 2 ++#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) ++#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) ++#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) ++ + struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { + [CHAIN_TO_REG] = { + .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, + .moffset = 0, + .mlen = 2, + }, ++ [TUNNEL_TO_REG] = { ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1, ++ .moffset = 3, ++ .mlen = 1, ++ .soffset = MLX5_BYTE_OFF(fte_match_param, ++ misc_parameters_2.metadata_reg_c_1), ++ }, + }; + ++static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); ++ ++void ++mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, ++ enum mlx5e_tc_attr_to_reg type, ++ u32 data, ++ u32 mask) ++{ ++ int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset; ++ int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen; ++ void *headers_c = spec->match_criteria; ++ void *headers_v = spec->match_value; ++ void *fmask, *fval; ++ ++ fmask = headers_c + soffset; ++ fval = headers_v + soffset; ++ ++ mask = cpu_to_be32(mask) >> (32 - (match_len * 8)); ++ data = cpu_to_be32(data) >> (32 - (match_len * 8)); ++ ++ memcpy(fmask, &mask, match_len); ++ memcpy(fval, &data, match_len); ++ ++ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; ++} ++ ++int ++mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, ++ enum mlx5e_tc_attr_to_reg type, ++ u32 data) ++{ ++ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; ++ int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield; ++ int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen; ++ char *modact; ++ int err; ++ ++ err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB, ++ mod_hdr_acts); ++ if (err) ++ return err; ++ ++ modact = mod_hdr_acts->actions + ++ (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ); ++ ++ /* Firmware has 5bit length field and 0 means 32bits */ ++ if (mlen == 4) ++ mlen = 0; ++ ++ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); ++ MLX5_SET(set_action_in, modact, field, mfield); ++ MLX5_SET(set_action_in, modact, offset, moffset * 8); ++ MLX5_SET(set_action_in, modact, length, mlen * 8); ++ MLX5_SET(set_action_in, modact, data, data); ++ mod_hdr_acts->num_actions++; ++ ++ return 0; ++} ++ + struct mlx5e_hairpin { + struct mlx5_hairpin *pair; + +@@ -216,8 +312,6 @@ struct mlx5e_mod_hdr_entry { + int compl_result; + }; + +-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) +- + static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + +@@ -1280,6 +1374,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + int out_index; + ++ mlx5e_put_flow_tunnel_id(flow); ++ + if (flow_flag_test(flow, NOT_READY)) { + remove_unready_flow(flow); + kvfree(attr->parse_attr); +@@ -1668,43 +1764,267 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + } + } + ++static int flow_has_tc_fwd_action(struct flow_cls_offload *f) ++{ ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ struct flow_action *flow_action = &rule->action; ++ const struct flow_action_entry *act; ++ int i; ++ ++ flow_action_for_each(i, act, flow_action) { ++ switch (act->id) { ++ case FLOW_ACTION_GOTO: ++ return true; ++ default: ++ continue; ++ } ++ } ++ ++ return false; ++} ++ ++static int 
++enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, ++ struct flow_dissector_key_enc_opts *opts, ++ struct netlink_ext_ack *extack, ++ bool *dont_care) ++{ ++ struct geneve_opt *opt; ++ int off = 0; ++ ++ *dont_care = true; ++ ++ while (opts->len > off) { ++ opt = (struct geneve_opt *)&opts->data[off]; ++ ++ if (!(*dont_care) || opt->opt_class || opt->type || ++ memchr_inv(opt->opt_data, 0, opt->length * 4)) { ++ *dont_care = false; ++ ++ if (opt->opt_class != U16_MAX || ++ opt->type != U8_MAX || ++ memchr_inv(opt->opt_data, 0xFF, ++ opt->length * 4)) { ++ NL_SET_ERR_MSG(extack, ++ "Partial match of tunnel options in chain > 0 isn't supported"); ++ netdev_warn(priv->netdev, ++ "Partial match of tunnel options in chain > 0 isn't supported"); ++ return -EOPNOTSUPP; ++ } ++ } ++ ++ off += sizeof(struct geneve_opt) + opt->length * 4; ++ } ++ ++ return 0; ++} ++ ++#define COPY_DISSECTOR(rule, diss_key, dst)\ ++({ \ ++ struct flow_rule *__rule = (rule);\ ++ typeof(dst) __dst = dst;\ ++\ ++ memcpy(__dst,\ ++ skb_flow_dissector_target(__rule->match.dissector,\ ++ diss_key,\ ++ __rule->match.key),\ ++ sizeof(*__dst));\ ++}) ++ ++static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct flow_cls_offload *f, ++ struct net_device *filter_dev) ++{ ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ struct netlink_ext_ack *extack = f->common.extack; ++ struct mlx5_esw_flow_attr *attr = flow->esw_attr; ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts; ++ struct flow_match_enc_opts enc_opts_match; ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ struct mlx5e_rep_priv *uplink_rpriv; ++ struct tunnel_match_key tunnel_key; ++ bool enc_opts_is_dont_care = true; ++ u32 tun_id, enc_opts_id = 0; ++ struct mlx5_eswitch *esw; ++ u32 value, mask; ++ int err; ++ ++ esw = priv->mdev->priv.eswitch; ++ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); ++ uplink_priv = &uplink_rpriv->uplink_priv; ++ ++ memset(&tunnel_key, 0, sizeof(tunnel_key)); ++ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, ++ &tunnel_key.enc_control); ++ if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) ++ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, ++ &tunnel_key.enc_ipv4); ++ else ++ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, ++ &tunnel_key.enc_ipv6); ++ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip); ++ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, ++ &tunnel_key.enc_tp); ++ COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, ++ &tunnel_key.enc_key_id); ++ tunnel_key.filter_ifindex = filter_dev->ifindex; ++ ++ err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id); ++ if (err) ++ return err; ++ ++ flow_rule_match_enc_opts(rule, &enc_opts_match); ++ err = enc_opts_is_dont_care_or_full_match(priv, ++ enc_opts_match.mask, ++ extack, ++ &enc_opts_is_dont_care); ++ if (err) ++ goto err_enc_opts; ++ ++ if (!enc_opts_is_dont_care) { ++ err = mapping_add(uplink_priv->tunnel_enc_opts_mapping, ++ enc_opts_match.key, &enc_opts_id); ++ if (err) ++ goto err_enc_opts; ++ } ++ ++ value = tun_id << ENC_OPTS_BITS | enc_opts_id; ++ mask = enc_opts_id ? 
TUNNEL_ID_MASK : ++ (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK); ++ ++ if (attr->chain) { ++ mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec, ++ TUNNEL_TO_REG, value, mask); ++ } else { ++ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; ++ err = mlx5e_tc_match_to_reg_set(priv->mdev, ++ mod_hdr_acts, ++ TUNNEL_TO_REG, value); ++ if (err) ++ goto err_set; ++ ++ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; ++ } ++ ++ flow->tunnel_id = value; ++ return 0; ++ ++err_set: ++ if (enc_opts_id) ++ mapping_remove(uplink_priv->tunnel_enc_opts_mapping, ++ enc_opts_id); ++err_enc_opts: ++ mapping_remove(uplink_priv->tunnel_mapping, tun_id); ++ return err; ++} ++ ++static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow) ++{ ++ u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK; ++ u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS; ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ struct mlx5e_rep_priv *uplink_rpriv; ++ struct mlx5_eswitch *esw; ++ ++ esw = flow->priv->mdev->priv.eswitch; ++ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); ++ uplink_priv = &uplink_rpriv->uplink_priv; ++ ++ if (tun_id) ++ mapping_remove(uplink_priv->tunnel_mapping, tun_id); ++ if (enc_opts_id) ++ mapping_remove(uplink_priv->tunnel_enc_opts_mapping, ++ enc_opts_id); ++} + + static int parse_tunnel_attr(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, +- struct net_device *filter_dev, u8 *match_level) ++ struct net_device *filter_dev, ++ u8 *match_level, ++ bool *match_inner) + { ++ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct netlink_ext_ack *extack = f->common.extack; ++ bool needs_mapping, sets_mapping; + int err; + +- err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, match_level); +- if (err) { +- NL_SET_ERR_MSG_MOD(extack, +- "failed to parse tunnel attributes"); +- return err; ++ if (!mlx5e_is_eswitch_flow(flow)) ++ return -EOPNOTSUPP; ++ ++ needs_mapping = !!flow->esw_attr->chain; ++ sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f); ++ *match_inner = !needs_mapping; ++ ++ if ((needs_mapping || sets_mapping) && ++ !mlx5_eswitch_vport_match_metadata_enabled(esw)) { ++ NL_SET_ERR_MSG(extack, ++ "Chains on tunnel devices isn't supported without register metadata support"); ++ netdev_warn(priv->netdev, ++ "Chains on tunnel devices isn't supported without register metadata support"); ++ return -EOPNOTSUPP; + } + +- return 0; ++ if (!flow->esw_attr->chain) { ++ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, ++ match_level); ++ if (err) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Failed to parse tunnel attributes"); ++ netdev_warn(priv->netdev, ++ "Failed to parse tunnel attributes"); ++ return err; ++ } ++ ++ flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; ++ } ++ ++ if (!needs_mapping && !sets_mapping) ++ return 0; ++ ++ return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev); + } + +-static void *get_match_headers_criteria(u32 flags, +- struct mlx5_flow_spec *spec) ++static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec) + { +- return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? 
+- MLX5_ADDR_OF(fte_match_param, spec->match_criteria, +- inner_headers) : +- MLX5_ADDR_OF(fte_match_param, spec->match_criteria, +- outer_headers); ++ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ++ inner_headers); ++} ++ ++static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec) ++{ ++ return MLX5_ADDR_OF(fte_match_param, spec->match_value, ++ inner_headers); ++} ++ ++static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec) ++{ ++ return MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ++ outer_headers); ++} ++ ++static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec) ++{ ++ return MLX5_ADDR_OF(fte_match_param, spec->match_value, ++ outer_headers); + } + + static void *get_match_headers_value(u32 flags, + struct mlx5_flow_spec *spec) + { + return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? +- MLX5_ADDR_OF(fte_match_param, spec->match_value, +- inner_headers) : +- MLX5_ADDR_OF(fte_match_param, spec->match_value, +- outer_headers); ++ get_match_inner_headers_value(spec) : ++ get_match_outer_headers_value(spec); ++} ++ ++static void *get_match_headers_criteria(u32 flags, ++ struct mlx5_flow_spec *spec) ++{ ++ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ? ++ get_match_inner_headers_criteria(spec) : ++ get_match_outer_headers_criteria(spec); + } + + static int mlx5e_flower_parse_meta(struct net_device *filter_dev, +@@ -1742,6 +2062,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, + } + + static int __parse_cls_flower(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, + struct net_device *filter_dev, +@@ -1791,18 +2112,22 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + } + + if (mlx5e_get_tc_tun(filter_dev)) { +- if (parse_tunnel_attr(priv, spec, f, filter_dev, +- outer_match_level)) +- return -EOPNOTSUPP; ++ bool match_inner = false; + +- /* At this point, header pointers should point to the inner +- * headers, outer header were already set by parse_tunnel_attr +- */ +- match_level = inner_match_level; +- headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, +- spec); +- headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, +- spec); ++ err = parse_tunnel_attr(priv, flow, spec, f, filter_dev, ++ outer_match_level, &match_inner); ++ if (err) ++ return err; ++ ++ if (match_inner) { ++ /* header pointers should point to the inner headers ++ * if the packet was decapsulated already. ++ * outer headers are set by parse_tunnel_attr. ++ */ ++ match_level = inner_match_level; ++ headers_c = get_match_inner_headers_criteria(spec); ++ headers_v = get_match_inner_headers_value(spec); ++ } + } + + err = mlx5e_flower_parse_meta(filter_dev, f); +@@ -2119,8 +2444,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, + inner_match_level = MLX5_MATCH_NONE; + outer_match_level = MLX5_MATCH_NONE; + +- err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level, +- &outer_match_level); ++ err = __parse_cls_flower(priv, flow, spec, f, filter_dev, ++ &inner_match_level, &outer_match_level); + non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? 
+ outer_match_level : inner_match_level; + +@@ -2674,7 +2999,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + struct netlink_ext_ack *extack) + { + struct net_device *filter_dev = parse_attr->filter_dev; +- bool drop_action, decap_action, pop_action; ++ bool drop_action, pop_action; + u32 actions; + + if (mlx5e_is_eswitch_flow(flow)) +@@ -2683,17 +3008,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + actions = flow->nic_attr->action; + + drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP; +- decap_action = actions & MLX5_FLOW_CONTEXT_ACTION_DECAP; + pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + + if (flow_flag_test(flow, EGRESS) && !drop_action) { +- /* If no drop, we must decap (vxlan) or pop (vlan) */ +- if (mlx5e_get_tc_tun(filter_dev) && !decap_action) +- return false; +- else if (is_vlan_dev(filter_dev) && !pop_action) ++ /* We only support filters on tunnel device, or on vlan ++ * devices if they have pop/drop action ++ */ ++ if (!mlx5e_get_tc_tun(filter_dev) || ++ (is_vlan_dev(filter_dev) && !pop_action)) + return false; +- else +- return false; /* Sanity */ + } + + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) +@@ -3288,9 +3611,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; + bool ft_flow = mlx5e_is_ft_flow(flow); + const struct flow_action_entry *act; ++ bool encap = false, decap = false; ++ u32 action = attr->action; + int err, i, if_count = 0; +- bool encap = false; +- u32 action = 0; + + if (!flow_action_has_entries(flow_action)) + return -EINVAL; +@@ -3492,7 +3815,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + attr->split_count = attr->out_count; + break; + case FLOW_ACTION_TUNNEL_DECAP: +- action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; ++ decap = true; + break; + case FLOW_ACTION_GOTO: + err = mlx5_validate_goto_chain(esw, flow, act, action, +@@ -3545,6 +3868,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + return -EOPNOTSUPP; + + if (attr->dest_chain) { ++ if (decap) { ++ /* It can be supported if we'll create a mapping for ++ * the tunnel device only (without tunnel), and set ++ * this tunnel id with this decap flow. ++ * ++ * On restore (miss), we'll just set this saved tunnel ++ * device. 
++ */ ++ ++ NL_SET_ERR_MSG(extack, ++ "Decap with goto isn't supported"); ++ netdev_warn(priv->netdev, ++ "Decap with goto isn't supported"); ++ return -EOPNOTSUPP; ++ } ++ + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + NL_SET_ERR_MSG_MOD(extack, + "Mirroring goto chain rules isn't supported"); +@@ -4269,12 +4608,55 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) + + int mlx5e_tc_esw_init(struct rhashtable *tc_ht) + { +- return rhashtable_init(tc_ht, &tc_ht_params); ++ const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts); ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ struct mlx5e_rep_priv *priv; ++ struct mapping_ctx *mapping; ++ int err; ++ ++ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); ++ priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); ++ ++ mapping = mapping_create(sizeof(struct tunnel_match_key), ++ TUNNEL_INFO_BITS_MASK, true); ++ if (IS_ERR(mapping)) { ++ err = PTR_ERR(mapping); ++ goto err_tun_mapping; ++ } ++ uplink_priv->tunnel_mapping = mapping; ++ ++ mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true); ++ if (IS_ERR(mapping)) { ++ err = PTR_ERR(mapping); ++ goto err_enc_opts_mapping; ++ } ++ uplink_priv->tunnel_enc_opts_mapping = mapping; ++ ++ err = rhashtable_init(tc_ht, &tc_ht_params); ++ if (err) ++ goto err_ht_init; ++ ++ return err; ++ ++err_ht_init: ++ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); ++err_enc_opts_mapping: ++ mapping_destroy(uplink_priv->tunnel_mapping); ++err_tun_mapping: ++ netdev_warn(priv->netdev, ++ "Failed to initialize tc (eswitch), err: %d", err); ++ return err; + } + + void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) + { ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ + rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); ++ ++ uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); ++ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); ++ mapping_destroy(uplink_priv->tunnel_mapping); + } + + int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 3848ec7b6c1e..2fab76b0bec5 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -93,12 +93,15 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work); + + enum mlx5e_tc_attr_to_reg { + CHAIN_TO_REG, ++ TUNNEL_TO_REG, + }; + + struct mlx5e_tc_attr_to_reg_mapping { + int mfield; /* rewrite field */ + int moffset; /* offset of mfield */ + int mlen; /* bytes to rewrite/match */ ++ ++ int soffset; /* offset of spec for match */ + }; + + extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; +@@ -114,6 +117,16 @@ struct mlx5e_tc_mod_hdr_acts { + void *actions; + }; + ++int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, ++ enum mlx5e_tc_attr_to_reg type, ++ u32 data); ++ ++void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, ++ enum mlx5e_tc_attr_to_reg type, ++ u32 data, ++ u32 mask); ++ + int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, + int namespace, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); +-- +2.13.6 + diff --git a/SOURCES/0236-netdrv-net-mlx5-E-Switch-Get-reg_c1-value-on-miss.patch b/SOURCES/0236-netdrv-net-mlx5-E-Switch-Get-reg_c1-value-on-miss.patch new file mode 100644 index 0000000..d67ff7e --- /dev/null +++ 
b/SOURCES/0236-netdrv-net-mlx5-E-Switch-Get-reg_c1-value-on-miss.patch @@ -0,0 +1,143 @@ +From 28d18d2bc0d56e447a0adeebc3e48b7da6890b2a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:58 -0400 +Subject: [PATCH 236/312] [netdrv] net/mlx5: E-Switch, Get reg_c1 value on miss + +Message-id: <20200519074934.6303-28-ahleihel@redhat.com> +Patchwork-id: 310530 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 27/63] net/mlx5: E-Switch, Get reg_c1 value on miss +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit 6724e66b90eebb19d146b7623b3e2af15616782b +Author: Paul Blakey +Date: Sun Feb 16 12:01:35 2020 +0200 + + net/mlx5: E-Switch, Get reg_c1 value on miss + + The HW model implicitly decapsulates tunnels on chain 0 and sets reg_c1 + with the mapped tunnel id. On miss, the packet does not have the outer + header and the driver restores the tunnel information from the tunnel id. + + Getting reg_c1 value in software requires enabling reg_c1 loopback and + copying reg_c1 to reg_b. reg_b comes up on CQE as cqe->imm_inval_pkey. + + Use the reg_c0 restoration rules to also copy reg_c1 to reg_B. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 31 +++++++++++++++++++--- + 2 files changed, 29 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index a384cab195c1..3240f6de94bd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -233,6 +233,7 @@ struct mlx5_esw_offload { + #ifndef __GENKSYMS__ + struct mlx5_flow_table *ft_offloads_restore; + struct mlx5_flow_group *restore_group; ++ struct mlx5_modify_hdr *restore_copy_hdr_id; + #endif + + struct mlx5_flow_table *ft_offloads; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index eab876a2208a..a9b2522b9f03 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -782,9 +782,11 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) + esw_vport_context.fdb_to_vport_reg_c_id); + + if (enable) +- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; ++ fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0 | ++ MLX5_FDB_TO_VPORT_REG_C_1; + else +- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; ++ fdb_to_vport_reg_c_id &= ~(MLX5_FDB_TO_VPORT_REG_C_0 | ++ MLX5_FDB_TO_VPORT_REG_C_1); + + MLX5_SET(modify_esw_vport_context_in, in, + esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); +@@ -1045,7 +1047,9 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) + misc_parameters_2); + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; +- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; ++ flow_act.action = 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | ++ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; ++ flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id; + + flow_context = &spec->flow_context; + flow_context->flags |= FLOW_CONTEXT_HAS_TAG; +@@ -1404,16 +1408,19 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw) + { + struct mlx5_esw_offload *offloads = &esw->offloads; + ++ mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); + mlx5_destroy_flow_group(offloads->restore_group); + mlx5_destroy_flow_table(offloads->ft_offloads_restore); + } + + static int esw_create_restore_table(struct mlx5_eswitch *esw) + { ++ u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {}; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *ns; ++ struct mlx5_modify_hdr *mod_hdr; + void *match_criteria, *misc; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *g; +@@ -1462,11 +1469,29 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) + goto err_group; + } + ++ MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY); ++ MLX5_SET(copy_action_in, modact, src_field, ++ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1); ++ MLX5_SET(copy_action_in, modact, dst_field, ++ MLX5_ACTION_IN_FIELD_METADATA_REG_B); ++ mod_hdr = mlx5_modify_header_alloc(esw->dev, ++ MLX5_FLOW_NAMESPACE_KERNEL, 1, ++ modact); ++ if (IS_ERR(mod_hdr)) { ++ esw_warn(dev, "Failed to create restore mod header, err: %d\n", ++ err); ++ err = PTR_ERR(mod_hdr); ++ goto err_mod_hdr; ++ } ++ + esw->offloads.ft_offloads_restore = ft; + esw->offloads.restore_group = g; ++ esw->offloads.restore_copy_hdr_id = mod_hdr; + + return 0; + ++err_mod_hdr: ++ mlx5_destroy_flow_group(g); + err_group: + mlx5_destroy_flow_table(ft); + out_free: +-- +2.13.6 + diff --git a/SOURCES/0237-netdrv-net-mlx5e-Restore-tunnel-metadata-on-miss.patch b/SOURCES/0237-netdrv-net-mlx5e-Restore-tunnel-metadata-on-miss.patch new file mode 100644 index 0000000..3ccf5a7 --- /dev/null +++ b/SOURCES/0237-netdrv-net-mlx5e-Restore-tunnel-metadata-on-miss.patch @@ -0,0 +1,270 @@ +From a4c8a21efaa55f5607829f5b667a06f32e00a2b0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:59 -0400 +Subject: [PATCH 237/312] [netdrv] net/mlx5e: Restore tunnel metadata on miss + +Message-id: <20200519074934.6303-29-ahleihel@redhat.com> +Patchwork-id: 310532 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 28/63] net/mlx5e: Restore tunnel metadata on miss +Bugzilla: 1790219 1790218 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Bugzilla: http://bugzilla.redhat.com/1790218 +Upstream: v5.7-rc1 + +commit b8ce90370977dbe24d2ed7271b65710ec9c40166 +Author: Paul Blakey +Date: Sun Feb 16 12:01:36 2020 +0200 + + net/mlx5e: Restore tunnel metadata on miss + + In tunnel and chains setup, we decapsulate the packets on first chain hop, + if we miss on later chains, the packet will comes up without tunnel header, + so it won't be taken by the tunnel device automatically, which fills the + tunnel metadata, and further tc tunnel matches won't work. + + On miss, we get the tunnel mapping id, which was set on the chain 0 rule + that decapsulated the packet. This rule matched the tunnel outer + headers. 
From the tunnel mapping id, we get to this tunnel matches + and restore the equivalent tunnel info metadata dst on the skb. + We also set the skb->dev to the relevant device (tunnel device). + Now further tc processing can be done on the relevant device. + + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 10 ++- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 110 ++++++++++++++++++++++-- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 9 +- + 3 files changed, 117 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 1baeba194794..7aad59376ff4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1194,6 +1194,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; ++ struct mlx5e_tc_update_priv tc_priv = {}; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + struct mlx5e_wqe_frag_info *wi; + struct sk_buff *skb; +@@ -1226,11 +1227,13 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + if (rep->vlan && skb_vlan_tag_present(skb)) + skb_vlan_pop(skb); + +- if (!mlx5e_tc_rep_update_skb(cqe, skb)) ++ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv)) + goto free_wqe; + + napi_gro_receive(rq->cq.napi, skb); + ++ mlx5_tc_rep_post_napi_receive(&tc_priv); ++ + free_wqe: + mlx5e_free_rx_wqe(rq, wi, true); + wq_cyc_pop: +@@ -1247,6 +1250,7 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, + u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); + u32 page_idx = wqe_offset >> PAGE_SHIFT; ++ struct mlx5e_tc_update_priv tc_priv = {}; + struct mlx5e_rx_wqe_ll *wqe; + struct mlx5_wq_ll *wq; + struct sk_buff *skb; +@@ -1279,11 +1283,13 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + +- if (!mlx5e_tc_rep_update_skb(cqe, skb)) ++ if (!mlx5e_tc_rep_update_skb(cqe, skb, &tc_priv)) + goto mpwrq_cqe_out; + + napi_gro_receive(rq->cq.napi, skb); + ++ mlx5_tc_rep_post_napi_receive(&tc_priv); ++ + mpwrq_cqe_out: + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) + return; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index a7d3cca88718..269258c5f961 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -4689,19 +4689,102 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work) + mutex_unlock(&rpriv->unready_flows_lock); + } + ++#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) ++static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, ++ struct mlx5e_tc_update_priv *tc_priv, ++ u32 tunnel_id) ++{ ++ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; ++ struct flow_dissector_key_enc_opts enc_opts = {}; ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ struct mlx5e_rep_priv *uplink_rpriv; ++ struct metadata_dst *tun_dst; ++ struct tunnel_match_key key; ++ u32 tun_id, enc_opts_id; ++ struct net_device *dev; ++ int err; ++ ++ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; ++ tun_id = tunnel_id 
>> ENC_OPTS_BITS; ++ ++ if (!tun_id) ++ return true; ++ ++ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); ++ uplink_priv = &uplink_rpriv->uplink_priv; ++ ++ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key); ++ if (err) { ++ WARN_ON_ONCE(true); ++ netdev_dbg(priv->netdev, ++ "Couldn't find tunnel for tun_id: %d, err: %d\n", ++ tun_id, err); ++ return false; ++ } ++ ++ if (enc_opts_id) { ++ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping, ++ enc_opts_id, &enc_opts); ++ if (err) { ++ netdev_dbg(priv->netdev, ++ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n", ++ enc_opts_id, err); ++ return false; ++ } ++ } ++ ++ tun_dst = tun_rx_dst(enc_opts.len); ++ if (!tun_dst) { ++ WARN_ON_ONCE(true); ++ return false; ++ } ++ ++ ip_tunnel_key_init(&tun_dst->u.tun_info.key, ++ key.enc_ipv4.src, key.enc_ipv4.dst, ++ key.enc_ip.tos, key.enc_ip.ttl, ++ 0, /* label */ ++ key.enc_tp.src, key.enc_tp.dst, ++ key32_to_tunnel_id(key.enc_key_id.keyid), ++ TUNNEL_KEY); ++ ++ if (enc_opts.len) ++ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data, ++ enc_opts.len, enc_opts.dst_opt_type); ++ ++ skb_dst_set(skb, (struct dst_entry *)tun_dst); ++ dev = dev_get_by_index(&init_net, key.filter_ifindex); ++ if (!dev) { ++ netdev_dbg(priv->netdev, ++ "Couldn't find tunnel device with ifindex: %d\n", ++ key.filter_ifindex); ++ return false; ++ } ++ ++ /* Set tun_dev so we do dev_put() after datapath */ ++ tc_priv->tun_dev = dev; ++ ++ skb->dev = dev; ++ ++ return true; ++} ++#endif /* CONFIG_NET_TC_SKB_EXT */ ++ + bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, +- struct sk_buff *skb) ++ struct sk_buff *skb, ++ struct mlx5e_tc_update_priv *tc_priv) + { + #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) ++ u32 chain = 0, reg_c0, reg_c1, tunnel_id; + struct tc_skb_ext *tc_skb_ext; + struct mlx5_eswitch *esw; + struct mlx5e_priv *priv; +- u32 chain = 0, reg_c0; ++ int tunnel_moffset; + int err; + + reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); + if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG) + reg_c0 = 0; ++ reg_c1 = be32_to_cpu(cqe->imm_inval_pkey); + + if (!reg_c0) + return true; +@@ -4717,17 +4800,26 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, + return false; + } + +- if (!chain) +- return true; ++ if (chain) { ++ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); ++ if (!tc_skb_ext) { ++ WARN_ON(1); ++ return false; ++ } + +- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); +- if (!tc_skb_ext) { +- WARN_ON_ONCE(1); +- return false; ++ tc_skb_ext->chain = chain; + } + +- tc_skb_ext->chain = chain; ++ tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset; ++ tunnel_id = reg_c1 >> (8 * tunnel_moffset); ++ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); + #endif /* CONFIG_NET_TC_SKB_EXT */ + + return true; + } ++ ++void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) ++{ ++ if (tc_priv->tun_dev) ++ dev_put(tc_priv->tun_dev); ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 2fab76b0bec5..21cbde472b64 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -109,7 +109,14 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, + struct net_device *out_dev); + +-bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb); ++struct mlx5e_tc_update_priv { ++ struct net_device *tun_dev; ++}; ++ 
++bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, ++ struct mlx5e_tc_update_priv *tc_priv); ++ ++void mlx5_tc_rep_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv); + + struct mlx5e_tc_mod_hdr_acts { + int num_actions; +-- +2.13.6 + diff --git a/SOURCES/0238-netdrv-net-mlx5-E-Switch-Enable-reg-c1-loopback-when.patch b/SOURCES/0238-netdrv-net-mlx5-E-Switch-Enable-reg-c1-loopback-when.patch new file mode 100644 index 0000000..b571c90 --- /dev/null +++ b/SOURCES/0238-netdrv-net-mlx5-E-Switch-Enable-reg-c1-loopback-when.patch @@ -0,0 +1,168 @@ +From e9a9c073ba5d91c2e0336afdb0edead9c47f30df Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:00 -0400 +Subject: [PATCH 238/312] [netdrv] net/mlx5: E-Switch, Enable reg c1 loopback + when possible + +Message-id: <20200519074934.6303-30-ahleihel@redhat.com> +Patchwork-id: 310531 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 29/63] net/mlx5: E-Switch, Enable reg c1 loopback when possible +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit 5b7cb7451585f83d414512a70b79b2086b8c6ed1 +Author: Paul Blakey +Date: Thu Mar 12 12:23:03 2020 +0200 + + net/mlx5: E-Switch, Enable reg c1 loopback when possible + + Enable reg c1 loopback if firmware reports it's supported, + as this is needed for restoring packet metadata (e.g chain). + + Also define helper to query if it is enabled. + + Signed-off-by: Paul Blakey + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 44 ++++++++++++++++------ + include/linux/mlx5/eswitch.h | 7 ++++ + 3 files changed, 41 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 3240f6de94bd..ccb4f0f566ea 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -271,6 +271,7 @@ struct mlx5_esw_functions { + + enum { + MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0), ++ MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1), + }; + + struct mlx5_eswitch { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index a9b2522b9f03..da5730c8c3fb 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -763,14 +763,21 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) + mlx5_del_flow_rules(rule); + } + ++static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw) ++{ ++ return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) & ++ MLX5_FDB_TO_VPORT_REG_C_1; ++} ++ + static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) + { + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; +- u8 fdb_to_vport_reg_c_id; ++ u8 curr, wanted; + int err; + +- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) ++ if (!mlx5_eswitch_reg_c1_loopback_supported(esw) && ++ !mlx5_eswitch_vport_match_metadata_enabled(esw)) + return 0; 
+ + err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false, +@@ -778,24 +785,33 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) + if (err) + return err; + +- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, +- esw_vport_context.fdb_to_vport_reg_c_id); ++ curr = MLX5_GET(query_esw_vport_context_out, out, ++ esw_vport_context.fdb_to_vport_reg_c_id); ++ wanted = MLX5_FDB_TO_VPORT_REG_C_0; ++ if (mlx5_eswitch_reg_c1_loopback_supported(esw)) ++ wanted |= MLX5_FDB_TO_VPORT_REG_C_1; + + if (enable) +- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0 | +- MLX5_FDB_TO_VPORT_REG_C_1; ++ curr |= wanted; + else +- fdb_to_vport_reg_c_id &= ~(MLX5_FDB_TO_VPORT_REG_C_0 | +- MLX5_FDB_TO_VPORT_REG_C_1); ++ curr &= ~wanted; + + MLX5_SET(modify_esw_vport_context_in, in, +- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); ++ esw_vport_context.fdb_to_vport_reg_c_id, curr); + + MLX5_SET(modify_esw_vport_context_in, in, + field_select.fdb_to_vport_reg_c_id, 1); + +- return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, +- in, sizeof(in)); ++ err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in, ++ sizeof(in)); ++ if (!err) { ++ if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1)) ++ esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; ++ else ++ esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED; ++ } ++ ++ return err; + } + + static void peer_miss_rules_setup(struct mlx5_eswitch *esw, +@@ -2833,6 +2849,12 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num) + vport_num <= esw->dev->priv.sriov.max_vfs; + } + ++bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) ++{ ++ return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED); ++} ++EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled); ++ + bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) + { + return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA); +diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h +index 92e39e8a30f0..75d846948683 100644 +--- a/include/linux/mlx5/eswitch.h ++++ b/include/linux/mlx5/eswitch.h +@@ -70,6 +70,7 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); + enum devlink_eswitch_encap_mode + mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); + ++bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw); + bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); + + /* Reg C0 usage: +@@ -114,6 +115,12 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) + } + + static inline bool ++mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) ++{ ++ return false; ++}; ++ ++static inline bool + mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) + { + return false; +-- +2.13.6 + diff --git a/SOURCES/0239-netdrv-net-mlx5e-en_rep-Create-uplink-rep-root-table.patch b/SOURCES/0239-netdrv-net-mlx5e-en_rep-Create-uplink-rep-root-table.patch new file mode 100644 index 0000000..03cc22e --- /dev/null +++ b/SOURCES/0239-netdrv-net-mlx5e-en_rep-Create-uplink-rep-root-table.patch @@ -0,0 +1,65 @@ +From 9ad313bd9dbadd1f235e0869170d9eec9662400e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:01 -0400 +Subject: [PATCH 239/312] [netdrv] net/mlx5e: en_rep: Create uplink rep root + table after eswitch offloads table + +Message-id: <20200519074934.6303-31-ahleihel@redhat.com> +Patchwork-id: 310538 +Patchwork-instance: 
patchwork +O-Subject: [RHEL8.3 BZ 1663246 30/63] net/mlx5e: en_rep: Create uplink rep root table after eswitch offloads table +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit c6fe5729dcff469be1ee3c516f9d9d2c3f1598c2 +Author: Paul Blakey +Date: Thu Mar 12 12:23:04 2020 +0200 + + net/mlx5e: en_rep: Create uplink rep root table after eswitch offloads table + + The eswitch offloads table, which has the reps (vport) rx miss rules, + was moved from OFFLOADS namespace [0,0] (prio, level), to [1,0], so + the restore table (the new [0,0]) can come before it. The destinations + of these miss rules is the rep root ft (ttc for non uplink reps). + + Uplink rep root ft is created as OFFLOADS namespace [0,1], and is used + as a hook to next RX prio (either ethtool or ttc), but this fails to + pass fs_core level's check. + + Move uplink rep root ft to OFFLOADS prio 1, level 1 ([1,1]), so it + will keep the same relative position after the restore table + change. + + Signed-off-by: Paul Blakey + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 1cb47297285e..797ecdb6a165 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1600,6 +1600,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv) + } + + ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */ ++ ft_attr.prio = 1; + ft_attr.level = 1; + + rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr); +-- +2.13.6 + diff --git a/SOURCES/0240-netdrv-net-mlx5-E-Switch-Introduce-global-tables.patch b/SOURCES/0240-netdrv-net-mlx5-E-Switch-Introduce-global-tables.patch new file mode 100644 index 0000000..d1e21a1 --- /dev/null +++ b/SOURCES/0240-netdrv-net-mlx5-E-Switch-Introduce-global-tables.patch @@ -0,0 +1,173 @@ +From a56298693c73f43b0b603c702eeb66b4c80ba6fb Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:02 -0400 +Subject: [PATCH 240/312] [netdrv] net/mlx5: E-Switch, Introduce global tables + +Message-id: <20200519074934.6303-32-ahleihel@redhat.com> +Patchwork-id: 310534 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 31/63] net/mlx5: E-Switch, Introduce global tables +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit d18296ffd9ccde82c82c220263fca2e76d5258be +Author: Paul Blakey +Date: Thu Mar 12 12:23:10 2020 +0200 + + net/mlx5: E-Switch, Introduce global tables + + Currently, flow tables are automatically connected according to their + tuple. + + Introduce global tables which are flow tables that are detached from the + eswitch chains processing, and will be connected by explicitly referencing + them from multiple chains. + + Add this new table type, and allow connecting them by refenece. 
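To make the new interface concrete, here is a hedged usage sketch based only on what this patch adds (mlx5_esw_chains_create_global_table() and the new fdb/dest_ft fields of struct mlx5_esw_flow_attr); the surrounding function and variable names are assumed caller context, not driver code:

static struct mlx5_flow_handle *
add_rule_in_global_table(struct mlx5_eswitch *esw,
                         struct mlx5_flow_spec *spec,
                         struct mlx5_esw_flow_attr *attr,
                         struct mlx5_flow_table *global_ft)
{
        attr->chain = 0;        /* chain == 0 && prio == 0 ...          */
        attr->prio = 0;         /* ... makes the driver use attr->fdb   */
        attr->fdb = global_ft;  /* insert directly into the global table */

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

The table itself would come from mlx5_esw_chains_create_global_table(esw), which the patch gates on firmware ignore_flow_level support, and another rule can jump into it by setting attr->dest_ft = global_ft, which the patch translates into a flow-table destination with FLOW_ACT_IGNORE_FLOW_LEVEL.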
+ + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 ++ + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 18 +++++++++---- + .../mellanox/mlx5/core/eswitch_offloads_chains.c | 30 ++++++++++++++++++++++ + .../mellanox/mlx5/core/eswitch_offloads_chains.h | 6 +++++ + 4 files changed, 51 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index ccb4f0f566ea..e7de5e8e5605 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -462,6 +462,8 @@ struct mlx5_esw_flow_attr { + u32 dest_chain; + #ifndef __GENKSYMS__ + u32 flags; ++ struct mlx5_flow_table *fdb; ++ struct mlx5_flow_table *dest_ft; + #endif + struct mlx5e_tc_flow_parse_attr *parse_attr; + }; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index da5730c8c3fb..e8d9a0bd943a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -324,7 +324,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + struct mlx5_flow_table *ft; + +- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { ++ if (attr->dest_ft) { ++ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; ++ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ++ dest[i].ft = attr->dest_ft; ++ i++; ++ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { + flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw); +@@ -378,8 +383,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + if (split) { + fdb = esw_vport_tbl_get(esw, attr); + } else { +- fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, +- 0); ++ if (attr->chain || attr->prio) ++ fdb = mlx5_esw_chains_get_table(esw, attr->chain, ++ attr->prio, 0); ++ else ++ fdb = attr->fdb; + mlx5_eswitch_set_rule_source_port(esw, spec, attr); + } + if (IS_ERR(fdb)) { +@@ -402,7 +410,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + err_add_rule: + if (split) + esw_vport_tbl_put(esw, attr); +- else ++ else if (attr->chain || attr->prio) + mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0); + err_esw_get: + if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain) +@@ -499,7 +507,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, + } else { + if (split) + esw_vport_tbl_put(esw, attr); +- else ++ else if (attr->chain || attr->prio) + mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, + 0); + if (attr->dest_chain) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 12ca184cd795..6f62a326071a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -719,6 +719,36 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) + return tc_end_fdb(esw); + } + ++struct mlx5_flow_table * ++mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw) ++{ ++ int chain, prio, level, err; ++ ++ if (!fdb_ignore_flow_level_supported(esw)) { 
++ err = -EOPNOTSUPP; ++ ++ esw_warn(esw->dev, ++ "Couldn't create global flow table, ignore_flow_level not supported."); ++ goto err_ignore; ++ } ++ ++ chain = mlx5_esw_chains_get_chain_range(esw), ++ prio = mlx5_esw_chains_get_prio_range(esw); ++ level = mlx5_esw_chains_get_level_range(esw); ++ ++ return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level); ++ ++err_ignore: ++ return ERR_PTR(err); ++} ++ ++void ++mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw, ++ struct mlx5_flow_table *ft) ++{ ++ mlx5_esw_chains_destroy_fdb_table(esw, ft); ++} ++ + static int + mlx5_esw_chains_init(struct mlx5_eswitch *esw) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +index e806d8de868e..c7bc609acb91 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +@@ -25,6 +25,12 @@ mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio, + struct mlx5_flow_table * + mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw); + ++struct mlx5_flow_table * ++mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw); ++void ++mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw, ++ struct mlx5_flow_table *ft); ++ + int mlx5_esw_chains_create(struct mlx5_eswitch *esw); + void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); + +-- +2.13.6 + diff --git a/SOURCES/0241-netdrv-net-mlx5-E-Switch-Add-support-for-offloading-.patch b/SOURCES/0241-netdrv-net-mlx5-E-Switch-Add-support-for-offloading-.patch new file mode 100644 index 0000000..dbcf398 --- /dev/null +++ b/SOURCES/0241-netdrv-net-mlx5-E-Switch-Add-support-for-offloading-.patch @@ -0,0 +1,72 @@ +From bf0023a9b4c8fea261dfcf28876ef882eff650ca Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:03 -0400 +Subject: [PATCH 241/312] [netdrv] net/mlx5: E-Switch, Add support for + offloading rules with no in_port + +Message-id: <20200519074934.6303-33-ahleihel@redhat.com> +Patchwork-id: 310533 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 32/63] net/mlx5: E-Switch, Add support for offloading rules with no in_port +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit 6fb0701a9cfa248f1c1e5dfde15c4d79bb1bdc69 +Author: Paul Blakey +Date: Thu Mar 12 12:23:11 2020 +0200 + + net/mlx5: E-Switch, Add support for offloading rules with no in_port + + FTEs in global tables may match on packets from multiple in_ports. + Provide the capability to omit the in_port match condition. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 +++- + 2 files changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index e7de5e8e5605..ecf3d7157d4d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -428,6 +428,7 @@ enum { + enum { + MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0), + MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1), ++ MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2), + }; + + struct mlx5_esw_flow_attr { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index e8d9a0bd943a..3a6434ba2a58 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -388,7 +388,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + attr->prio, 0); + else + fdb = attr->fdb; +- mlx5_eswitch_set_rule_source_port(esw, spec, attr); ++ ++ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT)) ++ mlx5_eswitch_set_rule_source_port(esw, spec, attr); + } + if (IS_ERR(fdb)) { + rule = ERR_CAST(fdb); +-- +2.13.6 + diff --git a/SOURCES/0242-netdrv-net-mlx5-E-Switch-Support-getting-chain-mappi.patch b/SOURCES/0242-netdrv-net-mlx5-E-Switch-Support-getting-chain-mappi.patch new file mode 100644 index 0000000..87b32da --- /dev/null +++ b/SOURCES/0242-netdrv-net-mlx5-E-Switch-Support-getting-chain-mappi.patch @@ -0,0 +1,90 @@ +From 28a820a8329a0261bbccae76fe2c5a2cf84d857a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:04 -0400 +Subject: [PATCH 242/312] [netdrv] net/mlx5: E-Switch, Support getting chain + mapping + +Message-id: <20200519074934.6303-34-ahleihel@redhat.com> +Patchwork-id: 310536 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 33/63] net/mlx5: E-Switch, Support getting chain mapping +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit 43435e91396fd156a31f0e3977f60f564a66a328 +Author: Paul Blakey +Date: Thu Mar 12 12:23:12 2020 +0200 + + net/mlx5: E-Switch, Support getting chain mapping + + Currently, we write chain register mapping on miss from the the last + prio of a chain. It is used to restore the chain in software. + + To support re-using the chain register mapping from global tables (such + as CT tuple table) misses, export the chain mapping. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 13 +++++++++++++ + .../ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h | 7 +++++++ + 2 files changed, 20 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 6f62a326071a..0f9c9aae11bf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -897,6 +897,19 @@ mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) + mlx5_esw_chains_cleanup(esw); + } + ++int ++mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain, ++ u32 *chain_mapping) ++{ ++ return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping); ++} ++ ++int ++mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping) ++{ ++ return mapping_remove(esw_chains_mapping(esw), chain_mapping); ++} ++ + int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, + u32 *chain) + { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +index c7bc609acb91..f3b9ae6798f3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.h +@@ -31,6 +31,13 @@ void + mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw, + struct mlx5_flow_table *ft); + ++int ++mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain, ++ u32 *chain_mapping); ++int ++mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, ++ u32 chain_mapping); ++ + int mlx5_esw_chains_create(struct mlx5_eswitch *esw); + void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw); + +-- +2.13.6 + diff --git a/SOURCES/0243-netdrv-net-mlx5e-CT-Introduce-connection-tracking.patch b/SOURCES/0243-netdrv-net-mlx5e-CT-Introduce-connection-tracking.patch new file mode 100644 index 0000000..83c1c2e --- /dev/null +++ b/SOURCES/0243-netdrv-net-mlx5e-CT-Introduce-connection-tracking.patch @@ -0,0 +1,1120 @@ +From c500913088310b727470cefa9b1a04c3bd62142d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:05 -0400 +Subject: [PATCH 243/312] [netdrv] net/mlx5e: CT: Introduce connection tracking + +Message-id: <20200519074934.6303-35-ahleihel@redhat.com> +Patchwork-id: 310535 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 34/63] net/mlx5e: CT: Introduce connection tracking +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit 4c3844d9e97e10f0cf024fe7f24dcefa133fe9e2 +Author: Paul Blakey +Date: Thu Mar 12 12:23:14 2020 +0200 + + net/mlx5e: CT: Introduce connection tracking + + Add support for offloading tc ct action and ct matches. 
+ We translate the tc filter with CT action the following HW model: + + +-------------------+ +--------------------+ +--------------+ + + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +-----> + + original match + | + tuple + zone match + | + fte_id match + | + +-------------------+ | +--------------------+ | +--------------+ | + v v v + set chain miss mapping set mark original + set fte_id set label filter + set zone set established actions + set tunnel_id do nat (if needed) + do decap + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Roi Dayan + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10 + + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1 + + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 541 +++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 140 ++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 3 + + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 104 +++- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 8 + + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 + + 8 files changed, 795 insertions(+), 16 deletions(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +index 6919161c8f9b..e1dff89804f6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +@@ -77,6 +77,16 @@ config MLX5_ESWITCH + Legacy SRIOV mode (L2 mac vlan steering based). + Switchdev mode (eswitch offloads). + ++config MLX5_TC_CT ++ bool "MLX5 TC connection tracking offload support" ++ depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT ++ default y ++ help ++ Say Y here if you want to support offloading connection tracking rules ++ via tc ct action. ++ ++ If unsure, set to Y ++ + config MLX5_CORE_EN_DCB + bool "Data Center Bridging (DCB) Support" + default y +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index be4e7470830f..9e85def607b9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -36,6 +36,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o + mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \ + lib/geneve.o en/mapping.o en/tc_tun_vxlan.o en/tc_tun_gre.o \ + en/tc_tun_geneve.o diag/en_tc_tracepoint.o ++mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o + + # + # Core extra +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +new file mode 100644 +index 000000000000..c1130460bb60 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -0,0 +1,541 @@ ++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB ++/* Copyright (c) 2019 Mellanox Technologies. 
*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "en/tc_ct.h" ++#include "en.h" ++#include "en_tc.h" ++#include "en_rep.h" ++#include "eswitch_offloads_chains.h" ++ ++#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8) ++#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0) ++#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1) ++#define MLX5_CT_STATE_TRK_BIT BIT(2) ++ ++#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8) ++#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) ++#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX ++ ++#define ct_dbg(fmt, args...)\ ++ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args) ++ ++struct mlx5_tc_ct_priv { ++ struct mlx5_eswitch *esw; ++ const struct net_device *netdev; ++ struct idr fte_ids; ++ struct mlx5_flow_table *ct; ++ struct mlx5_flow_table *ct_nat; ++ struct mlx5_flow_table *post_ct; ++ struct mutex control_lock; /* guards parallel adds/dels */ ++}; ++ ++struct mlx5_ct_flow { ++ struct mlx5_esw_flow_attr pre_ct_attr; ++ struct mlx5_esw_flow_attr post_ct_attr; ++ struct mlx5_flow_handle *pre_ct_rule; ++ struct mlx5_flow_handle *post_ct_rule; ++ u32 fte_id; ++ u32 chain_mapping; ++}; ++ ++static struct mlx5_tc_ct_priv * ++mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv) ++{ ++ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ struct mlx5e_rep_priv *uplink_rpriv; ++ ++ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); ++ uplink_priv = &uplink_rpriv->uplink_priv; ++ return uplink_priv->ct_priv; ++} ++ ++int ++mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, ++ struct mlx5_flow_spec *spec, ++ struct flow_cls_offload *f, ++ struct netlink_ext_ack *extack) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ struct flow_dissector_key_ct *mask, *key; ++ bool trk, est, untrk, unest, new, unnew; ++ u32 ctstate = 0, ctstate_mask = 0; ++ u16 ct_state_on, ct_state_off; ++ u16 ct_state, ct_state_mask; ++ struct flow_match_ct match; ++ ++ if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) ++ return 0; ++ ++ if (!ct_priv) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "offload of ct matching isn't available"); ++ return -EOPNOTSUPP; ++ } ++ ++ flow_rule_match_ct(f->rule, &match); ++ ++ key = match.key; ++ mask = match.mask; ++ ++ ct_state = key->ct_state; ++ ct_state_mask = mask->ct_state; ++ ++ if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | ++ TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | ++ TCA_FLOWER_KEY_CT_FLAGS_NEW)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "only ct_state trk, est and new are supported for offload"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "only lower 32bits of ct_labels are supported for offload"); ++ return -EOPNOTSUPP; ++ } ++ ++ ct_state_on = ct_state & ct_state_mask; ++ ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask; ++ trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; ++ new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW; ++ est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; ++ untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; ++ unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW; ++ unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; ++ ++ ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0; ++ ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0; ++ ctstate_mask |= (untrk || trk) ? 
MLX5_CT_STATE_TRK_BIT : 0; ++ ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0; ++ ++ if (new) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "matching on ct_state +new isn't supported"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (mask->ct_zone) ++ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, ++ key->ct_zone, MLX5_CT_ZONE_MASK); ++ if (ctstate_mask) ++ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ++ ctstate, ctstate_mask); ++ if (mask->ct_mark) ++ mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG, ++ key->ct_mark, mask->ct_mark); ++ if (mask->ct_labels[0]) ++ mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ++ key->ct_labels[0], ++ mask->ct_labels[0]); ++ ++ return 0; ++} ++ ++int ++mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, ++ struct mlx5_esw_flow_attr *attr, ++ const struct flow_action_entry *act, ++ struct netlink_ext_ack *extack) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ ++ if (!ct_priv) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "offload of ct action isn't available"); ++ return -EOPNOTSUPP; ++ } ++ ++ attr->ct_attr.zone = act->ct.zone; ++ attr->ct_attr.ct_action = act->ct.action; ++ ++ return 0; ++} ++ ++/* We translate the tc filter with CT action to the following HW model: ++ * ++ * +-------------------+ +--------------------+ +--------------+ ++ * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +-----> ++ * + original match + | + tuple + zone match + | + fte_id match + | ++ * +-------------------+ | +--------------------+ | +--------------+ | ++ * v v v ++ * set chain miss mapping set mark original ++ * set fte_id set label filter ++ * set zone set established actions ++ * set tunnel_id do nat (if needed) ++ * do decap ++ */ ++static int ++__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_flow_spec *orig_spec, ++ struct mlx5_esw_flow_attr *attr, ++ struct mlx5_flow_handle **flow_rule) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT; ++ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {}; ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ struct mlx5_flow_spec post_ct_spec = {}; ++ struct mlx5_esw_flow_attr *pre_ct_attr; ++ struct mlx5_modify_hdr *mod_hdr; ++ struct mlx5_flow_handle *rule; ++ struct mlx5_ct_flow *ct_flow; ++ int chain_mapping = 0, err; ++ u32 fte_id = 1; ++ ++ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); ++ if (!ct_flow) ++ return -ENOMEM; ++ ++ err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id, ++ MLX5_FTE_ID_MAX, GFP_KERNEL); ++ if (err) { ++ netdev_warn(priv->netdev, ++ "Failed to allocate fte id, err: %d\n", err); ++ goto err_idr; ++ } ++ ct_flow->fte_id = fte_id; ++ ++ /* Base esw attributes of both rules on original rule attribute */ ++ pre_ct_attr = &ct_flow->pre_ct_attr; ++ memcpy(pre_ct_attr, attr, sizeof(*attr)); ++ memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr)); ++ ++ /* Modify the original rule's action to fwd and modify, leave decap */ ++ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP; ++ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | ++ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; ++ ++ /* Write chain miss tag for miss in ct table as we ++ * don't go though all prios of this chain as normal tc rules ++ * miss. 
++ */ ++ err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain, ++ &chain_mapping); ++ if (err) { ++ ct_dbg("Failed to get chain register mapping for chain"); ++ goto err_get_chain; ++ } ++ ct_flow->chain_mapping = chain_mapping; ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ++ CHAIN_TO_REG, chain_mapping); ++ if (err) { ++ ct_dbg("Failed to set chain register mapping"); ++ goto err_mapping; ++ } ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG, ++ attr->ct_attr.zone & ++ MLX5_CT_ZONE_MASK); ++ if (err) { ++ ct_dbg("Failed to set zone register mapping"); ++ goto err_mapping; ++ } ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ++ FTEID_TO_REG, fte_id); ++ if (err) { ++ ct_dbg("Failed to set fte_id register mapping"); ++ goto err_mapping; ++ } ++ ++ /* If original flow is decap, we do it before going into ct table ++ * so add a rewrite for the tunnel match_id. ++ */ ++ if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) && ++ attr->chain == 0) { ++ u32 tun_id = mlx5e_tc_get_flow_tun_id(flow); ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ++ TUNNEL_TO_REG, ++ tun_id); ++ if (err) { ++ ct_dbg("Failed to set tunnel register mapping"); ++ goto err_mapping; ++ } ++ } ++ ++ mod_hdr = mlx5_modify_header_alloc(esw->dev, ++ MLX5_FLOW_NAMESPACE_FDB, ++ pre_mod_acts.num_actions, ++ pre_mod_acts.actions); ++ if (IS_ERR(mod_hdr)) { ++ err = PTR_ERR(mod_hdr); ++ ct_dbg("Failed to create pre ct mod hdr"); ++ goto err_mapping; ++ } ++ pre_ct_attr->modify_hdr = mod_hdr; ++ ++ /* Post ct rule matches on fte_id and executes original rule's ++ * tc rule action ++ */ ++ mlx5e_tc_match_to_reg_match(&post_ct_spec, FTEID_TO_REG, ++ fte_id, MLX5_FTE_ID_MASK); ++ ++ /* Put post_ct rule on post_ct fdb */ ++ ct_flow->post_ct_attr.chain = 0; ++ ct_flow->post_ct_attr.prio = 0; ++ ct_flow->post_ct_attr.fdb = ct_priv->post_ct; ++ ++ ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE; ++ ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE; ++ ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); ++ rule = mlx5_eswitch_add_offloaded_rule(esw, &post_ct_spec, ++ &ct_flow->post_ct_attr); ++ ct_flow->post_ct_rule = rule; ++ if (IS_ERR(ct_flow->post_ct_rule)) { ++ err = PTR_ERR(ct_flow->post_ct_rule); ++ ct_dbg("Failed to add post ct rule"); ++ goto err_insert_post_ct; ++ } ++ ++ /* Change original rule point to ct table */ ++ pre_ct_attr->dest_chain = 0; ++ pre_ct_attr->dest_ft = nat ? 
ct_priv->ct_nat : ct_priv->ct; ++ ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw, ++ orig_spec, ++ pre_ct_attr); ++ if (IS_ERR(ct_flow->pre_ct_rule)) { ++ err = PTR_ERR(ct_flow->pre_ct_rule); ++ ct_dbg("Failed to add pre ct rule"); ++ goto err_insert_orig; ++ } ++ ++ attr->ct_attr.ct_flow = ct_flow; ++ *flow_rule = ct_flow->post_ct_rule; ++ dealloc_mod_hdr_actions(&pre_mod_acts); ++ ++ return 0; ++ ++err_insert_orig: ++ mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule, ++ &ct_flow->post_ct_attr); ++err_insert_post_ct: ++ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); ++err_mapping: ++ dealloc_mod_hdr_actions(&pre_mod_acts); ++ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); ++err_get_chain: ++ idr_remove(&ct_priv->fte_ids, fte_id); ++err_idr: ++ kfree(ct_flow); ++ netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); ++ return err; ++} ++ ++struct mlx5_flow_handle * ++mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_flow_spec *spec, ++ struct mlx5_esw_flow_attr *attr) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ struct mlx5_flow_handle *rule; ++ int err; ++ ++ if (!ct_priv) ++ return ERR_PTR(-EOPNOTSUPP); ++ ++ mutex_lock(&ct_priv->control_lock); ++ err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr, &rule); ++ mutex_unlock(&ct_priv->control_lock); ++ if (err) ++ return ERR_PTR(err); ++ ++ return rule; ++} ++ ++static void ++__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv, ++ struct mlx5_ct_flow *ct_flow) ++{ ++ struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr; ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ ++ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule, ++ pre_ct_attr); ++ mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr); ++ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule, ++ &ct_flow->post_ct_attr); ++ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); ++ idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); ++ kfree(ct_flow); ++} ++ ++void ++mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, ++ struct mlx5_esw_flow_attr *attr) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow; ++ ++ /* We are called on error to clean up stuff from parsing ++ * but we don't have anything for now ++ */ ++ if (!ct_flow) ++ return; ++ ++ mutex_lock(&ct_priv->control_lock); ++ __mlx5_tc_ct_delete_flow(ct_priv, ct_flow); ++ mutex_unlock(&ct_priv->control_lock); ++} ++ ++static int ++mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw, ++ const char **err_msg) ++{ ++#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) ++ /* cannot restore chain ID on HW miss */ ++ ++ *err_msg = "tc skb extension missing"; ++ return -EOPNOTSUPP; ++#endif ++ ++ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) { ++ *err_msg = "firmware level support is missing"; ++ return -EOPNOTSUPP; ++ } ++ ++ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) { ++ /* vlan workaround should be avoided for multi chain rules. ++ * This is just a sanity check as pop vlan action should ++ * be supported by any FW that supports ignore_flow_level ++ */ ++ ++ *err_msg = "firmware vlan actions support is missing"; ++ return -EOPNOTSUPP; ++ } ++ ++ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, ++ fdb_modify_header_fwd_to_table)) { ++ /* CT always writes to registers which are mod header actions. 
++ * Therefore, mod header and goto is required ++ */ ++ ++ *err_msg = "firmware fwd and modify support is missing"; ++ return -EOPNOTSUPP; ++ } ++ ++ if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) { ++ *err_msg = "register loopback isn't supported"; ++ return -EOPNOTSUPP; ++ } ++ ++ return 0; ++} ++ ++static void ++mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err) ++{ ++ if (msg) ++ netdev_warn(rpriv->netdev, ++ "tc ct offload not supported, %s, err: %d\n", ++ msg, err); ++ else ++ netdev_warn(rpriv->netdev, ++ "tc ct offload not supported, err: %d\n", ++ err); ++} ++ ++int ++mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) ++{ ++ struct mlx5_tc_ct_priv *ct_priv; ++ struct mlx5e_rep_priv *rpriv; ++ struct mlx5_eswitch *esw; ++ struct mlx5e_priv *priv; ++ const char *msg; ++ int err; ++ ++ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); ++ priv = netdev_priv(rpriv->netdev); ++ esw = priv->mdev->priv.eswitch; ++ ++ err = mlx5_tc_ct_init_check_support(esw, &msg); ++ if (err) { ++ mlx5_tc_ct_init_err(rpriv, msg, err); ++ goto err_support; ++ } ++ ++ ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL); ++ if (!ct_priv) { ++ mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM); ++ goto err_alloc; ++ } ++ ++ ct_priv->esw = esw; ++ ct_priv->netdev = rpriv->netdev; ++ ct_priv->ct = mlx5_esw_chains_create_global_table(esw); ++ if (IS_ERR(ct_priv->ct)) { ++ err = PTR_ERR(ct_priv->ct); ++ mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err); ++ goto err_ct_tbl; ++ } ++ ++ ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw); ++ if (IS_ERR(ct_priv->ct_nat)) { ++ err = PTR_ERR(ct_priv->ct_nat); ++ mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table", ++ err); ++ goto err_ct_nat_tbl; ++ } ++ ++ ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw); ++ if (IS_ERR(ct_priv->post_ct)) { ++ err = PTR_ERR(ct_priv->post_ct); ++ mlx5_tc_ct_init_err(rpriv, "failed to create post ct table", ++ err); ++ goto err_post_ct_tbl; ++ } ++ ++ idr_init(&ct_priv->fte_ids); ++ mutex_init(&ct_priv->control_lock); ++ ++ /* Done, set ct_priv to know it initializted */ ++ uplink_priv->ct_priv = ct_priv; ++ ++ return 0; ++ ++err_post_ct_tbl: ++ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat); ++err_ct_nat_tbl: ++ mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct); ++err_ct_tbl: ++ kfree(ct_priv); ++err_alloc: ++err_support: ++ ++ return 0; ++} ++ ++void ++mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv; ++ ++ if (!ct_priv) ++ return; ++ ++ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct); ++ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat); ++ mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct); ++ ++ mutex_destroy(&ct_priv->control_lock); ++ idr_destroy(&ct_priv->fte_ids); ++ kfree(ct_priv); ++ ++ uplink_priv->ct_priv = NULL; ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +new file mode 100644 +index 000000000000..3a8421671c23 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +@@ -0,0 +1,140 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2018 Mellanox Technologies. 
*/ ++ ++#ifndef __MLX5_EN_TC_CT_H__ ++#define __MLX5_EN_TC_CT_H__ ++ ++#include ++#include ++#include ++ ++struct mlx5_esw_flow_attr; ++struct mlx5_rep_uplink_priv; ++struct mlx5e_tc_flow; ++struct mlx5e_priv; ++ ++struct mlx5_ct_flow; ++ ++struct mlx5_ct_attr { ++ u16 zone; ++ u16 ct_action; ++ struct mlx5_ct_flow *ct_flow; ++}; ++ ++#define zone_to_reg_ct {\ ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\ ++ .moffset = 0,\ ++ .mlen = 2,\ ++ .soffset = MLX5_BYTE_OFF(fte_match_param,\ ++ misc_parameters_2.metadata_reg_c_2) + 2,\ ++} ++ ++#define ctstate_to_reg_ct {\ ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\ ++ .moffset = 2,\ ++ .mlen = 2,\ ++ .soffset = MLX5_BYTE_OFF(fte_match_param,\ ++ misc_parameters_2.metadata_reg_c_2),\ ++} ++ ++#define mark_to_reg_ct {\ ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\ ++ .moffset = 0,\ ++ .mlen = 4,\ ++ .soffset = MLX5_BYTE_OFF(fte_match_param,\ ++ misc_parameters_2.metadata_reg_c_3),\ ++} ++ ++#define labels_to_reg_ct {\ ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\ ++ .moffset = 0,\ ++ .mlen = 4,\ ++ .soffset = MLX5_BYTE_OFF(fte_match_param,\ ++ misc_parameters_2.metadata_reg_c_4),\ ++} ++ ++#define fteid_to_reg_ct {\ ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\ ++ .moffset = 0,\ ++ .mlen = 4,\ ++ .soffset = MLX5_BYTE_OFF(fte_match_param,\ ++ misc_parameters_2.metadata_reg_c_5),\ ++} ++ ++#if IS_ENABLED(CONFIG_MLX5_TC_CT) ++ ++int ++mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv); ++void ++mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv); ++ ++int ++mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, ++ struct mlx5_flow_spec *spec, ++ struct flow_cls_offload *f, ++ struct netlink_ext_ack *extack); ++int ++mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, ++ struct mlx5_esw_flow_attr *attr, ++ const struct flow_action_entry *act, ++ struct netlink_ext_ack *extack); ++ ++struct mlx5_flow_handle * ++mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_flow_spec *spec, ++ struct mlx5_esw_flow_attr *attr); ++void ++mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_esw_flow_attr *attr); ++ ++#else /* CONFIG_MLX5_TC_CT */ ++ ++static inline int ++mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) ++{ ++ return 0; ++} ++ ++static inline void ++mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) ++{ ++} ++ ++static inline int ++mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, ++ struct mlx5_flow_spec *spec, ++ struct flow_cls_offload *f, ++ struct netlink_ext_ack *extack) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline int ++mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, ++ struct mlx5_esw_flow_attr *attr, ++ const struct flow_action_entry *act, ++ struct netlink_ext_ack *extack) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline struct mlx5_flow_handle * ++mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_flow_spec *spec, ++ struct mlx5_esw_flow_attr *attr) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline void ++mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_esw_flow_attr *attr) ++{ ++} ++ ++#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */ ++#endif /* __MLX5_EN_TC_CT_H__ */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +index 100b9a2d3ea6..eccf61e1ac96 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h ++++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +@@ -55,6 +55,7 @@ struct mlx5e_neigh_update_table { + unsigned long min_interval; /* jiffies */ + }; + ++struct mlx5_tc_ct_priv; + struct mlx5_rep_uplink_priv { + /* Filters DB - instantiated by the uplink representor and shared by + * the uplink's VFs +@@ -85,6 +86,8 @@ struct mlx5_rep_uplink_priv { + struct mapping_ctx *tunnel_mapping; + /* maps tun_enc_opts to a unique id*/ + struct mapping_ctx *tunnel_enc_opts_mapping; ++ ++ struct mlx5_tc_ct_priv *ct_priv; + }; + + struct mlx5e_rep_priv { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 269258c5f961..3e15cb4992a6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -56,6 +56,7 @@ + #include "en/port.h" + #include "en/tc_tun.h" + #include "en/mapping.h" ++#include "en/tc_ct.h" + #include "lib/devcom.h" + #include "lib/geneve.h" + #include "diag/en_tc_tracepoint.h" +@@ -87,6 +88,7 @@ enum { + MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4, + MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5, + MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6, ++ MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7, + }; + + #define MLX5E_TC_MAX_SPLITS 1 +@@ -193,6 +195,11 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { + .soffset = MLX5_BYTE_OFF(fte_match_param, + misc_parameters_2.metadata_reg_c_1), + }, ++ [ZONE_TO_REG] = zone_to_reg_ct, ++ [CTSTATE_TO_REG] = ctstate_to_reg_ct, ++ [MARK_TO_REG] = mark_to_reg_ct, ++ [LABELS_TO_REG] = labels_to_reg_ct, ++ [FTEID_TO_REG] = fteid_to_reg_ct, + }; + + static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); +@@ -1144,6 +1151,10 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5_esw_flow_attr *attr) + { + struct mlx5_flow_handle *rule; ++ struct mlx5e_tc_mod_hdr_acts; ++ ++ if (flow_flag_test(flow, CT)) ++ return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr); + + rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + if (IS_ERR(rule)) +@@ -1163,10 +1174,15 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, + static void + mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, +- struct mlx5_esw_flow_attr *attr) ++ struct mlx5_esw_flow_attr *attr) + { + flow_flag_clear(flow, OFFLOADED); + ++ if (flow_flag_test(flow, CT)) { ++ mlx5_tc_ct_delete_flow(flow->priv, flow, attr); ++ return; ++ } ++ + if (attr->split_count) + mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); + +@@ -1938,6 +1954,11 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow) + enc_opts_id); + } + ++u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow) ++{ ++ return flow->tunnel_id; ++} ++ + static int parse_tunnel_attr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, +@@ -2103,6 +2124,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_TCP) | + BIT(FLOW_DISSECTOR_KEY_IP) | ++ BIT(FLOW_DISSECTOR_KEY_CT) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); +@@ -2914,7 +2936,9 @@ struct ipv6_hoplimit_word { + __u8 hop_limit; + }; + +-static bool is_action_keys_supported(const struct flow_action_entry *act) ++static int is_action_keys_supported(const struct flow_action_entry *act, ++ bool ct_flow, bool *modify_ip_header, ++ struct netlink_ext_ack *extack) + { 
+ u32 mask, offset; + u8 htype; +@@ -2933,7 +2957,13 @@ static bool is_action_keys_supported(const struct flow_action_entry *act) + if (offset != offsetof(struct iphdr, ttl) || + ttl_word->protocol || + ttl_word->check) { +- return true; ++ *modify_ip_header = true; ++ } ++ ++ if (ct_flow && offset >= offsetof(struct iphdr, saddr)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "can't offload re-write of ipv4 address with action ct"); ++ return -EOPNOTSUPP; + } + } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + struct ipv6_hoplimit_word *hoplimit_word = +@@ -2942,15 +2972,27 @@ static bool is_action_keys_supported(const struct flow_action_entry *act) + if (offset != offsetof(struct ipv6hdr, payload_len) || + hoplimit_word->payload_len || + hoplimit_word->nexthdr) { +- return true; ++ *modify_ip_header = true; ++ } ++ ++ if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "can't offload re-write of ipv6 address with action ct"); ++ return -EOPNOTSUPP; + } ++ } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || ++ htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "can't offload re-write of transport header ports with action ct"); ++ return -EOPNOTSUPP; + } +- return false; ++ ++ return 0; + } + + static bool modify_header_match_supported(struct mlx5_flow_spec *spec, + struct flow_action *flow_action, +- u32 actions, ++ u32 actions, bool ct_flow, + struct netlink_ext_ack *extack) + { + const struct flow_action_entry *act; +@@ -2958,7 +3000,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, + void *headers_v; + u16 ethertype; + u8 ip_proto; +- int i; ++ int i, err; + + headers_v = get_match_headers_value(actions, spec); + ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); +@@ -2973,10 +3015,10 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, + act->id != FLOW_ACTION_ADD) + continue; + +- if (is_action_keys_supported(act)) { +- modify_ip_header = true; +- break; +- } ++ err = is_action_keys_supported(act, ct_flow, ++ &modify_ip_header, extack); ++ if (err) ++ return err; + } + + ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); +@@ -2999,13 +3041,24 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + struct netlink_ext_ack *extack) + { + struct net_device *filter_dev = parse_attr->filter_dev; +- bool drop_action, pop_action; ++ bool drop_action, pop_action, ct_flow; + u32 actions; + +- if (mlx5e_is_eswitch_flow(flow)) ++ ct_flow = flow_flag_test(flow, CT); ++ if (mlx5e_is_eswitch_flow(flow)) { + actions = flow->esw_attr->action; +- else ++ ++ if (flow->esw_attr->split_count && ct_flow) { ++ /* All registers used by ct are cleared when using ++ * split rules. 
++ */ ++ NL_SET_ERR_MSG_MOD(extack, ++ "Can't offload mirroring with action ct"); ++ return -EOPNOTSUPP; ++ } ++ } else { + actions = flow->nic_attr->action; ++ } + + drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP; + pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; +@@ -3022,7 +3075,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + return modify_header_match_supported(&parse_attr->spec, + flow_action, actions, +- extack); ++ ct_flow, extack); + + return true; + } +@@ -3826,6 +3879,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + attr->dest_chain = act->chain_index; + break; ++ case FLOW_ACTION_CT: ++ err = mlx5_tc_ct_parse_action(priv, attr, act, extack); ++ if (err) ++ return err; ++ ++ flow_flag_set(flow, CT); ++ break; + default: + NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported"); + return -EOPNOTSUPP; +@@ -4066,6 +4126,10 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, + if (err) + goto err_free; + ++ err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack); ++ if (err) ++ goto err_free; ++ + err = mlx5e_tc_add_fdb_flow(priv, flow, extack); + complete_all(&flow->init_done); + if (err) { +@@ -4350,7 +4414,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, + goto errout; + } + +- if (mlx5e_is_offloaded_flow(flow)) { ++ if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) { + counter = mlx5e_tc_get_counter(flow); + if (!counter) + goto errout; +@@ -4617,6 +4681,10 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) + uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); + priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); + ++ err = mlx5_tc_ct_init(uplink_priv); ++ if (err) ++ goto err_ct; ++ + mapping = mapping_create(sizeof(struct tunnel_match_key), + TUNNEL_INFO_BITS_MASK, true); + if (IS_ERR(mapping)) { +@@ -4643,6 +4711,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) + err_enc_opts_mapping: + mapping_destroy(uplink_priv->tunnel_mapping); + err_tun_mapping: ++ mlx5_tc_ct_clean(uplink_priv); ++err_ct: + netdev_warn(priv->netdev, + "Failed to initialize tc (eswitch), err: %d", err); + return err; +@@ -4657,6 +4727,8 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) + uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); + mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); + mapping_destroy(uplink_priv->tunnel_mapping); ++ ++ mlx5_tc_ct_clean(uplink_priv); + } + + int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 21cbde472b64..31c9e81b9287 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -94,6 +94,11 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work); + enum mlx5e_tc_attr_to_reg { + CHAIN_TO_REG, + TUNNEL_TO_REG, ++ CTSTATE_TO_REG, ++ ZONE_TO_REG, ++ MARK_TO_REG, ++ LABELS_TO_REG, ++ FTEID_TO_REG, + }; + + struct mlx5e_tc_attr_to_reg_mapping { +@@ -139,6 +144,9 @@ int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); + void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); + ++struct mlx5e_tc_flow; ++u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow); ++ + #else /* CONFIG_MLX5_ESWITCH */ + static inline int 
mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } + static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index ecf3d7157d4d..79e4dfa5368d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -42,6 +42,9 @@ + #include + #include + #include "lib/mpfs.h" ++#ifndef __GENKSYMS__ ++# include "en/tc_ct.h" ++#endif + + #define FDB_TC_MAX_CHAIN 3 + #define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) +@@ -465,6 +468,7 @@ struct mlx5_esw_flow_attr { + u32 flags; + struct mlx5_flow_table *fdb; + struct mlx5_flow_table *dest_ft; ++ struct mlx5_ct_attr ct_attr; + #endif + struct mlx5e_tc_flow_parse_attr *parse_attr; + }; +-- +2.13.6 + diff --git a/SOURCES/0244-netdrv-net-mlx5e-CT-Offload-established-flows.patch b/SOURCES/0244-netdrv-net-mlx5e-CT-Offload-established-flows.patch new file mode 100644 index 0000000..fd5ed33 --- /dev/null +++ b/SOURCES/0244-netdrv-net-mlx5e-CT-Offload-established-flows.patch @@ -0,0 +1,842 @@ +From fd5d5ebb952ea760a85e507918dd849f1816ea9e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:06 -0400 +Subject: [PATCH 244/312] [netdrv] net/mlx5e: CT: Offload established flows + +Message-id: <20200519074934.6303-36-ahleihel@redhat.com> +Patchwork-id: 310537 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 35/63] net/mlx5e: CT: Offload established flows +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit ac991b48d43ce52ce1a43602068d641d232b03dd +Author: Paul Blakey +Date: Thu Mar 12 12:23:15 2020 +0200 + + net/mlx5e: CT: Offload established flows + + Register driver callbacks with the nf flow table platform. + FT add/delete events will create/delete FTE in the CT/CT_NAT tables. + + Restoring the CT state on miss will be added in the following patch. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Roi Dayan + Reviewed-by: Jiri Pirko + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 688 +++++++++++++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 3 + + 2 files changed, 691 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index c1130460bb60..e9826e379aca 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + + #include "en/tc_ct.h" +@@ -34,6 +35,7 @@ struct mlx5_tc_ct_priv { + struct mlx5_eswitch *esw; + const struct net_device *netdev; + struct idr fte_ids; ++ struct rhashtable zone_ht; + struct mlx5_flow_table *ct; + struct mlx5_flow_table *ct_nat; + struct mlx5_flow_table *post_ct; +@@ -45,10 +47,53 @@ struct mlx5_ct_flow { + struct mlx5_esw_flow_attr post_ct_attr; + struct mlx5_flow_handle *pre_ct_rule; + struct mlx5_flow_handle *post_ct_rule; ++ struct mlx5_ct_ft *ft; + u32 fte_id; + u32 chain_mapping; + }; + ++struct mlx5_ct_zone_rule { ++ struct mlx5_flow_handle *rule; ++ struct mlx5_esw_flow_attr attr; ++ bool nat; ++}; ++ ++struct mlx5_ct_ft { ++ struct rhash_head node; ++ u16 zone; ++ refcount_t refcount; ++ struct nf_flowtable *nf_ft; ++ struct mlx5_tc_ct_priv *ct_priv; ++ struct rhashtable ct_entries_ht; ++ struct list_head ct_entries_list; ++}; ++ ++struct mlx5_ct_entry { ++ struct list_head list; ++ u16 zone; ++ struct rhash_head node; ++ struct flow_rule *flow_rule; ++ struct mlx5_fc *counter; ++ unsigned long lastuse; ++ unsigned long cookie; ++ struct mlx5_ct_zone_rule zone_rules[2]; ++}; ++ ++static const struct rhashtable_params cts_ht_params = { ++ .head_offset = offsetof(struct mlx5_ct_entry, node), ++ .key_offset = offsetof(struct mlx5_ct_entry, cookie), ++ .key_len = sizeof(((struct mlx5_ct_entry *)0)->cookie), ++ .automatic_shrinking = true, ++ .min_size = 16 * 1024, ++}; ++ ++static const struct rhashtable_params zone_params = { ++ .head_offset = offsetof(struct mlx5_ct_ft, node), ++ .key_offset = offsetof(struct mlx5_ct_ft, zone), ++ .key_len = sizeof(((struct mlx5_ct_ft *)0)->zone), ++ .automatic_shrinking = true, ++}; ++ + static struct mlx5_tc_ct_priv * + mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv) + { +@@ -61,6 +106,561 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv) + return uplink_priv->ct_priv; + } + ++static int ++mlx5_tc_ct_set_tuple_match(struct mlx5_flow_spec *spec, ++ struct flow_rule *rule) ++{ ++ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, ++ outer_headers); ++ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, ++ outer_headers); ++ u16 addr_type = 0; ++ u8 ip_proto = 0; ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { ++ struct flow_match_basic match; ++ ++ flow_rule_match_basic(rule, &match); ++ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, ++ ntohs(match.mask->n_proto)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ++ ntohs(match.key->n_proto)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, ++ match.mask->ip_proto); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, ++ match.key->ip_proto); ++ ++ ip_proto = match.key->ip_proto; ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { ++ struct flow_match_control match; ++ ++ flow_rule_match_control(rule, &match); ++ addr_type = match.key->addr_type; ++ } ++ ++ if (addr_type == 
FLOW_DISSECTOR_KEY_IPV4_ADDRS) { ++ struct flow_match_ipv4_addrs match; ++ ++ flow_rule_match_ipv4_addrs(rule, &match); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, ++ src_ipv4_src_ipv6.ipv4_layout.ipv4), ++ &match.mask->src, sizeof(match.mask->src)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ src_ipv4_src_ipv6.ipv4_layout.ipv4), ++ &match.key->src, sizeof(match.key->src)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, ++ dst_ipv4_dst_ipv6.ipv4_layout.ipv4), ++ &match.mask->dst, sizeof(match.mask->dst)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ dst_ipv4_dst_ipv6.ipv4_layout.ipv4), ++ &match.key->dst, sizeof(match.key->dst)); ++ } ++ ++ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { ++ struct flow_match_ipv6_addrs match; ++ ++ flow_rule_match_ipv6_addrs(rule, &match); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, ++ src_ipv4_src_ipv6.ipv6_layout.ipv6), ++ &match.mask->src, sizeof(match.mask->src)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ src_ipv4_src_ipv6.ipv6_layout.ipv6), ++ &match.key->src, sizeof(match.key->src)); ++ ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, ++ dst_ipv4_dst_ipv6.ipv6_layout.ipv6), ++ &match.mask->dst, sizeof(match.mask->dst)); ++ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ++ dst_ipv4_dst_ipv6.ipv6_layout.ipv6), ++ &match.key->dst, sizeof(match.key->dst)); ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { ++ struct flow_match_ports match; ++ ++ flow_rule_match_ports(rule, &match); ++ switch (ip_proto) { ++ case IPPROTO_TCP: ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ++ tcp_sport, ntohs(match.mask->src)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ++ tcp_sport, ntohs(match.key->src)); ++ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ++ tcp_dport, ntohs(match.mask->dst)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ++ tcp_dport, ntohs(match.key->dst)); ++ break; ++ ++ case IPPROTO_UDP: ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ++ udp_sport, ntohs(match.mask->src)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ++ udp_sport, ntohs(match.key->src)); ++ ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ++ udp_dport, ntohs(match.mask->dst)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ++ udp_dport, ntohs(match.key->dst)); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { ++ struct flow_match_tcp match; ++ ++ flow_rule_match_tcp(rule, &match); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, ++ ntohs(match.mask->flags)); ++ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, ++ ntohs(match.key->flags)); ++ } ++ ++ return 0; ++} ++ ++static void ++mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, ++ struct mlx5_ct_entry *entry, ++ bool nat) ++{ ++ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat]; ++ struct mlx5_esw_flow_attr *attr = &zone_rule->attr; ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ ++ ct_dbg("Deleting ct entry rule in zone %d", entry->zone); ++ ++ mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr); ++ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); ++} ++ ++static void ++mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv, ++ struct mlx5_ct_entry *entry) ++{ ++ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true); ++ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); ++ ++ mlx5_fc_destroy(ct_priv->esw->dev, entry->counter); ++} ++ ++static struct flow_action_entry * ++mlx5_tc_ct_get_ct_metadata_action(struct 
flow_rule *flow_rule) ++{ ++ struct flow_action *flow_action = &flow_rule->action; ++ struct flow_action_entry *act; ++ int i; ++ ++ flow_action_for_each(i, act, flow_action) { ++ if (act->id == FLOW_ACTION_CT_METADATA) ++ return act; ++ } ++ ++ return NULL; ++} ++ ++static int ++mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, ++ struct mlx5e_tc_mod_hdr_acts *mod_acts, ++ u8 ct_state, ++ u32 mark, ++ u32 label) ++{ ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ int err; ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, ++ CTSTATE_TO_REG, ct_state); ++ if (err) ++ return err; ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, ++ MARK_TO_REG, mark); ++ if (err) ++ return err; ++ ++ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, ++ LABELS_TO_REG, label); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++static int ++mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act, ++ char *modact) ++{ ++ u32 offset = act->mangle.offset, field; ++ ++ switch (act->mangle.htype) { ++ case FLOW_ACT_MANGLE_HDR_TYPE_IP4: ++ MLX5_SET(set_action_in, modact, length, 0); ++ if (offset == offsetof(struct iphdr, saddr)) ++ field = MLX5_ACTION_IN_FIELD_OUT_SIPV4; ++ else if (offset == offsetof(struct iphdr, daddr)) ++ field = MLX5_ACTION_IN_FIELD_OUT_DIPV4; ++ else ++ return -EOPNOTSUPP; ++ break; ++ ++ case FLOW_ACT_MANGLE_HDR_TYPE_IP6: ++ MLX5_SET(set_action_in, modact, length, 0); ++ if (offset == offsetof(struct ipv6hdr, saddr)) ++ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0; ++ else if (offset == offsetof(struct ipv6hdr, saddr) + 4) ++ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32; ++ else if (offset == offsetof(struct ipv6hdr, saddr) + 8) ++ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64; ++ else if (offset == offsetof(struct ipv6hdr, saddr) + 12) ++ field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96; ++ else if (offset == offsetof(struct ipv6hdr, daddr)) ++ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0; ++ else if (offset == offsetof(struct ipv6hdr, daddr) + 4) ++ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32; ++ else if (offset == offsetof(struct ipv6hdr, daddr) + 8) ++ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64; ++ else if (offset == offsetof(struct ipv6hdr, daddr) + 12) ++ field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96; ++ else ++ return -EOPNOTSUPP; ++ break; ++ ++ case FLOW_ACT_MANGLE_HDR_TYPE_TCP: ++ MLX5_SET(set_action_in, modact, length, 16); ++ if (offset == offsetof(struct tcphdr, source)) ++ field = MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT; ++ else if (offset == offsetof(struct tcphdr, dest)) ++ field = MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT; ++ else ++ return -EOPNOTSUPP; ++ break; ++ ++ case FLOW_ACT_MANGLE_HDR_TYPE_UDP: ++ MLX5_SET(set_action_in, modact, length, 16); ++ if (offset == offsetof(struct udphdr, source)) ++ field = MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT; ++ else if (offset == offsetof(struct udphdr, dest)) ++ field = MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT; ++ else ++ return -EOPNOTSUPP; ++ break; ++ ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); ++ MLX5_SET(set_action_in, modact, offset, 0); ++ MLX5_SET(set_action_in, modact, field, field); ++ MLX5_SET(set_action_in, modact, data, act->mangle.val); ++ ++ return 0; ++} ++ ++static int ++mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv, ++ struct flow_rule *flow_rule, ++ struct mlx5e_tc_mod_hdr_acts *mod_acts) ++{ ++ struct flow_action *flow_action = &flow_rule->action; ++ struct mlx5_core_dev *mdev = ct_priv->esw->dev; ++ struct 
flow_action_entry *act; ++ size_t action_size; ++ char *modact; ++ int err, i; ++ ++ action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); ++ ++ flow_action_for_each(i, act, flow_action) { ++ switch (act->id) { ++ case FLOW_ACTION_MANGLE: { ++ err = alloc_mod_hdr_actions(mdev, ++ MLX5_FLOW_NAMESPACE_FDB, ++ mod_acts); ++ if (err) ++ return err; ++ ++ modact = mod_acts->actions + ++ mod_acts->num_actions * action_size; ++ ++ err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact); ++ if (err) ++ return err; ++ ++ mod_acts->num_actions++; ++ } ++ break; ++ ++ case FLOW_ACTION_CT_METADATA: ++ /* Handled earlier */ ++ continue; ++ default: ++ return -EOPNOTSUPP; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ++mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, ++ struct mlx5_esw_flow_attr *attr, ++ struct flow_rule *flow_rule, ++ bool nat) ++{ ++ struct mlx5e_tc_mod_hdr_acts mod_acts = {}; ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ struct mlx5_modify_hdr *mod_hdr; ++ struct flow_action_entry *meta; ++ int err; ++ ++ meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule); ++ if (!meta) ++ return -EOPNOTSUPP; ++ ++ if (meta->ct_metadata.labels[1] || ++ meta->ct_metadata.labels[2] || ++ meta->ct_metadata.labels[3]) { ++ ct_dbg("Failed to offload ct entry due to unsupported label"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (nat) { ++ err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, ++ &mod_acts); ++ if (err) ++ goto err_mapping; ++ } ++ ++ err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts, ++ (MLX5_CT_STATE_ESTABLISHED_BIT | ++ MLX5_CT_STATE_TRK_BIT), ++ meta->ct_metadata.mark, ++ meta->ct_metadata.labels[0]); ++ if (err) ++ goto err_mapping; ++ ++ mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, ++ mod_acts.num_actions, ++ mod_acts.actions); ++ if (IS_ERR(mod_hdr)) { ++ err = PTR_ERR(mod_hdr); ++ goto err_mapping; ++ } ++ attr->modify_hdr = mod_hdr; ++ ++ dealloc_mod_hdr_actions(&mod_acts); ++ return 0; ++ ++err_mapping: ++ dealloc_mod_hdr_actions(&mod_acts); ++ return err; ++} ++ ++static int ++mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, ++ struct flow_rule *flow_rule, ++ struct mlx5_ct_entry *entry, ++ bool nat) ++{ ++ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat]; ++ struct mlx5_esw_flow_attr *attr = &zone_rule->attr; ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ struct mlx5_flow_spec spec = {}; ++ int err; ++ ++ zone_rule->nat = nat; ++ ++ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, nat); ++ if (err) { ++ ct_dbg("Failed to create ct entry mod hdr"); ++ return err; ++ } ++ ++ attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | ++ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | ++ MLX5_FLOW_CONTEXT_ACTION_COUNT; ++ attr->dest_chain = 0; ++ attr->dest_ft = ct_priv->post_ct; ++ attr->fdb = nat ? 
ct_priv->ct_nat : ct_priv->ct; ++ attr->outer_match_level = MLX5_MATCH_L4; ++ attr->counter = entry->counter; ++ attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT; ++ ++ mlx5_tc_ct_set_tuple_match(&spec, flow_rule); ++ mlx5e_tc_match_to_reg_match(&spec, ZONE_TO_REG, ++ entry->zone & MLX5_CT_ZONE_MASK, ++ MLX5_CT_ZONE_MASK); ++ ++ zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, &spec, attr); ++ if (IS_ERR(zone_rule->rule)) { ++ err = PTR_ERR(zone_rule->rule); ++ ct_dbg("Failed to add ct entry rule, nat: %d", nat); ++ goto err_rule; ++ } ++ ++ ct_dbg("Offloaded ct entry rule in zone %d", entry->zone); ++ ++ return 0; ++ ++err_rule: ++ mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); ++ return err; ++} ++ ++static int ++mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, ++ struct flow_rule *flow_rule, ++ struct mlx5_ct_entry *entry) ++{ ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ int err; ++ ++ entry->counter = mlx5_fc_create(esw->dev, true); ++ if (IS_ERR(entry->counter)) { ++ err = PTR_ERR(entry->counter); ++ ct_dbg("Failed to create counter for ct entry"); ++ return err; ++ } ++ ++ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false); ++ if (err) ++ goto err_orig; ++ ++ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true); ++ if (err) ++ goto err_nat; ++ ++ return 0; ++ ++err_nat: ++ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); ++err_orig: ++ mlx5_fc_destroy(esw->dev, entry->counter); ++ return err; ++} ++ ++static int ++mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, ++ struct flow_cls_offload *flow) ++{ ++ struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow); ++ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv; ++ struct flow_action_entry *meta_action; ++ unsigned long cookie = flow->cookie; ++ struct mlx5_ct_entry *entry; ++ int err; ++ ++ meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule); ++ if (!meta_action) ++ return -EOPNOTSUPP; ++ ++ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, ++ cts_ht_params); ++ if (entry) ++ return 0; ++ ++ entry = kzalloc(sizeof(*entry), GFP_KERNEL); ++ if (!entry) ++ return -ENOMEM; ++ ++ entry->zone = ft->zone; ++ entry->flow_rule = flow_rule; ++ entry->cookie = flow->cookie; ++ ++ err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry); ++ if (err) ++ goto err_rules; ++ ++ err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node, ++ cts_ht_params); ++ if (err) ++ goto err_insert; ++ ++ list_add(&entry->list, &ft->ct_entries_list); ++ ++ return 0; ++ ++err_insert: ++ mlx5_tc_ct_entry_del_rules(ct_priv, entry); ++err_rules: ++ kfree(entry); ++ netdev_warn(ct_priv->netdev, ++ "Failed to offload ct entry, err: %d\n", err); ++ return err; ++} ++ ++static int ++mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft, ++ struct flow_cls_offload *flow) ++{ ++ unsigned long cookie = flow->cookie; ++ struct mlx5_ct_entry *entry; ++ ++ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, ++ cts_ht_params); ++ if (!entry) ++ return -ENOENT; ++ ++ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry); ++ WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht, ++ &entry->node, ++ cts_ht_params)); ++ list_del(&entry->list); ++ kfree(entry); ++ ++ return 0; ++} ++ ++static int ++mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, ++ struct flow_cls_offload *f) ++{ ++ unsigned long cookie = f->cookie; ++ struct mlx5_ct_entry *entry; ++ u64 lastuse, packets, bytes; ++ ++ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, ++ cts_ht_params); ++ if (!entry) ++ return 
-ENOENT; ++ ++ mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse); ++ flow_stats_update(&f->stats, bytes, packets, lastuse); ++ ++ return 0; ++} ++ ++static int ++mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data, ++ void *cb_priv) ++{ ++ struct flow_cls_offload *f = type_data; ++ struct mlx5_ct_ft *ft = cb_priv; ++ ++ if (type != TC_SETUP_CLSFLOWER) ++ return -EOPNOTSUPP; ++ ++ switch (f->command) { ++ case FLOW_CLS_REPLACE: ++ return mlx5_tc_ct_block_flow_offload_add(ft, f); ++ case FLOW_CLS_DESTROY: ++ return mlx5_tc_ct_block_flow_offload_del(ft, f); ++ case FLOW_CLS_STATS: ++ return mlx5_tc_ct_block_flow_offload_stats(ft, f); ++ default: ++ break; ++ }; ++ ++ return -EOPNOTSUPP; ++} ++ + int + mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, +@@ -159,10 +759,82 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, + + attr->ct_attr.zone = act->ct.zone; + attr->ct_attr.ct_action = act->ct.action; ++ attr->ct_attr.nf_ft = act->ct.flow_table; + + return 0; + } + ++static struct mlx5_ct_ft * ++mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, ++ struct nf_flowtable *nf_ft) ++{ ++ struct mlx5_ct_ft *ft; ++ int err; ++ ++ ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params); ++ if (ft) { ++ refcount_inc(&ft->refcount); ++ return ft; ++ } ++ ++ ft = kzalloc(sizeof(*ft), GFP_KERNEL); ++ if (!ft) ++ return ERR_PTR(-ENOMEM); ++ ++ ft->zone = zone; ++ ft->nf_ft = nf_ft; ++ ft->ct_priv = ct_priv; ++ INIT_LIST_HEAD(&ft->ct_entries_list); ++ refcount_set(&ft->refcount, 1); ++ ++ err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params); ++ if (err) ++ goto err_init; ++ ++ err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node, ++ zone_params); ++ if (err) ++ goto err_insert; ++ ++ err = nf_flow_table_offload_add_cb(ft->nf_ft, ++ mlx5_tc_ct_block_flow_offload, ft); ++ if (err) ++ goto err_add_cb; ++ ++ return ft; ++ ++err_add_cb: ++ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params); ++err_insert: ++ rhashtable_destroy(&ft->ct_entries_ht); ++err_init: ++ kfree(ft); ++ return ERR_PTR(err); ++} ++ ++static void ++mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) ++{ ++ struct mlx5_ct_entry *entry; ++ ++ list_for_each_entry(entry, &ft->ct_entries_list, list) ++ mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry); ++} ++ ++static void ++mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) ++{ ++ if (!refcount_dec_and_test(&ft->refcount)) ++ return; ++ ++ nf_flow_table_offload_del_cb(ft->nf_ft, ++ mlx5_tc_ct_block_flow_offload, ft); ++ mlx5_tc_ct_flush_ft(ct_priv, ft); ++ rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params); ++ rhashtable_destroy(&ft->ct_entries_ht); ++ kfree(ft); ++} ++ + /* We translate the tc filter with CT action to the following HW model: + * + * +-------------------+ +--------------------+ +--------------+ +@@ -193,12 +865,23 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5_flow_handle *rule; + struct mlx5_ct_flow *ct_flow; + int chain_mapping = 0, err; ++ struct mlx5_ct_ft *ft; + u32 fte_id = 1; + + ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); + if (!ct_flow) + return -ENOMEM; + ++ /* Register for CT established events */ ++ ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone, ++ attr->ct_attr.nf_ft); ++ if (IS_ERR(ft)) { ++ err = PTR_ERR(ft); ++ ct_dbg("Failed to register to ft callback"); ++ goto err_ft; ++ } ++ ct_flow->ft = ft; ++ + err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id, 
+ MLX5_FTE_ID_MAX, GFP_KERNEL); + if (err) { +@@ -331,6 +1014,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + err_get_chain: + idr_remove(&ct_priv->fte_ids, fte_id); + err_idr: ++ mlx5_tc_ct_del_ft_cb(ct_priv, ft); ++err_ft: + kfree(ct_flow); + netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); + return err; +@@ -372,6 +1057,7 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv, + &ct_flow->post_ct_attr); + mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); + idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); ++ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); + kfree(ct_flow); + } + +@@ -503,6 +1189,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) + + idr_init(&ct_priv->fte_ids); + mutex_init(&ct_priv->control_lock); ++ rhashtable_init(&ct_priv->zone_ht, &zone_params); + + /* Done, set ct_priv to know it initializted */ + uplink_priv->ct_priv = ct_priv; +@@ -533,6 +1220,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) + mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat); + mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct); + ++ rhashtable_destroy(&ct_priv->zone_ht); + mutex_destroy(&ct_priv->control_lock); + idr_destroy(&ct_priv->fte_ids); + kfree(ct_priv); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +index 3a8421671c23..f4bfda77f01a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +@@ -15,10 +15,13 @@ struct mlx5e_priv; + + struct mlx5_ct_flow; + ++struct nf_flowtable; ++ + struct mlx5_ct_attr { + u16 zone; + u16 ct_action; + struct mlx5_ct_flow *ct_flow; ++ struct nf_flowtable *nf_ft; + }; + + #define zone_to_reg_ct {\ +-- +2.13.6 + diff --git a/SOURCES/0245-netdrv-net-mlx5e-CT-Handle-misses-after-executing-CT.patch b/SOURCES/0245-netdrv-net-mlx5e-CT-Handle-misses-after-executing-CT.patch new file mode 100644 index 0000000..cb322d9 --- /dev/null +++ b/SOURCES/0245-netdrv-net-mlx5e-CT-Handle-misses-after-executing-CT.patch @@ -0,0 +1,310 @@ +From 14ce1ac023ad7ab9a3db5f1a1158f5c341a26361 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:07 -0400 +Subject: [PATCH 245/312] [netdrv] net/mlx5e: CT: Handle misses after executing + CT action + +Message-id: <20200519074934.6303-37-ahleihel@redhat.com> +Patchwork-id: 310556 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 36/63] net/mlx5e: CT: Handle misses after executing CT action +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit 5c6b9460474464c37c2d56df9e4c7044a042888b +Author: Paul Blakey +Date: Thu Mar 12 12:23:16 2020 +0200 + + net/mlx5e: CT: Handle misses after executing CT action + + Mark packets with a unique tupleid, and on miss use that id to get + the act ct restore_cookie. Using that restore cookie, we ask CT to + restore the relevant info on the SKB. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Reviewed-by: Roi Dayan + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 59 ++++++++++++++++++++-- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 25 +++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 12 ++++- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 1 + + 4 files changed, 92 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index e9826e379aca..c75dc97fd3a7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -35,6 +35,7 @@ struct mlx5_tc_ct_priv { + struct mlx5_eswitch *esw; + const struct net_device *netdev; + struct idr fte_ids; ++ struct idr tuple_ids; + struct rhashtable zone_ht; + struct mlx5_flow_table *ct; + struct mlx5_flow_table *ct_nat; +@@ -55,6 +56,7 @@ struct mlx5_ct_flow { + struct mlx5_ct_zone_rule { + struct mlx5_flow_handle *rule; + struct mlx5_esw_flow_attr attr; ++ int tupleid; + bool nat; + }; + +@@ -76,6 +78,7 @@ struct mlx5_ct_entry { + struct mlx5_fc *counter; + unsigned long lastuse; + unsigned long cookie; ++ unsigned long restore_cookie; + struct mlx5_ct_zone_rule zone_rules[2]; + }; + +@@ -237,6 +240,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, + + mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr); + mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); ++ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid); + } + + static void +@@ -269,7 +273,8 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5e_tc_mod_hdr_acts *mod_acts, + u8 ct_state, + u32 mark, +- u32 label) ++ u32 label, ++ u32 tupleid) + { + struct mlx5_eswitch *esw = ct_priv->esw; + int err; +@@ -289,6 +294,11 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, + if (err) + return err; + ++ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, ++ TUPLEID_TO_REG, tupleid); ++ if (err) ++ return err; ++ + return 0; + } + +@@ -412,6 +422,7 @@ static int + mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_esw_flow_attr *attr, + struct flow_rule *flow_rule, ++ u32 tupleid, + bool nat) + { + struct mlx5e_tc_mod_hdr_acts mod_acts = {}; +@@ -442,7 +453,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, + (MLX5_CT_STATE_ESTABLISHED_BIT | + MLX5_CT_STATE_TRK_BIT), + meta->ct_metadata.mark, +- meta->ct_metadata.labels[0]); ++ meta->ct_metadata.labels[0], ++ tupleid); + if (err) + goto err_mapping; + +@@ -473,15 +485,27 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_esw_flow_attr *attr = &zone_rule->attr; + struct mlx5_eswitch *esw = ct_priv->esw; + struct mlx5_flow_spec spec = {}; ++ u32 tupleid = 1; + int err; + + zone_rule->nat = nat; + +- err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, nat); ++ /* Get tuple unique id */ ++ err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid, ++ TUPLE_ID_MAX, GFP_KERNEL); + if (err) { +- ct_dbg("Failed to create ct entry mod hdr"); ++ netdev_warn(ct_priv->netdev, ++ "Failed to allocate tuple id, err: %d\n", err); + return err; + } ++ zone_rule->tupleid = tupleid; ++ ++ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, ++ tupleid, nat); ++ if (err) { ++ ct_dbg("Failed to create ct entry mod hdr"); ++ goto err_mod_hdr; ++ } + + attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | +@@ -511,6 +535,8 @@ 
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + + err_rule: + mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); ++err_mod_hdr: ++ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid); + return err; + } + +@@ -573,6 +599,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, + entry->zone = ft->zone; + entry->flow_rule = flow_rule; + entry->cookie = flow->cookie; ++ entry->restore_cookie = meta_action->ct_metadata.cookie; + + err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry); + if (err) +@@ -1188,6 +1215,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) + } + + idr_init(&ct_priv->fte_ids); ++ idr_init(&ct_priv->tuple_ids); + mutex_init(&ct_priv->control_lock); + rhashtable_init(&ct_priv->zone_ht, &zone_params); + +@@ -1222,8 +1250,31 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) + + rhashtable_destroy(&ct_priv->zone_ht); + mutex_destroy(&ct_priv->control_lock); ++ idr_destroy(&ct_priv->tuple_ids); + idr_destroy(&ct_priv->fte_ids); + kfree(ct_priv); + + uplink_priv->ct_priv = NULL; + } ++ ++bool ++mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, ++ struct sk_buff *skb, u32 tupleid) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv; ++ struct mlx5_ct_zone_rule *zone_rule; ++ struct mlx5_ct_entry *entry; ++ ++ if (!ct_priv || !tupleid) ++ return true; ++ ++ zone_rule = idr_find(&ct_priv->tuple_ids, tupleid); ++ if (!zone_rule) ++ return false; ++ ++ entry = container_of(zone_rule, struct mlx5_ct_entry, ++ zone_rules[zone_rule->nat]); ++ tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie); ++ ++ return true; ++} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +index f4bfda77f01a..464c86595309 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +@@ -64,6 +64,17 @@ struct mlx5_ct_attr { + misc_parameters_2.metadata_reg_c_5),\ + } + ++#define tupleid_to_reg_ct {\ ++ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\ ++ .moffset = 0,\ ++ .mlen = 3,\ ++ .soffset = MLX5_BYTE_OFF(fte_match_param,\ ++ misc_parameters_2.metadata_reg_c_1),\ ++} ++ ++#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8) ++#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0) ++ + #if IS_ENABLED(CONFIG_MLX5_TC_CT) + + int +@@ -92,6 +103,10 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_esw_flow_attr *attr); + ++bool ++mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, ++ struct sk_buff *skb, u32 tupleid); ++ + #else /* CONFIG_MLX5_TC_CT */ + + static inline int +@@ -139,5 +154,15 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, + { + } + ++static inline bool ++mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, ++ struct sk_buff *skb, u32 tupleid) ++{ ++ if (!tupleid) ++ return true; ++ ++ return false; ++} ++ + #endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */ + #endif /* __MLX5_EN_TC_CT_H__ */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 3e15cb4992a6..f88b199487d2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -200,6 +200,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { + [MARK_TO_REG] = mark_to_reg_ct, + [LABELS_TO_REG] = labels_to_reg_ct, + [FTEID_TO_REG] = fteid_to_reg_ct, ++ [TUPLEID_TO_REG] = tupleid_to_reg_ct, + 
}; + + static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); +@@ -4846,7 +4847,9 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, + struct mlx5e_tc_update_priv *tc_priv) + { + #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +- u32 chain = 0, reg_c0, reg_c1, tunnel_id; ++ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id; ++ struct mlx5_rep_uplink_priv *uplink_priv; ++ struct mlx5e_rep_priv *uplink_rpriv; + struct tc_skb_ext *tc_skb_ext; + struct mlx5_eswitch *esw; + struct mlx5e_priv *priv; +@@ -4880,6 +4883,13 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe, + } + + tc_skb_ext->chain = chain; ++ ++ tuple_id = reg_c1 & TUPLE_ID_MAX; ++ ++ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); ++ uplink_priv = &uplink_rpriv->uplink_priv; ++ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id)) ++ return false; + } + + tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +index 31c9e81b9287..abdcfa4c4e0e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +@@ -99,6 +99,7 @@ enum mlx5e_tc_attr_to_reg { + MARK_TO_REG, + LABELS_TO_REG, + FTEID_TO_REG, ++ TUPLEID_TO_REG, + }; + + struct mlx5e_tc_attr_to_reg_mapping { +-- +2.13.6 + diff --git a/SOURCES/0246-netdrv-net-mlx5e-CT-Support-clear-action.patch b/SOURCES/0246-netdrv-net-mlx5e-CT-Support-clear-action.patch new file mode 100644 index 0000000..f760f36 --- /dev/null +++ b/SOURCES/0246-netdrv-net-mlx5e-CT-Support-clear-action.patch @@ -0,0 +1,220 @@ +From 0adbd26f7bfb53e99b9726e4c4ba399ce91fb7b5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:08 -0400 +Subject: [PATCH 246/312] [netdrv] net/mlx5e: CT: Support clear action + +Message-id: <20200519074934.6303-38-ahleihel@redhat.com> +Patchwork-id: 310539 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 37/63] net/mlx5e: CT: Support clear action +Bugzilla: 1790219 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Bugzilla: http://bugzilla.redhat.com/1790219 +Upstream: v5.7-rc1 + +commit 1ef3018f5af3da6376fae546e4dfc3f05f063815 +Author: Paul Blakey +Date: Thu Mar 12 12:23:17 2020 +0200 + + net/mlx5e: CT: Support clear action + + Clear action, as with software, removes all ct metadata from + the packet. + + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 90 ++++++++++++++++++++-- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 7 +- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10 ++- + 3 files changed, 95 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index c75dc97fd3a7..956d9ddcdeed 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -1048,12 +1048,79 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + return err; + } + ++static int ++__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv, ++ struct mlx5e_tc_flow *flow, ++ struct mlx5_flow_spec *orig_spec, ++ struct mlx5_esw_flow_attr *attr, ++ struct mlx5e_tc_mod_hdr_acts *mod_acts, ++ struct mlx5_flow_handle **flow_rule) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ struct mlx5_eswitch *esw = ct_priv->esw; ++ struct mlx5_esw_flow_attr *pre_ct_attr; ++ struct mlx5_modify_hdr *mod_hdr; ++ struct mlx5_flow_handle *rule; ++ struct mlx5_ct_flow *ct_flow; ++ int err; ++ ++ ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); ++ if (!ct_flow) ++ return -ENOMEM; ++ ++ /* Base esw attributes on original rule attribute */ ++ pre_ct_attr = &ct_flow->pre_ct_attr; ++ memcpy(pre_ct_attr, attr, sizeof(*attr)); ++ ++ err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0); ++ if (err) { ++ ct_dbg("Failed to set register for ct clear"); ++ goto err_set_registers; ++ } ++ ++ mod_hdr = mlx5_modify_header_alloc(esw->dev, ++ MLX5_FLOW_NAMESPACE_FDB, ++ mod_acts->num_actions, ++ mod_acts->actions); ++ if (IS_ERR(mod_hdr)) { ++ err = PTR_ERR(mod_hdr); ++ ct_dbg("Failed to add create ct clear mod hdr"); ++ goto err_set_registers; ++ } ++ ++ dealloc_mod_hdr_actions(mod_acts); ++ pre_ct_attr->modify_hdr = mod_hdr; ++ pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; ++ ++ rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr); ++ if (IS_ERR(rule)) { ++ err = PTR_ERR(rule); ++ ct_dbg("Failed to add ct clear rule"); ++ goto err_insert; ++ } ++ ++ attr->ct_attr.ct_flow = ct_flow; ++ ct_flow->pre_ct_rule = rule; ++ *flow_rule = rule; ++ ++ return 0; ++ ++err_insert: ++ mlx5_modify_header_dealloc(priv->mdev, mod_hdr); ++err_set_registers: ++ netdev_warn(priv->netdev, ++ "Failed to offload ct clear flow, err %d\n", err); ++ return err; ++} ++ + struct mlx5_flow_handle * + mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, +- struct mlx5_esw_flow_attr *attr) ++ struct mlx5_esw_flow_attr *attr, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) + { ++ bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; + struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); + struct mlx5_flow_handle *rule; + int err; +@@ -1062,7 +1129,12 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + return ERR_PTR(-EOPNOTSUPP); + + mutex_lock(&ct_priv->control_lock); +- err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr, &rule); ++ if (clear_action) ++ err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr, ++ mod_hdr_acts, &rule); ++ else ++ err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr, ++ &rule); + mutex_unlock(&ct_priv->control_lock); + if (err) + return ERR_PTR(err); +@@ -1080,11 +1152,15 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv, + 
mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule, + pre_ct_attr); + mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr); +- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule, +- &ct_flow->post_ct_attr); +- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); +- idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); +- mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); ++ ++ if (ct_flow->post_ct_rule) { ++ mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule, ++ &ct_flow->post_ct_attr); ++ mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping); ++ idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); ++ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); ++ } ++ + kfree(ct_flow); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +index 464c86595309..6b2c893372da 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +@@ -9,6 +9,7 @@ + #include + + struct mlx5_esw_flow_attr; ++struct mlx5e_tc_mod_hdr_acts; + struct mlx5_rep_uplink_priv; + struct mlx5e_tc_flow; + struct mlx5e_priv; +@@ -97,7 +98,8 @@ struct mlx5_flow_handle * + mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, +- struct mlx5_esw_flow_attr *attr); ++ struct mlx5_esw_flow_attr *attr, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); + void + mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, +@@ -142,7 +144,8 @@ static inline struct mlx5_flow_handle * + mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, +- struct mlx5_esw_flow_attr *attr) ++ struct mlx5_esw_flow_attr *attr, ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) + { + return ERR_PTR(-EOPNOTSUPP); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index f88b199487d2..48e4d7a5f7b8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1151,11 +1151,15 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) + { ++ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts; + struct mlx5_flow_handle *rule; +- struct mlx5e_tc_mod_hdr_acts; + +- if (flow_flag_test(flow, CT)) +- return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr); ++ if (flow_flag_test(flow, CT)) { ++ mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; ++ ++ return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr, ++ mod_hdr_acts); ++ } + + rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + if (IS_ERR(rule)) +-- +2.13.6 + diff --git a/SOURCES/0247-netdrv-net-mlx5e-CT-Fix-stack-usage-compiler-warning.patch b/SOURCES/0247-netdrv-net-mlx5e-CT-Fix-stack-usage-compiler-warning.patch new file mode 100644 index 0000000..f6c3906 --- /dev/null +++ b/SOURCES/0247-netdrv-net-mlx5e-CT-Fix-stack-usage-compiler-warning.patch @@ -0,0 +1,174 @@ +From cb2edef541ca9dba7f68efc459869b5d47665dd6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:09 -0400 +Subject: [PATCH 247/312] [netdrv] net/mlx5e: CT: Fix stack usage compiler + warning +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Message-id: <20200519074934.6303-39-ahleihel@redhat.com> +Patchwork-id: 310541 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 38/63] net/mlx5e: CT: Fix stack 
usage compiler warning +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit aded104d397c22a18b9040f5641309f426d6df27 +Author: Saeed Mahameed +Date: Mon Mar 16 15:47:03 2020 -0700 + + net/mlx5e: CT: Fix stack usage compiler warning + + Fix the following warnings: [-Werror=frame-larger-than=] + + In function ‘mlx5_tc_ct_entry_add_rule’: + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c:541:1: + error: the frame size of 1136 bytes is larger than 1024 bytes + + In function ‘__mlx5_tc_ct_flow_offload’: + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c:1049:1: + error: the frame size of 1168 bytes is larger than 1024 bytes + + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Signed-off-by: Saeed Mahameed + Reviewed-by: Paul Blakey + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 31 +++++++++++++++------- + 1 file changed, 22 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 956d9ddcdeed..cb43b53bc235 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -484,19 +484,23 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat]; + struct mlx5_esw_flow_attr *attr = &zone_rule->attr; + struct mlx5_eswitch *esw = ct_priv->esw; +- struct mlx5_flow_spec spec = {}; ++ struct mlx5_flow_spec *spec = NULL; + u32 tupleid = 1; + int err; + + zone_rule->nat = nat; + ++ spec = kzalloc(sizeof(*spec), GFP_KERNEL); ++ if (!spec) ++ return -ENOMEM; ++ + /* Get tuple unique id */ + err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid, + TUPLE_ID_MAX, GFP_KERNEL); + if (err) { + netdev_warn(ct_priv->netdev, + "Failed to allocate tuple id, err: %d\n", err); +- return err; ++ goto err_idr_alloc; + } + zone_rule->tupleid = tupleid; + +@@ -517,18 +521,19 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + attr->counter = entry->counter; + attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT; + +- mlx5_tc_ct_set_tuple_match(&spec, flow_rule); +- mlx5e_tc_match_to_reg_match(&spec, ZONE_TO_REG, ++ mlx5_tc_ct_set_tuple_match(spec, flow_rule); ++ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, + entry->zone & MLX5_CT_ZONE_MASK, + MLX5_CT_ZONE_MASK); + +- zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, &spec, attr); ++ zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + if (IS_ERR(zone_rule->rule)) { + err = PTR_ERR(zone_rule->rule); + ct_dbg("Failed to add ct entry rule, nat: %d", nat); + goto err_rule; + } + ++ kfree(spec); + ct_dbg("Offloaded ct entry rule in zone %d", entry->zone); + + return 0; +@@ -537,6 +542,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); + err_mod_hdr: + idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid); ++err_idr_alloc: ++ kfree(spec); + return err; + } + +@@ -885,8 +892,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); + bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT; + struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {}; ++ struct mlx5_flow_spec *post_ct_spec = NULL; + 
struct mlx5_eswitch *esw = ct_priv->esw; +- struct mlx5_flow_spec post_ct_spec = {}; + struct mlx5_esw_flow_attr *pre_ct_attr; + struct mlx5_modify_hdr *mod_hdr; + struct mlx5_flow_handle *rule; +@@ -895,9 +902,13 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5_ct_ft *ft; + u32 fte_id = 1; + ++ post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL); + ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); +- if (!ct_flow) ++ if (!post_ct_spec || !ct_flow) { ++ kfree(post_ct_spec); ++ kfree(ct_flow); + return -ENOMEM; ++ } + + /* Register for CT established events */ + ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone, +@@ -992,7 +1003,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + /* Post ct rule matches on fte_id and executes original rule's + * tc rule action + */ +- mlx5e_tc_match_to_reg_match(&post_ct_spec, FTEID_TO_REG, ++ mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG, + fte_id, MLX5_FTE_ID_MASK); + + /* Put post_ct rule on post_ct fdb */ +@@ -1003,7 +1014,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE; + ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE; + ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); +- rule = mlx5_eswitch_add_offloaded_rule(esw, &post_ct_spec, ++ rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec, + &ct_flow->post_ct_attr); + ct_flow->post_ct_rule = rule; + if (IS_ERR(ct_flow->post_ct_rule)) { +@@ -1027,6 +1038,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + attr->ct_attr.ct_flow = ct_flow; + *flow_rule = ct_flow->post_ct_rule; + dealloc_mod_hdr_actions(&pre_mod_acts); ++ kfree(post_ct_spec); + + return 0; + +@@ -1043,6 +1055,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + err_idr: + mlx5_tc_ct_del_ft_cb(ct_priv, ft); + err_ft: ++ kfree(post_ct_spec); + kfree(ct_flow); + netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); + return err; +-- +2.13.6 + diff --git a/SOURCES/0248-netdrv-net-mlx5e-CT-Use-rhashtable-s-ct-entries-inst.patch b/SOURCES/0248-netdrv-net-mlx5e-CT-Use-rhashtable-s-ct-entries-inst.patch new file mode 100644 index 0000000..5eaf0c4 --- /dev/null +++ b/SOURCES/0248-netdrv-net-mlx5e-CT-Use-rhashtable-s-ct-entries-inst.patch @@ -0,0 +1,125 @@ +From 38789d72ea1a8009cd52229695b56e6749a775db Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:10 -0400 +Subject: [PATCH 248/312] [netdrv] net/mlx5e: CT: Use rhashtable's ct entries + instead of a separate list + +Message-id: <20200519074934.6303-40-ahleihel@redhat.com> +Patchwork-id: 310551 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 39/63] net/mlx5e: CT: Use rhashtable's ct entries instead of a separate list +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc2 + +commit 9808dd0a2aeebcb72239a3b082159b0186d9ac3d +Author: Paul Blakey +Date: Fri Mar 27 12:12:31 2020 +0300 + + net/mlx5e: CT: Use rhashtable's ct entries instead of a separate list + + Fixes CT entries list corruption. + + After allowing parallel insertion/removals in upper nf flow table + layer, unprotected ct entries list can be corrupted by parallel add/del + on the same flow table. 
+ + CT entries list is only used while freeing a ct zone flow table to + go over all the ct entries offloaded on that zone/table, and flush + the table. + + As rhashtable already provides an api to go over all the inserted entries, + fix the race by using the rhashtable iteration instead, and remove the list. + + Fixes: 7da182a998d6 ("netfilter: flowtable: Use work entry per offload command") + Reviewed-by: Oz Shlomo + Signed-off-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 19 +++++++------------ + 1 file changed, 7 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index cb43b53bc235..14b29d0d5092 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -67,11 +67,9 @@ struct mlx5_ct_ft { + struct nf_flowtable *nf_ft; + struct mlx5_tc_ct_priv *ct_priv; + struct rhashtable ct_entries_ht; +- struct list_head ct_entries_list; + }; + + struct mlx5_ct_entry { +- struct list_head list; + u16 zone; + struct rhash_head node; + struct flow_rule *flow_rule; +@@ -617,8 +615,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, + if (err) + goto err_insert; + +- list_add(&entry->list, &ft->ct_entries_list); +- + return 0; + + err_insert: +@@ -646,7 +642,6 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft, + WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht, + &entry->node, + cts_ht_params)); +- list_del(&entry->list); + kfree(entry); + + return 0; +@@ -818,7 +813,6 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, + ft->zone = zone; + ft->nf_ft = nf_ft; + ft->ct_priv = ct_priv; +- INIT_LIST_HEAD(&ft->ct_entries_list); + refcount_set(&ft->refcount, 1); + + err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params); +@@ -847,12 +841,12 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, + } + + static void +-mlx5_tc_ct_flush_ft(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) ++mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) + { +- struct mlx5_ct_entry *entry; ++ struct mlx5_tc_ct_priv *ct_priv = arg; ++ struct mlx5_ct_entry *entry = ptr; + +- list_for_each_entry(entry, &ft->ct_entries_list, list) +- mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry); ++ mlx5_tc_ct_entry_del_rules(ct_priv, entry); + } + + static void +@@ -863,9 +857,10 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) + + nf_flow_table_offload_del_cb(ft->nf_ft, + mlx5_tc_ct_block_flow_offload, ft); +- mlx5_tc_ct_flush_ft(ct_priv, ft); + rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params); +- rhashtable_destroy(&ft->ct_entries_ht); ++ rhashtable_free_and_destroy(&ft->ct_entries_ht, ++ mlx5_tc_ct_flush_ft_entry, ++ ct_priv); + kfree(ft); + } + +-- +2.13.6 + diff --git a/SOURCES/0249-netdrv-net-mlx5-CT-Change-idr-to-xarray-to-protect-p.patch b/SOURCES/0249-netdrv-net-mlx5-CT-Change-idr-to-xarray-to-protect-p.patch new file mode 100644 index 0000000..e788e76 --- /dev/null +++ b/SOURCES/0249-netdrv-net-mlx5-CT-Change-idr-to-xarray-to-protect-p.patch @@ -0,0 +1,175 @@ +From e3cb19855b89bc59087b8a0a26e60fd85a5b1ba9 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:11 -0400 +Subject: [PATCH 249/312] [netdrv] net/mlx5: CT: Change idr to xarray to + protect parallel tuple id allocation + +Message-id: 
<20200519074934.6303-41-ahleihel@redhat.com> +Patchwork-id: 310542 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 40/63] net/mlx5: CT: Change idr to xarray to protect parallel tuple id allocation +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc3 + +commit 70840b66da4d3b9a8962905d9111a53ee628beb3 +Author: Paul Blakey +Date: Mon Apr 6 15:47:52 2020 +0300 + + net/mlx5: CT: Change idr to xarray to protect parallel tuple id allocation + + After allowing parallel tuple insertion, we get the following trace: + + [ 5505.142249] ------------[ cut here ]------------ + [ 5505.148155] WARNING: CPU: 21 PID: 13313 at lib/radix-tree.c:581 delete_node+0x16c/0x180 + [ 5505.295553] CPU: 21 PID: 13313 Comm: kworker/u50:22 Tainted: G OE 5.6.0+ #78 + [ 5505.304824] Hardware name: Supermicro Super Server/X10DRT-P, BIOS 2.0b 03/30/2017 + [ 5505.313740] Workqueue: nf_flow_table_offload flow_offload_work_handler [nf_flow_table] + [ 5505.323257] RIP: 0010:delete_node+0x16c/0x180 + [ 5505.349862] RSP: 0018:ffffb19184eb7b30 EFLAGS: 00010282 + [ 5505.356785] RAX: 0000000000000000 RBX: ffff904ac95b86d8 RCX: ffff904b6f938838 + [ 5505.365190] RDX: 0000000000000000 RSI: ffff904ac954b908 RDI: ffff904ac954b920 + [ 5505.373628] RBP: ffff904b4ac13060 R08: 0000000000000001 R09: 0000000000000000 + [ 5505.382155] R10: 0000000000000000 R11: 0000000000000040 R12: 0000000000000000 + [ 5505.390527] R13: ffffb19184eb7bfc R14: ffff904b6bef5800 R15: ffff90482c1203c0 + [ 5505.399246] FS: 0000000000000000(0000) GS:ffff904c2fc80000(0000) knlGS:0000000000000000 + [ 5505.408621] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + [ 5505.415739] CR2: 00007f5d27006010 CR3: 0000000058c10006 CR4: 00000000001626e0 + [ 5505.424547] Call Trace: + [ 5505.428429] idr_alloc_u32+0x7b/0xc0 + [ 5505.433803] mlx5_tc_ct_entry_add_rule+0xbf/0x950 [mlx5_core] + [ 5505.441354] ? mlx5_fc_create+0x23c/0x370 [mlx5_core] + [ 5505.448225] mlx5_tc_ct_block_flow_offload+0x874/0x10b0 [mlx5_core] + [ 5505.456278] ? mlx5_tc_ct_block_flow_offload+0x63d/0x10b0 [mlx5_core] + [ 5505.464532] nf_flow_offload_tuple.isra.21+0xc5/0x140 [nf_flow_table] + [ 5505.472286] ? __kmalloc+0x217/0x2f0 + [ 5505.477093] ? flow_rule_alloc+0x1c/0x30 + [ 5505.482117] flow_offload_work_handler+0x1d0/0x290 [nf_flow_table] + [ 5505.489674] ? process_one_work+0x17c/0x580 + [ 5505.494922] process_one_work+0x202/0x580 + [ 5505.500082] ? process_one_work+0x17c/0x580 + [ 5505.505696] worker_thread+0x4c/0x3f0 + [ 5505.510458] kthread+0x103/0x140 + [ 5505.514989] ? process_one_work+0x580/0x580 + [ 5505.520616] ? kthread_bind+0x10/0x10 + [ 5505.525837] ret_from_fork+0x3a/0x50 + [ 5505.570841] ---[ end trace 07995de9c56d6831 ]--- + + This happens from parallel deletes/adds to idr, as idr isn't protected. + Fix that by using xarray as the tuple_ids allocator instead of idr. 
+ + Fixes: 7da182a998d6 ("netfilter: flowtable: Use work entry per offload command") + Reviewed-by: Roi Dayan + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 23 +++++++++++----------- + 1 file changed, 12 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 14b29d0d5092..46eba4d60db1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + #include "en/tc_ct.h" + #include "en.h" +@@ -35,7 +36,7 @@ struct mlx5_tc_ct_priv { + struct mlx5_eswitch *esw; + const struct net_device *netdev; + struct idr fte_ids; +- struct idr tuple_ids; ++ struct xarray tuple_ids; + struct rhashtable zone_ht; + struct mlx5_flow_table *ct; + struct mlx5_flow_table *ct_nat; +@@ -238,7 +239,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, + + mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr); + mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); +- idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid); ++ xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid); + } + + static void +@@ -483,7 +484,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_esw_flow_attr *attr = &zone_rule->attr; + struct mlx5_eswitch *esw = ct_priv->esw; + struct mlx5_flow_spec *spec = NULL; +- u32 tupleid = 1; ++ u32 tupleid; + int err; + + zone_rule->nat = nat; +@@ -493,12 +494,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + return -ENOMEM; + + /* Get tuple unique id */ +- err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid, +- TUPLE_ID_MAX, GFP_KERNEL); ++ err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule, ++ XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL); + if (err) { + netdev_warn(ct_priv->netdev, + "Failed to allocate tuple id, err: %d\n", err); +- goto err_idr_alloc; ++ goto err_xa_alloc; + } + zone_rule->tupleid = tupleid; + +@@ -539,8 +540,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, + err_rule: + mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); + err_mod_hdr: +- idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid); +-err_idr_alloc: ++ xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid); ++err_xa_alloc: + kfree(spec); + return err; + } +@@ -1299,7 +1300,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) + } + + idr_init(&ct_priv->fte_ids); +- idr_init(&ct_priv->tuple_ids); ++ xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1); + mutex_init(&ct_priv->control_lock); + rhashtable_init(&ct_priv->zone_ht, &zone_params); + +@@ -1334,7 +1335,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) + + rhashtable_destroy(&ct_priv->zone_ht); + mutex_destroy(&ct_priv->control_lock); +- idr_destroy(&ct_priv->tuple_ids); ++ xa_destroy(&ct_priv->tuple_ids); + idr_destroy(&ct_priv->fte_ids); + kfree(ct_priv); + +@@ -1352,7 +1353,7 @@ mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, + if (!ct_priv || !tupleid) + return true; + +- zone_rule = idr_find(&ct_priv->tuple_ids, tupleid); ++ zone_rule = xa_load(&ct_priv->tuple_ids, tupleid); + if (!zone_rule) + return false; + +-- +2.13.6 + diff --git a/SOURCES/0250-netdrv-net-mlx5-E-switch-Fix-mutex-init-order.patch 
b/SOURCES/0250-netdrv-net-mlx5-E-switch-Fix-mutex-init-order.patch new file mode 100644 index 0000000..6762bfe --- /dev/null +++ b/SOURCES/0250-netdrv-net-mlx5-E-switch-Fix-mutex-init-order.patch @@ -0,0 +1,114 @@ +From 408a7bc962940848151942ebf8ee76c0f150d893 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:12 -0400 +Subject: [PATCH 250/312] [netdrv] net/mlx5: E-switch, Fix mutex init order + +Message-id: <20200519074934.6303-42-ahleihel@redhat.com> +Patchwork-id: 310554 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 41/63] net/mlx5: E-switch, Fix mutex init order +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc5 + +commit f8d1eddaf94abdc459ccfb881aa7233cb9f7f39a +Author: Parav Pandit +Date: Tue Apr 21 05:36:07 2020 -0500 + + net/mlx5: E-switch, Fix mutex init order + + In cited patch mutex is initialized after its used. + Below call trace is observed. + Fix the order to initialize the mutex early enough. + Similarly follow mirror sequence during cleanup. + + kernel: DEBUG_LOCKS_WARN_ON(lock->magic != lock) + kernel: WARNING: CPU: 5 PID: 45916 at kernel/locking/mutex.c:938 + __mutex_lock+0x7d6/0x8a0 + kernel: Call Trace: + kernel: ? esw_vport_tbl_get+0x3b/0x250 [mlx5_core] + kernel: ? mark_held_locks+0x55/0x70 + kernel: ? __slab_free+0x274/0x400 + kernel: ? lockdep_hardirqs_on+0x140/0x1d0 + kernel: esw_vport_tbl_get+0x3b/0x250 [mlx5_core] + kernel: ? mlx5_esw_chains_create_fdb_prio+0xa57/0xc20 [mlx5_core] + kernel: mlx5_esw_vport_tbl_get+0x88/0xf0 [mlx5_core] + kernel: mlx5_esw_chains_create+0x2f3/0x3e0 [mlx5_core] + kernel: esw_create_offloads_fdb_tables+0x11d/0x580 [mlx5_core] + kernel: esw_offloads_enable+0x26d/0x540 [mlx5_core] + kernel: mlx5_eswitch_enable_locked+0x155/0x860 [mlx5_core] + kernel: mlx5_devlink_eswitch_mode_set+0x1af/0x320 [mlx5_core] + kernel: devlink_nl_cmd_eswitch_set_doit+0x41/0xb0 + + Fixes: 96e326878fa5 ("net/mlx5e: Eswitch, Use per vport tables for mirroring") + Signed-off-by: Parav Pandit + Reviewed-by: Roi Dayan + Reviewed-by: Eli Cohen + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 3a6434ba2a58..f6fd7df0e864 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -2292,10 +2292,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev); + + memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); ++ mutex_init(&esw->fdb_table.offloads.vports.lock); ++ hash_init(esw->fdb_table.offloads.vports.table); + + err = esw_create_uplink_offloads_acl_tables(esw); + if (err) +- return err; ++ goto create_acl_err; + + err = esw_create_offloads_table(esw, total_vports); + if (err) +@@ -2313,9 +2315,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + if (err) + goto create_fg_err; + +- mutex_init(&esw->fdb_table.offloads.vports.lock); +- hash_init(esw->fdb_table.offloads.vports.table); +- + return 0; + + create_fg_err: +@@ -2326,18 
+2325,19 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) + esw_destroy_offloads_table(esw); + create_offloads_err: + esw_destroy_uplink_offloads_acl_tables(esw); +- ++create_acl_err: ++ mutex_destroy(&esw->fdb_table.offloads.vports.lock); + return err; + } + + static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) + { +- mutex_destroy(&esw->fdb_table.offloads.vports.lock); + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_fdb_tables(esw); + esw_destroy_restore_table(esw); + esw_destroy_offloads_table(esw); + esw_destroy_uplink_offloads_acl_tables(esw); ++ mutex_destroy(&esw->fdb_table.offloads.vports.lock); + } + + static void +-- +2.13.6 + diff --git a/SOURCES/0251-netdrv-net-mlx5-E-Switch-free-flow_group_in-after-cr.patch b/SOURCES/0251-netdrv-net-mlx5-E-Switch-free-flow_group_in-after-cr.patch new file mode 100644 index 0000000..0051b11 --- /dev/null +++ b/SOURCES/0251-netdrv-net-mlx5-E-Switch-free-flow_group_in-after-cr.patch @@ -0,0 +1,56 @@ +From ab1a12fa3425da6bc263127011ca7c09c2da19e7 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:13 -0400 +Subject: [PATCH 251/312] [netdrv] net/mlx5: E-Switch, free flow_group_in after + creating the restore table + +Message-id: <20200519074934.6303-43-ahleihel@redhat.com> +Patchwork-id: 310540 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 42/63] net/mlx5: E-Switch, free flow_group_in after creating the restore table +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit c8508713c71c21f5a16469dcc75ffb4381fbfeb4 +Author: Roi Dayan +Date: Thu Mar 19 17:48:18 2020 +0200 + + net/mlx5: E-Switch, free flow_group_in after creating the restore table + + We allocate a temporary memory but forget to free it. 
+ + Fixes: 11b717d61526 ("net/mlx5: E-Switch, Get reg_c0 value on CQE") + Signed-off-by: Roi Dayan + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index f6fd7df0e864..02340328f11b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1514,6 +1514,8 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) + esw->offloads.restore_group = g; + esw->offloads.restore_copy_hdr_id = mod_hdr; + ++ kvfree(flow_group_in); ++ + return 0; + + err_mod_hdr: +-- +2.13.6 + diff --git a/SOURCES/0252-netdrv-net-mlx5-E-Switch-Enable-restore-table-only-i.patch b/SOURCES/0252-netdrv-net-mlx5-E-Switch-Enable-restore-table-only-i.patch new file mode 100644 index 0000000..f03fc12 --- /dev/null +++ b/SOURCES/0252-netdrv-net-mlx5-E-Switch-Enable-restore-table-only-i.patch @@ -0,0 +1,83 @@ +From 3277bd11a6f6e94fef4fa83f1d7e4da1d3f66a8a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:14 -0400 +Subject: [PATCH 252/312] [netdrv] net/mlx5: E-Switch, Enable restore table + only if reg_c1 is supported + +Message-id: <20200519074934.6303-44-ahleihel@redhat.com> +Patchwork-id: 310545 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 43/63] net/mlx5: E-Switch, Enable restore table only if reg_c1 is supported +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 60acc105cbc23c525ddb6fed595cac4796c0040b +Author: Paul Blakey +Date: Wed Mar 18 10:55:12 2020 +0200 + + net/mlx5: E-Switch, Enable restore table only if reg_c1 is supported + + Reg c0/c1 matching, rewrite of regs c0/c1, and copy header of regs c1,B + is needed for the restore table to function, might not be supported by + firmware, and creation of the restore table or the copy header will + fail. + + Check reg_c1 loopback support, as firmware which supports this, + should have all of the above. 
+ + Fixes: 11b717d61526 ("net/mlx5: E-Switch, Get reg_c0 value on CQE") + Signed-off-by: Paul Blakey + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 02340328f11b..0c49033a3e73 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1061,6 +1061,9 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) + struct mlx5_flow_spec *spec; + void *misc; + ++ if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) ++ return ERR_PTR(-EOPNOTSUPP); ++ + spec = kzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); +@@ -1434,6 +1437,9 @@ static void esw_destroy_restore_table(struct mlx5_eswitch *esw) + { + struct mlx5_esw_offload *offloads = &esw->offloads; + ++ if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) ++ return; ++ + mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id); + mlx5_destroy_flow_group(offloads->restore_group); + mlx5_destroy_flow_table(offloads->ft_offloads_restore); +@@ -1453,6 +1459,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) + u32 *flow_group_in; + int err = 0; + ++ if (!mlx5_eswitch_reg_c1_loopback_supported(esw)) ++ return 0; ++ + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); + if (!ns) { + esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); +-- +2.13.6 + diff --git a/SOURCES/0253-netdrv-net-mlx5-Add-missing-inline-to-stub-esw_add_r.patch b/SOURCES/0253-netdrv-net-mlx5-Add-missing-inline-to-stub-esw_add_r.patch new file mode 100644 index 0000000..aa353e0 --- /dev/null +++ b/SOURCES/0253-netdrv-net-mlx5-Add-missing-inline-to-stub-esw_add_r.patch @@ -0,0 +1,65 @@ +From bff2d3c5ff056e1fec3983f032b93f35ac4d8560 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:15 -0400 +Subject: [PATCH 253/312] [netdrv] net/mlx5: Add missing inline to stub + esw_add_restore_rule + +Message-id: <20200519074934.6303-45-ahleihel@redhat.com> +Patchwork-id: 310548 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 44/63] net/mlx5: Add missing inline to stub esw_add_restore_rule +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 9d3faa51bef05d21e908f2c1bd932a5cfac59b63 +Author: Nathan Chancellor +Date: Fri Mar 13 20:40:20 2020 -0700 + + net/mlx5: Add missing inline to stub esw_add_restore_rule + + When CONFIG_MLX5_ESWITCH is unset, clang warns: + + In file included from drivers/net/ethernet/mellanox/mlx5/core/main.c:58: + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h:670:1: warning: unused + function 'esw_add_restore_rule' [-Wunused-function] + esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) + ^ + 1 warning generated. + + This stub function is missing inline; add it to suppress the warning. 
+ + Fixes: 11b717d61526 ("net/mlx5: E-Switch, Get reg_c0 value on CQE") + Signed-off-by: Nathan Chancellor + Reviewed-by: Nick Desaulniers + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 79e4dfa5368d..dafeb2000269 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -711,7 +711,7 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) + + static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} + +-static struct mlx5_flow_handle * ++static inline struct mlx5_flow_handle * + esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) + { + return ERR_PTR(-EOPNOTSUPP); +-- +2.13.6 + diff --git a/SOURCES/0254-netdrv-net-mlx5-E-Switch-Fix-using-fwd-and-modify-wh.patch b/SOURCES/0254-netdrv-net-mlx5-E-Switch-Fix-using-fwd-and-modify-wh.patch new file mode 100644 index 0000000..621f514 --- /dev/null +++ b/SOURCES/0254-netdrv-net-mlx5-E-Switch-Fix-using-fwd-and-modify-wh.patch @@ -0,0 +1,94 @@ +From e6aa12d9737f02f82238814ede2bd3e3865ae78b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:16 -0400 +Subject: [PATCH 254/312] [netdrv] net/mlx5: E-Switch: Fix using fwd and modify + when firmware doesn't support it + +Message-id: <20200519074934.6303-46-ahleihel@redhat.com> +Patchwork-id: 310543 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 45/63] net/mlx5: E-Switch: Fix using fwd and modify when firmware doesn't support it +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 0b3a8b6b53406f2d890cda369aa9d61e30bc4162 +Author: Paul Blakey +Date: Sun Mar 1 16:12:00 2020 +0200 + + net/mlx5: E-Switch: Fix using fwd and modify when firmware doesn't support it + + Currently, if firmware doesn't support fwd and modify, driver fails + initializing eswitch chains while entering switchdev mode. + + Instead, on such cases, disable the chains and prio feature (as we can't + restore the chain on miss) and the usage of fwd and modify. 
+ + Fixes: 8f1e0b97cc70 ("net/mlx5: E-Switch, Mark miss packets with new chain id mapping") + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index 0f9c9aae11bf..a7d88ab35bbf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -23,6 +23,8 @@ + #define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb) + #define fdb_ignore_flow_level_supported(esw) \ + (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level)) ++#define fdb_modify_header_fwd_to_table_supported(esw) \ ++ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table)) + + /* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS), + * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated +@@ -106,7 +108,8 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw) + + bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw) + { +- return fdb_ignore_flow_level_supported(esw); ++ return mlx5_esw_chains_prios_supported(esw) && ++ fdb_ignore_flow_level_supported(esw); + } + + u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw) +@@ -417,7 +420,8 @@ mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain, + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = next_fdb; + +- if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) { ++ if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw) && ++ fdb_modify_header_fwd_to_table_supported(esw)) { + act.modify_hdr = fdb_chain->miss_modify_hdr; + act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + } +@@ -777,6 +781,13 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw) + esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { + esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n"); ++ } else if (!fdb_modify_header_fwd_to_table_supported(esw)) { ++ /* Disabled when ttl workaround is needed, e.g ++ * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig ++ */ ++ esw_warn(dev, ++ "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n"); ++ esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + } else { + esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; + esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n", +-- +2.13.6 + diff --git a/SOURCES/0255-netdrv-net-mlx5e-Fix-rejecting-all-egress-rules-not-.patch b/SOURCES/0255-netdrv-net-mlx5e-Fix-rejecting-all-egress-rules-not-.patch new file mode 100644 index 0000000..a28bbf0 --- /dev/null +++ b/SOURCES/0255-netdrv-net-mlx5e-Fix-rejecting-all-egress-rules-not-.patch @@ -0,0 +1,81 @@ +From 6c4177e30dbf47b63c1b16076e4de28345fba9f6 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:17 -0400 +Subject: [PATCH 255/312] [netdrv] net/mlx5e: Fix rejecting all egress rules + not on vlan + +Message-id: <20200519074934.6303-47-ahleihel@redhat.com> +Patchwork-id: 310561 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 46/63] net/mlx5e: Fix rejecting all egress rules not on 
vlan +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit d0645b3780954b7133d9a908009d166ae686bd2a +Author: Roi Dayan +Date: Tue Mar 3 11:18:53 2020 +0200 + + net/mlx5e: Fix rejecting all egress rules not on vlan + + The original condition rejected all egress rules that + are not on tunnel device. + Also, the whole point of this egress reject was to disallow bad + rules because of egdev which doesn't exists today, so remove + this check entirely. + + Fixes: 0a7fcb78cc21 ("net/mlx5e: Support inner header rewrite with goto action") + Signed-off-by: Roi Dayan + Reviewed-by: Oz Shlomo + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15 +-------------- + 1 file changed, 1 insertion(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 48e4d7a5f7b8..e66a1e970196 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3045,8 +3045,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) + { +- struct net_device *filter_dev = parse_attr->filter_dev; +- bool drop_action, pop_action, ct_flow; ++ bool ct_flow; + u32 actions; + + ct_flow = flow_flag_test(flow, CT); +@@ -3065,18 +3064,6 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + actions = flow->nic_attr->action; + } + +- drop_action = actions & MLX5_FLOW_CONTEXT_ACTION_DROP; +- pop_action = actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; +- +- if (flow_flag_test(flow, EGRESS) && !drop_action) { +- /* We only support filters on tunnel device, or on vlan +- * devices if they have pop/drop action +- */ +- if (!mlx5e_get_tc_tun(filter_dev) || +- (is_vlan_dev(filter_dev) && !pop_action)) +- return false; +- } +- + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) + return modify_header_match_supported(&parse_attr->spec, + flow_action, actions, +-- +2.13.6 + diff --git a/SOURCES/0256-netdrv-net-mlx5-E-switch-Fix-printing-wrong-error-va.patch b/SOURCES/0256-netdrv-net-mlx5-E-switch-Fix-printing-wrong-error-va.patch new file mode 100644 index 0000000..c0ca288 --- /dev/null +++ b/SOURCES/0256-netdrv-net-mlx5-E-switch-Fix-printing-wrong-error-va.patch @@ -0,0 +1,62 @@ +From e4a7cf3c9eae834598d894f2335a408408c03989 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:18 -0400 +Subject: [PATCH 256/312] [netdrv] net/mlx5: E-switch, Fix printing wrong error + value + +Message-id: <20200519074934.6303-48-ahleihel@redhat.com> +Patchwork-id: 310550 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 47/63] net/mlx5: E-switch, Fix printing wrong error value +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc5 + +commit e9864539053ae15c2d6475833f62d7383f9271ce +Author: Parav Pandit +Date: Mon Apr 20 04:32:48 2020 -0500 + + net/mlx5: E-switch, Fix printing wrong error value + + When mlx5_modify_header_alloc() fails, instead of printing the error + value returned, current error 
log prints 0. + + Fix by printing correct error value returned by + mlx5_modify_header_alloc(). + + Fixes: 6724e66b90ee ("net/mlx5: E-Switch, Get reg_c1 value on miss") + Signed-off-by: Parav Pandit + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 0c49033a3e73..82905834ddb2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1513,9 +1513,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) + MLX5_FLOW_NAMESPACE_KERNEL, 1, + modact); + if (IS_ERR(mod_hdr)) { ++ err = PTR_ERR(mod_hdr); + esw_warn(dev, "Failed to create restore mod header, err: %d\n", + err); +- err = PTR_ERR(mod_hdr); + goto err_mod_hdr; + } + +-- +2.13.6 + diff --git a/SOURCES/0257-netdrv-net-mlx5-E-Switch-Use-correct-type-for-chain-.patch b/SOURCES/0257-netdrv-net-mlx5-E-Switch-Use-correct-type-for-chain-.patch new file mode 100644 index 0000000..0d7338a --- /dev/null +++ b/SOURCES/0257-netdrv-net-mlx5-E-Switch-Use-correct-type-for-chain-.patch @@ -0,0 +1,57 @@ +From 996ee3ea51025a424c908e69ffb74ace7d7caf6d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:19 -0400 +Subject: [PATCH 257/312] [netdrv] net/mlx5: E-Switch, Use correct type for + chain, prio and level values + +Message-id: <20200519074934.6303-49-ahleihel@redhat.com> +Patchwork-id: 310563 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 48/63] net/mlx5: E-Switch, Use correct type for chain, prio and level values +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit d528d4970503edafc23bd43d322a818d74954f7a +Author: Roi Dayan +Date: Mon Mar 23 12:14:58 2020 +0200 + + net/mlx5: E-Switch, Use correct type for chain, prio and level values + + The correct type is u32. 
+ + Fixes: d18296ffd9cc ("net/mlx5: E-Switch, Introduce global tables") + Signed-off-by: Roi Dayan + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +index a7d88ab35bbf..1fe488e1b07e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c +@@ -726,7 +726,8 @@ mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) + struct mlx5_flow_table * + mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw) + { +- int chain, prio, level, err; ++ u32 chain, prio, level; ++ int err; + + if (!fdb_ignore_flow_level_supported(esw)) { + err = -EOPNOTSUPP; +-- +2.13.6 + diff --git a/SOURCES/0258-netdrv-net-mlx5e-CT-Avoid-false-warning-about-rule-m.patch b/SOURCES/0258-netdrv-net-mlx5e-CT-Avoid-false-warning-about-rule-m.patch new file mode 100644 index 0000000..0be18af --- /dev/null +++ b/SOURCES/0258-netdrv-net-mlx5e-CT-Avoid-false-warning-about-rule-m.patch @@ -0,0 +1,55 @@ +From 44904978b56a8f9c6338a778b692222ae0159df1 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:20 -0400 +Subject: [PATCH 258/312] [netdrv] net/mlx5e: CT: Avoid false warning about + rule may be used uninitialized + +Message-id: <20200519074934.6303-50-ahleihel@redhat.com> +Patchwork-id: 310544 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 49/63] net/mlx5e: CT: Avoid false warning about rule may be used uninitialized +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 70a5698a5683cd504b03c6030ee622b1bec3f702 +Author: Roi Dayan +Date: Sun Apr 26 09:52:02 2020 +0300 + + net/mlx5e: CT: Avoid false warning about rule may be used uninitialized + + Avoid gcc warning by preset rule to invalid ptr. 
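The pattern being applied is worth spelling out: when only some branches assign a pointer, presetting it to an error pointer keeps every path well defined and silences the maybe-uninitialized warning. A simplified sketch of the idea (the helper and its arguments are invented for illustration; callers still check IS_ERR() as usual):

    #include <linux/err.h>

    struct mlx5_flow_handle;    /* opaque here, as in the driver */

    static struct mlx5_flow_handle *
    pick_rule(struct mlx5_flow_handle *nat_rule,
              struct mlx5_flow_handle *plain_rule, bool nat)
    {
            /* preset so the "no branch taken" case returns a real error */
            struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);

            if (nat && nat_rule)
                    rule = nat_rule;
            else if (!nat && plain_rule)
                    rule = plain_rule;

            return rule;
    }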
+ + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Signed-off-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 46eba4d60db1..8281dfab5e14 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -1131,7 +1131,7 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + { + bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; + struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); +- struct mlx5_flow_handle *rule; ++ struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL); + int err; + + if (!ct_priv) +-- +2.13.6 + diff --git a/SOURCES/0259-netdrv-net-mlx5e-Fix-actions_match_supported-return.patch b/SOURCES/0259-netdrv-net-mlx5e-Fix-actions_match_supported-return.patch new file mode 100644 index 0000000..eaac731 --- /dev/null +++ b/SOURCES/0259-netdrv-net-mlx5e-Fix-actions_match_supported-return.patch @@ -0,0 +1,58 @@ +From 31794e8c50e21a6560a57e05a9b679691b1238ff Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:21 -0400 +Subject: [PATCH 259/312] [netdrv] net/mlx5e: Fix actions_match_supported() + return + +Message-id: <20200519074934.6303-51-ahleihel@redhat.com> +Patchwork-id: 310546 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 50/63] net/mlx5e: Fix actions_match_supported() return +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 49397b801261160fb5f5d3f28536c792e72ecbb3 +Author: Dan Carpenter +Date: Fri Mar 20 16:23:05 2020 +0300 + + net/mlx5e: Fix actions_match_supported() return + + The actions_match_supported() function returns a bool, true for success + and false for failure. This error path is returning a negative which + is cast to true but it should return false. 
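The underlying pitfall: in a function declared bool, returning a negative errno silently converts to true, which here would report the unsupported mirror-plus-ct combination as supported. A tiny sketch of the distinction (simplified, hypothetical name):

    #include <linux/types.h>

    static bool mirror_with_ct_supported(bool has_mirror, bool has_ct)
    {
            if (has_mirror && has_ct)
                    return false;   /* not "return -EOPNOTSUPP;": any non-zero
                                     * errno would become true in a bool context
                                     */
            return true;
    }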
+ + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Signed-off-by: Dan Carpenter + Reviewed-by: Leon Romanovsky + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index e66a1e970196..f760231213d1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3058,7 +3058,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + */ + NL_SET_ERR_MSG_MOD(extack, + "Can't offload mirroring with action ct"); +- return -EOPNOTSUPP; ++ return false; + } + } else { + actions = flow->nic_attr->action; +-- +2.13.6 + diff --git a/SOURCES/0260-netdrv-net-mlx5e-CT-Fix-insert-rules-when-TC_CT-conf.patch b/SOURCES/0260-netdrv-net-mlx5e-CT-Fix-insert-rules-when-TC_CT-conf.patch new file mode 100644 index 0000000..c7d018e --- /dev/null +++ b/SOURCES/0260-netdrv-net-mlx5e-CT-Fix-insert-rules-when-TC_CT-conf.patch @@ -0,0 +1,80 @@ +From bd80e8b7fb07dfac14cb968f5938dfb066de355d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:22 -0400 +Subject: [PATCH 260/312] [netdrv] net/mlx5e: CT: Fix insert rules when TC_CT + config isn't enabled + +Message-id: <20200519074934.6303-52-ahleihel@redhat.com> +Patchwork-id: 310552 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 51/63] net/mlx5e: CT: Fix insert rules when TC_CT config isn't enabled +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 3cfc4332edbc0e96af99be9c1cadf10c8b7113fb +Author: Paul Blakey +Date: Sun Mar 15 15:18:47 2020 +0200 + + net/mlx5e: CT: Fix insert rules when TC_CT config isn't enabled + + If CONFIG_MLX5_TC_CT isn't enabled, all offloading of eswitch tc rules + fails on parsing ct match, even if there is no ct match. + + Return success if there is no ct match, regardless of config. 
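In other words, the !CONFIG_MLX5_TC_CT stub now rejects a rule only when it actually asks for something the build cannot do. A condensed sketch of that shape (the parameter is illustrative, not the real prototype):

    #include <linux/errno.h>

    static inline int parse_ct_match_stub(bool rule_matches_on_ct)
    {
            if (!rule_matches_on_ct)
                    return 0;           /* no ct match requested: accept */

            return -EOPNOTSUPP;         /* ct requested, support compiled out */
    }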
+ + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Signed-off-by: Paul Blakey + Reviewed-by: Oz Shlomo + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +index 6b2c893372da..091d305b633e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +@@ -8,6 +8,8 @@ + #include + #include + ++#include "en.h" ++ + struct mlx5_esw_flow_attr; + struct mlx5e_tc_mod_hdr_acts; + struct mlx5_rep_uplink_priv; +@@ -128,6 +130,11 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + struct flow_cls_offload *f, + struct netlink_ext_ack *extack) + { ++ if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) ++ return 0; ++ ++ NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled."); ++ netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n"); + return -EOPNOTSUPP; + } + +@@ -137,6 +144,8 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) + { ++ NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled."); ++ netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n"); + return -EOPNOTSUPP; + } + +-- +2.13.6 + diff --git a/SOURCES/0261-netdrv-net-mlx5e-CT-remove-set-but-not-used-variable.patch b/SOURCES/0261-netdrv-net-mlx5e-CT-remove-set-but-not-used-variable.patch new file mode 100644 index 0000000..bc60a55 --- /dev/null +++ b/SOURCES/0261-netdrv-net-mlx5e-CT-remove-set-but-not-used-variable.patch @@ -0,0 +1,67 @@ +From 91d4bbe4a335c6433b77844c4d56fe858d3528d5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:23 -0400 +Subject: [PATCH 261/312] [netdrv] net/mlx5e: CT: remove set but not used + variable 'unnew' + +Message-id: <20200519074934.6303-53-ahleihel@redhat.com> +Patchwork-id: 310547 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 52/63] net/mlx5e: CT: remove set but not used variable 'unnew' +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 35e725e1b9d645cc412e06bbc204d63dddb1512b +Author: YueHaibing +Date: Sat Mar 14 18:44:46 2020 +0800 + + net/mlx5e: CT: remove set but not used variable 'unnew' + + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c: + In function mlx5_tc_ct_parse_match: + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c:699:36: warning: + variable unnew set but not used [-Wunused-but-set-variable] + + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Reported-by: Hulk Robot + Signed-off-by: YueHaibing + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 8281dfab5e14..003079b09b67 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -699,7 +699,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + { + struct 
mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); + struct flow_dissector_key_ct *mask, *key; +- bool trk, est, untrk, unest, new, unnew; ++ bool trk, est, untrk, unest, new; + u32 ctstate = 0, ctstate_mask = 0; + u16 ct_state_on, ct_state_off; + u16 ct_state, ct_state_mask; +@@ -742,7 +742,6 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW; + est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; + untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; +- unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW; + unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED; + + ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0; +-- +2.13.6 + diff --git a/SOURCES/0262-netdrv-net-mlx5e-Fix-missing-pedit-action-after-ct-c.patch b/SOURCES/0262-netdrv-net-mlx5e-Fix-missing-pedit-action-after-ct-c.patch new file mode 100644 index 0000000..81c15f9 --- /dev/null +++ b/SOURCES/0262-netdrv-net-mlx5e-Fix-missing-pedit-action-after-ct-c.patch @@ -0,0 +1,59 @@ +From c20c4f5dcd31ef9e4acf22a472c33fa6b5f8e8ed Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:24 -0400 +Subject: [PATCH 262/312] [netdrv] net/mlx5e: Fix missing pedit action after ct + clear action + +Message-id: <20200519074934.6303-54-ahleihel@redhat.com> +Patchwork-id: 310549 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 53/63] net/mlx5e: Fix missing pedit action after ct clear action +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc2 + +commit d5a3c2b640093c8a4bb5d76170a8f6c8c2eacc17 +Author: Roi Dayan +Date: Sun Mar 29 18:54:10 2020 +0300 + + net/mlx5e: Fix missing pedit action after ct clear action + + With ct clear action we should not allocate the action in hw + and not release the mod_acts parsed in advance. + It will be done when handling the ct clear action. 
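The decision the new condition encodes can be read in isolation as: attach a modify-header object only when the flow asks for one and is not a ct clear flow, since the ct clear path consumes the parsed mod-hdr actions itself. A boiled-down sketch (the helper name is invented; the two flag macros are the ones en_tc.c already uses from the mlx5 and tc_ct headers):

    #include <linux/types.h>

    static bool should_attach_mod_hdr(u32 action, u32 ct_action)
    {
            return (action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
                   !(ct_action & TCA_CT_ACT_CLEAR);
    }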
+ + Fixes: 1ef3018f5af3 ("net/mlx5e: CT: Support clear action") + Signed-off-by: Roi Dayan + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index f760231213d1..c0e06114d328 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1343,7 +1343,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + if (err) + return err; + +- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { ++ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && ++ !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) { + err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); + dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + if (err) +-- +2.13.6 + diff --git a/SOURCES/0263-netdrv-net-mlx5e-CT-Fix-offload-with-CT-action-after.patch b/SOURCES/0263-netdrv-net-mlx5e-CT-Fix-offload-with-CT-action-after.patch new file mode 100644 index 0000000..40cd918 --- /dev/null +++ b/SOURCES/0263-netdrv-net-mlx5e-CT-Fix-offload-with-CT-action-after.patch @@ -0,0 +1,450 @@ +From dd674b21a94bd385f3ee22ffa180a2029beef026 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:25 -0400 +Subject: [PATCH 263/312] [netdrv] net/mlx5e: CT: Fix offload with CT action + after CT NAT action + +Message-id: <20200519074934.6303-55-ahleihel@redhat.com> +Patchwork-id: 310555 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 54/63] net/mlx5e: CT: Fix offload with CT action after CT NAT action +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 9102d836d296fbc94517736d2dd1131ad6b01740 +Author: Roi Dayan +Date: Sun Apr 12 15:39:15 2020 +0300 + + net/mlx5e: CT: Fix offload with CT action after CT NAT action + + It could be a chain of rules will do action CT again after CT NAT + Before this fix matching will break as we get into the CT table + after NAT changes and not CT NAT. + Fix this by adding pre ct and pre ct nat tables to skip ct/ct_nat + tables and go straight to post_ct table if ct/nat was already done. 
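The steering change can be summarized in software terms: from pre_ct/pre_ct_nat, a packet whose ct-state register already carries the tracked bit (and, for the NAT copy, the NAT bit) jumps straight to post_ct; everything else misses into the CT or CT-NAT table exactly as before. A simplified model of that decision using the bit names defined in this patch — the helper itself is invented and only compiles next to the tc_ct.c types:

    static struct mlx5_flow_table *
    pre_ct_next_table(struct mlx5_tc_ct_priv *ct_priv, u32 ctstate, bool nat)
    {
            u32 done = MLX5_CT_STATE_TRK_BIT;

            if (nat)
                    done |= MLX5_CT_STATE_NAT_BIT;

            if ((ctstate & done) == done)
                    return ct_priv->post_ct;        /* ct already executed */

            return nat ? ct_priv->ct_nat : ct_priv->ct;     /* do ct (nat) now */
    }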
+ + Signed-off-by: Roi Dayan + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 308 +++++++++++++++++++-- + 1 file changed, 286 insertions(+), 22 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 003079b09b67..8f94a4dde2bf 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -24,6 +24,7 @@ + #define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0) + #define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1) + #define MLX5_CT_STATE_TRK_BIT BIT(2) ++#define MLX5_CT_STATE_NAT_BIT BIT(3) + + #define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8) + #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) +@@ -61,6 +62,15 @@ struct mlx5_ct_zone_rule { + bool nat; + }; + ++struct mlx5_tc_ct_pre { ++ struct mlx5_flow_table *fdb; ++ struct mlx5_flow_group *flow_grp; ++ struct mlx5_flow_group *miss_grp; ++ struct mlx5_flow_handle *flow_rule; ++ struct mlx5_flow_handle *miss_rule; ++ struct mlx5_modify_hdr *modify_hdr; ++}; ++ + struct mlx5_ct_ft { + struct rhash_head node; + u16 zone; +@@ -68,6 +78,8 @@ struct mlx5_ct_ft { + struct nf_flowtable *nf_ft; + struct mlx5_tc_ct_priv *ct_priv; + struct rhashtable ct_entries_ht; ++ struct mlx5_tc_ct_pre pre_ct; ++ struct mlx5_tc_ct_pre pre_ct_nat; + }; + + struct mlx5_ct_entry { +@@ -428,6 +440,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_eswitch *esw = ct_priv->esw; + struct mlx5_modify_hdr *mod_hdr; + struct flow_action_entry *meta; ++ u16 ct_state = 0; + int err; + + meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule); +@@ -446,11 +459,13 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, + &mod_acts); + if (err) + goto err_mapping; ++ ++ ct_state |= MLX5_CT_STATE_NAT_BIT; + } + ++ ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT; + err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts, +- (MLX5_CT_STATE_ESTABLISHED_BIT | +- MLX5_CT_STATE_TRK_BIT), ++ ct_state, + meta->ct_metadata.mark, + meta->ct_metadata.labels[0], + tupleid); +@@ -793,6 +808,238 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, + return 0; + } + ++static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft, ++ struct mlx5_tc_ct_pre *pre_ct, ++ bool nat) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv; ++ struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {}; ++ struct mlx5_core_dev *dev = ct_priv->esw->dev; ++ struct mlx5_flow_table *fdb = pre_ct->fdb; ++ struct mlx5_flow_destination dest = {}; ++ struct mlx5_flow_act flow_act = {}; ++ struct mlx5_modify_hdr *mod_hdr; ++ struct mlx5_flow_handle *rule; ++ struct mlx5_flow_spec *spec; ++ u32 ctstate; ++ u16 zone; ++ int err; ++ ++ spec = kvzalloc(sizeof(*spec), GFP_KERNEL); ++ if (!spec) ++ return -ENOMEM; ++ ++ zone = ct_ft->zone & MLX5_CT_ZONE_MASK; ++ err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone); ++ if (err) { ++ ct_dbg("Failed to set zone register mapping"); ++ goto err_mapping; ++ } ++ ++ mod_hdr = mlx5_modify_header_alloc(dev, ++ MLX5_FLOW_NAMESPACE_FDB, ++ pre_mod_acts.num_actions, ++ pre_mod_acts.actions); ++ ++ if (IS_ERR(mod_hdr)) { ++ err = PTR_ERR(mod_hdr); ++ ct_dbg("Failed to create pre ct mod hdr"); ++ goto err_mapping; ++ } ++ pre_ct->modify_hdr = mod_hdr; ++ ++ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 
++ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; ++ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; ++ flow_act.modify_hdr = mod_hdr; ++ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ++ ++ /* add flow rule */ ++ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, ++ zone, MLX5_CT_ZONE_MASK); ++ ctstate = MLX5_CT_STATE_TRK_BIT; ++ if (nat) ++ ctstate |= MLX5_CT_STATE_NAT_BIT; ++ mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate); ++ ++ dest.ft = ct_priv->post_ct; ++ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1); ++ if (IS_ERR(rule)) { ++ err = PTR_ERR(rule); ++ ct_dbg("Failed to add pre ct flow rule zone %d", zone); ++ goto err_flow_rule; ++ } ++ pre_ct->flow_rule = rule; ++ ++ /* add miss rule */ ++ memset(spec, 0, sizeof(*spec)); ++ dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct; ++ rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1); ++ if (IS_ERR(rule)) { ++ err = PTR_ERR(rule); ++ ct_dbg("Failed to add pre ct miss rule zone %d", zone); ++ goto err_miss_rule; ++ } ++ pre_ct->miss_rule = rule; ++ ++ dealloc_mod_hdr_actions(&pre_mod_acts); ++ kvfree(spec); ++ return 0; ++ ++err_miss_rule: ++ mlx5_del_flow_rules(pre_ct->flow_rule); ++err_flow_rule: ++ mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr); ++err_mapping: ++ dealloc_mod_hdr_actions(&pre_mod_acts); ++ kvfree(spec); ++ return err; ++} ++ ++static void ++tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft, ++ struct mlx5_tc_ct_pre *pre_ct) ++{ ++ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv; ++ struct mlx5_core_dev *dev = ct_priv->esw->dev; ++ ++ mlx5_del_flow_rules(pre_ct->flow_rule); ++ mlx5_del_flow_rules(pre_ct->miss_rule); ++ mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr); ++} ++ ++static int ++mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft, ++ struct mlx5_tc_ct_pre *pre_ct, ++ bool nat) ++{ ++ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); ++ struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv; ++ struct mlx5_core_dev *dev = ct_priv->esw->dev; ++ struct mlx5_flow_table_attr ft_attr = {}; ++ struct mlx5_flow_namespace *ns; ++ struct mlx5_flow_table *ft; ++ struct mlx5_flow_group *g; ++ u32 metadata_reg_c_2_mask; ++ u32 *flow_group_in; ++ void *misc; ++ int err; ++ ++ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); ++ if (!ns) { ++ err = -EOPNOTSUPP; ++ ct_dbg("Failed to get FDB flow namespace"); ++ return err; ++ } ++ ++ flow_group_in = kvzalloc(inlen, GFP_KERNEL); ++ if (!flow_group_in) ++ return -ENOMEM; ++ ++ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; ++ ft_attr.prio = FDB_TC_OFFLOAD; ++ ft_attr.max_fte = 2; ++ ft_attr.level = 1; ++ ft = mlx5_create_flow_table(ns, &ft_attr); ++ if (IS_ERR(ft)) { ++ err = PTR_ERR(ft); ++ ct_dbg("Failed to create pre ct table"); ++ goto out_free; ++ } ++ pre_ct->fdb = ft; ++ ++ /* create flow group */ ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); ++ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, ++ MLX5_MATCH_MISC_PARAMETERS_2); ++ ++ misc = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, ++ match_criteria.misc_parameters_2); ++ ++ metadata_reg_c_2_mask = MLX5_CT_ZONE_MASK; ++ metadata_reg_c_2_mask |= (MLX5_CT_STATE_TRK_BIT << 16); ++ if (nat) ++ metadata_reg_c_2_mask |= (MLX5_CT_STATE_NAT_BIT << 16); ++ ++ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_2, ++ metadata_reg_c_2_mask); ++ ++ g = mlx5_create_flow_group(ft, flow_group_in); ++ if (IS_ERR(g)) { ++ err = PTR_ERR(g); ++ ct_dbg("Failed to create pre ct group"); ++ goto 
err_flow_grp; ++ } ++ pre_ct->flow_grp = g; ++ ++ /* create miss group */ ++ memset(flow_group_in, 0, inlen); ++ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); ++ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); ++ g = mlx5_create_flow_group(ft, flow_group_in); ++ if (IS_ERR(g)) { ++ err = PTR_ERR(g); ++ ct_dbg("Failed to create pre ct miss group"); ++ goto err_miss_grp; ++ } ++ pre_ct->miss_grp = g; ++ ++ err = tc_ct_pre_ct_add_rules(ct_ft, pre_ct, nat); ++ if (err) ++ goto err_add_rules; ++ ++ kvfree(flow_group_in); ++ return 0; ++ ++err_add_rules: ++ mlx5_destroy_flow_group(pre_ct->miss_grp); ++err_miss_grp: ++ mlx5_destroy_flow_group(pre_ct->flow_grp); ++err_flow_grp: ++ mlx5_destroy_flow_table(ft); ++out_free: ++ kvfree(flow_group_in); ++ return err; ++} ++ ++static void ++mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft, ++ struct mlx5_tc_ct_pre *pre_ct) ++{ ++ tc_ct_pre_ct_del_rules(ct_ft, pre_ct); ++ mlx5_destroy_flow_group(pre_ct->miss_grp); ++ mlx5_destroy_flow_group(pre_ct->flow_grp); ++ mlx5_destroy_flow_table(pre_ct->fdb); ++} ++ ++static int ++mlx5_tc_ct_alloc_pre_ct_tables(struct mlx5_ct_ft *ft) ++{ ++ int err; ++ ++ err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct, false); ++ if (err) ++ return err; ++ ++ err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct_nat, true); ++ if (err) ++ goto err_pre_ct_nat; ++ ++ return 0; ++ ++err_pre_ct_nat: ++ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct); ++ return err; ++} ++ ++static void ++mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft) ++{ ++ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct_nat); ++ mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct); ++} ++ + static struct mlx5_ct_ft * + mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, + struct nf_flowtable *nf_ft) +@@ -815,6 +1062,10 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, + ft->ct_priv = ct_priv; + refcount_set(&ft->refcount, 1); + ++ err = mlx5_tc_ct_alloc_pre_ct_tables(ft); ++ if (err) ++ goto err_alloc_pre_ct; ++ + err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params); + if (err) + goto err_init; +@@ -836,6 +1087,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, + err_insert: + rhashtable_destroy(&ft->ct_entries_ht); + err_init: ++ mlx5_tc_ct_free_pre_ct_tables(ft); ++err_alloc_pre_ct: + kfree(ft); + return ERR_PTR(err); + } +@@ -861,21 +1114,40 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) + rhashtable_free_and_destroy(&ft->ct_entries_ht, + mlx5_tc_ct_flush_ft_entry, + ct_priv); ++ mlx5_tc_ct_free_pre_ct_tables(ft); + kfree(ft); + } + + /* We translate the tc filter with CT action to the following HW model: + * +- * +-------------------+ +--------------------+ +--------------+ +- * + pre_ct (tc chain) +----->+ CT (nat or no nat) +--->+ post_ct +-----> +- * + original match + | + tuple + zone match + | + fte_id match + | +- * +-------------------+ | +--------------------+ | +--------------+ | +- * v v v +- * set chain miss mapping set mark original +- * set fte_id set label filter +- * set zone set established actions +- * set tunnel_id do nat (if needed) +- * do decap ++ * +---------------------+ ++ * + fdb prio (tc chain) + ++ * + original match + ++ * +---------------------+ ++ * | set chain miss mapping ++ * | set fte_id ++ * | set tunnel_id ++ * | do decap ++ * v ++ * +---------------------+ ++ * + pre_ct/pre_ct_nat + if matches +---------------------+ ++ * + zone+nat match +---------------->+ post_ct (see below) + ++ * +---------------------+ set zone 
+---------------------+ ++ * | set zone ++ * v ++ * +--------------------+ ++ * + CT (nat or no nat) + ++ * + tuple + zone match + ++ * +--------------------+ ++ * | set mark ++ * | set label ++ * | set established ++ * | do nat (if needed) ++ * v ++ * +--------------+ ++ * + post_ct + original filter actions ++ * + fte_id match +------------------------> ++ * +--------------+ + */ + static int + __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, +@@ -890,7 +1162,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + struct mlx5_flow_spec *post_ct_spec = NULL; + struct mlx5_eswitch *esw = ct_priv->esw; + struct mlx5_esw_flow_attr *pre_ct_attr; +- struct mlx5_modify_hdr *mod_hdr; ++ struct mlx5_modify_hdr *mod_hdr; + struct mlx5_flow_handle *rule; + struct mlx5_ct_flow *ct_flow; + int chain_mapping = 0, err; +@@ -953,14 +1225,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + goto err_mapping; + } + +- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, ZONE_TO_REG, +- attr->ct_attr.zone & +- MLX5_CT_ZONE_MASK); +- if (err) { +- ct_dbg("Failed to set zone register mapping"); +- goto err_mapping; +- } +- + err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts, + FTEID_TO_REG, fte_id); + if (err) { +@@ -1020,7 +1284,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, + + /* Change original rule point to ct table */ + pre_ct_attr->dest_chain = 0; +- pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct; ++ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb; + ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw, + orig_spec, + pre_ct_attr); +-- +2.13.6 + diff --git a/SOURCES/0264-netdrv-net-mlx5-E-switch-Annotate-termtbl_mutex-mute.patch b/SOURCES/0264-netdrv-net-mlx5-E-switch-Annotate-termtbl_mutex-mute.patch new file mode 100644 index 0000000..1de6686 --- /dev/null +++ b/SOURCES/0264-netdrv-net-mlx5-E-switch-Annotate-termtbl_mutex-mute.patch @@ -0,0 +1,89 @@ +From 3963403fd564090f9d1439595b29b147f9c6a960 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:27 -0400 +Subject: [PATCH 264/312] [netdrv] net/mlx5: E-switch, Annotate termtbl_mutex + mutex destroy + +Message-id: <20200519074934.6303-57-ahleihel@redhat.com> +Patchwork-id: 310558 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 56/63] net/mlx5: E-switch, Annotate termtbl_mutex mutex destroy +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 2bb72e7e2abc6c8005baef4b35795616be2e0e4c +Author: Parav Pandit +Date: Sat Dec 14 03:24:25 2019 -0600 + + net/mlx5: E-switch, Annotate termtbl_mutex mutex destroy + + Annotate mutex destroy to keep it symmetric to init sequence. + It should be destroyed after its users (representor netdevices) are + destroyed in below flow. + + esw_offloads_disable() + esw_offloads_unload_rep() + + Hence, initialize the mutex before creating the representors which uses + it. 
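Put differently, the lock's lifetime is widened to bracket its users: initialized before the representors that take it are created, destroyed after they are gone and on the error unwind. A compressed sketch of that symmetry (the representor helpers are placeholders, not the real function names):

    static int load_representors(struct mlx5_eswitch *esw);    /* placeholder */
    static void unload_representors(struct mlx5_eswitch *esw); /* placeholder */

    static int offloads_enable_sketch(struct mlx5_eswitch *esw)
    {
            int err;

            mutex_init(&esw->offloads.termtbl_mutex);       /* before any user */

            err = load_representors(esw);
            if (err)
                    mutex_destroy(&esw->offloads.termtbl_mutex);
            return err;
    }

    static void offloads_disable_sketch(struct mlx5_eswitch *esw)
    {
            unload_representors(esw);
            mutex_destroy(&esw->offloads.termtbl_mutex);    /* after last user */
    }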
+ + Reviewed-by: Roi Dayan + Reviewed-by: Bodong Wang + Signed-off-by: Parav Pandit + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 82905834ddb2..8d277bdaccea 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -2429,6 +2429,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + else + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; + ++ mutex_init(&esw->offloads.termtbl_mutex); + mlx5_rdma_enable_roce(esw->dev); + err = esw_offloads_steering_init(esw); + if (err) +@@ -2451,7 +2452,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + goto err_reps; + + esw_offloads_devcom_init(esw); +- mutex_init(&esw->offloads.termtbl_mutex); + + return 0; + +@@ -2463,6 +2463,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) + esw_offloads_steering_cleanup(esw); + err_steering_init: + mlx5_rdma_disable_roce(esw->dev); ++ mutex_destroy(&esw->offloads.termtbl_mutex); + return err; + } + +@@ -2493,6 +2494,7 @@ void esw_offloads_disable(struct mlx5_eswitch *esw) + esw_set_passing_vport_metadata(esw, false); + esw_offloads_steering_cleanup(esw); + mlx5_rdma_disable_roce(esw->dev); ++ mutex_destroy(&esw->offloads.termtbl_mutex); + esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; + } + +-- +2.13.6 + diff --git a/SOURCES/0265-netdrv-net-mlx5-E-switch-Annotate-esw-state_lock-mut.patch b/SOURCES/0265-netdrv-net-mlx5-E-switch-Annotate-esw-state_lock-mut.patch new file mode 100644 index 0000000..5778bc3 --- /dev/null +++ b/SOURCES/0265-netdrv-net-mlx5-E-switch-Annotate-esw-state_lock-mut.patch @@ -0,0 +1,57 @@ +From 7ebf81988e7d25faf70f748a840ddba1d38c651b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:28 -0400 +Subject: [PATCH 265/312] [netdrv] net/mlx5: E-switch, Annotate esw state_lock + mutex destroy + +Message-id: <20200519074934.6303-58-ahleihel@redhat.com> +Patchwork-id: 310557 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 57/63] net/mlx5: E-switch, Annotate esw state_lock mutex destroy +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit d6c8022dfb06b27c96d6deca93ce6d762e842878 +Author: Parav Pandit +Date: Tue Dec 17 22:51:24 2019 -0600 + + net/mlx5: E-switch, Annotate esw state_lock mutex destroy + + Invoke mutex_destroy() to catch any esw state_lock errors. 
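For context, mutex_destroy() compiles away on production builds; with CONFIG_DEBUG_MUTEXES it marks the lock unusable so any later lock or unlock is reported, which is what makes the annotation worth adding. A small generic sketch:

    #include <linux/mutex.h>

    static void mutex_lifecycle_demo(void)
    {
            struct mutex lock;

            mutex_init(&lock);
            mutex_lock(&lock);
            /* ... protected work ... */
            mutex_unlock(&lock);
            mutex_destroy(&lock);   /* debug builds now catch any further use */
    }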
+ + Reviewed-by: Roi Dayan + Reviewed-by: Bodong Wang + Signed-off-by: Parav Pandit + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 6ae084b0e612..901466fa1041 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -2198,6 +2198,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) + esw->dev->priv.eswitch = NULL; + destroy_workqueue(esw->work_queue); + esw_offloads_cleanup_reps(esw); ++ mutex_destroy(&esw->state_lock); + mutex_destroy(&esw->offloads.mod_hdr.lock); + mutex_destroy(&esw->offloads.encap_tbl_lock); + kfree(esw->vports); +-- +2.13.6 + diff --git a/SOURCES/0266-netdrv-net-mlx5-Avoid-deriving-mlx5_core_dev-second-.patch b/SOURCES/0266-netdrv-net-mlx5-Avoid-deriving-mlx5_core_dev-second-.patch new file mode 100644 index 0000000..c45c89f --- /dev/null +++ b/SOURCES/0266-netdrv-net-mlx5-Avoid-deriving-mlx5_core_dev-second-.patch @@ -0,0 +1,120 @@ +From 3be4371f8c2705031cb0913297eaeec46c615ab4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:29 -0400 +Subject: [PATCH 266/312] [netdrv] net/mlx5: Avoid deriving mlx5_core_dev + second time + +Message-id: <20200519074934.6303-59-ahleihel@redhat.com> +Patchwork-id: 310562 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 58/63] net/mlx5: Avoid deriving mlx5_core_dev second time +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit 0e6fa491e8b0a9a0115896fc88a404f8d89c2e80 +Author: Parav Pandit +Date: Tue Dec 17 23:16:11 2019 -0600 + + net/mlx5: Avoid deriving mlx5_core_dev second time + + All callers needs to work on mlx5_core_dev and it is already derived + before calling mlx5_devlink_eswitch_check(). + Hence, accept mlx5_core_dev in mlx5_devlink_eswitch_check(). + + Given that it works on mlx5_core_dev change helper function name to + drop devlink prefix. 
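The caller-side effect is simply that the devlink-to-device translation happens once per entry point and the result is reused. An abridged sketch mirroring one of the patched callers (suffix and trimming are mine, not the real function body):

    static int mlx5_devlink_eswitch_mode_get_sketch(struct devlink *devlink,
                                                    u16 *mode)
    {
            struct mlx5_core_dev *dev = devlink_priv(devlink);  /* derive once */
            int err;

            err = mlx5_eswitch_check(dev);      /* helper now takes the dev */
            if (err)
                    return err;

            return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
    }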
+ + Reviewed-by: Roi Dayan + Reviewed-by: Bodong Wang + Signed-off-by: Parav Pandit + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 16 +++++++--------- + 1 file changed, 7 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 8d277bdaccea..96a6eaf84a50 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -2574,10 +2574,8 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) + return 0; + } + +-static int mlx5_devlink_eswitch_check(struct devlink *devlink) ++static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) + { +- struct mlx5_core_dev *dev = devlink_priv(devlink); +- + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -EOPNOTSUPP; + +@@ -2598,7 +2596,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + u16 cur_mlx5_mode, mlx5_mode = 0; + int err; + +- err = mlx5_devlink_eswitch_check(devlink); ++ err = mlx5_eswitch_check(dev); + if (err) + return err; + +@@ -2623,7 +2621,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) + struct mlx5_core_dev *dev = devlink_priv(devlink); + int err; + +- err = mlx5_devlink_eswitch_check(devlink); ++ err = mlx5_eswitch_check(dev); + if (err) + return err; + +@@ -2638,7 +2636,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + int err, vport, num_vport; + u8 mlx5_mode; + +- err = mlx5_devlink_eswitch_check(devlink); ++ err = mlx5_eswitch_check(dev); + if (err) + return err; + +@@ -2692,7 +2690,7 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) + struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; + +- err = mlx5_devlink_eswitch_check(devlink); ++ err = mlx5_eswitch_check(dev); + if (err) + return err; + +@@ -2744,7 +2742,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, + struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; + +- err = mlx5_devlink_eswitch_check(devlink); ++ err = mlx5_eswitch_check(dev); + if (err) + return err; + +@@ -2793,7 +2791,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, + struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; + +- err = mlx5_devlink_eswitch_check(devlink); ++ err = mlx5_eswitch_check(dev); + if (err) + return err; + +-- +2.13.6 + diff --git a/SOURCES/0267-netdrv-net-mlx5-Simplify-mlx5_register_device-to-ret.patch b/SOURCES/0267-netdrv-net-mlx5-Simplify-mlx5_register_device-to-ret.patch new file mode 100644 index 0000000..2335d3f --- /dev/null +++ b/SOURCES/0267-netdrv-net-mlx5-Simplify-mlx5_register_device-to-ret.patch @@ -0,0 +1,115 @@ +From 9d713f61edcf542b446566ad17b5dcce46044480 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:30 -0400 +Subject: [PATCH 267/312] [netdrv] net/mlx5: Simplify mlx5_register_device to + return void + +Message-id: <20200519074934.6303-60-ahleihel@redhat.com> +Patchwork-id: 310560 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 59/63] net/mlx5: Simplify mlx5_register_device to return void +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: 
http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit ecd01db8711d4c608ef6636275e26a8b2069b798 +Author: Parav Pandit +Date: Sun Mar 8 22:19:51 2020 -0500 + + net/mlx5: Simplify mlx5_register_device to return void + + mlx5_register_device() doesn't check for any error and always returns 0. + Simplify mlx5_register_device() to return void and its caller, remove + dead code related to it. + + Reviewed-by: Moshe Shemesh + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/dev.c | 4 +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 14 +++----------- + drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 +- + 3 files changed, 5 insertions(+), 15 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c +index 50862275544e..1972ddd12704 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c +@@ -193,7 +193,7 @@ bool mlx5_device_registered(struct mlx5_core_dev *dev) + return found; + } + +-int mlx5_register_device(struct mlx5_core_dev *dev) ++void mlx5_register_device(struct mlx5_core_dev *dev) + { + struct mlx5_priv *priv = &dev->priv; + struct mlx5_interface *intf; +@@ -203,8 +203,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev) + list_for_each_entry(intf, &intf_list, list) + mlx5_add_device(intf, priv); + mutex_unlock(&mlx5_intf_mutex); +- +- return 0; + } + + void mlx5_unregister_device(struct mlx5_core_dev *dev) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index c835f029caf8..8df92ccf6393 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1228,15 +1228,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + goto err_devlink_reg; + } + +- if (mlx5_device_registered(dev)) { ++ if (mlx5_device_registered(dev)) + mlx5_attach_device(dev); +- } else { +- err = mlx5_register_device(dev); +- if (err) { +- mlx5_core_err(dev, "register device failed %d\n", err); +- goto err_reg_dev; +- } +- } ++ else ++ mlx5_register_device(dev); + + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + out: +@@ -1244,9 +1239,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + + return err; + +-err_reg_dev: +- if (boot) +- mlx5_devlink_unregister(priv_to_devlink(dev)); + err_devlink_reg: + mlx5_unload(dev); + err_load: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +index b100489dc85c..23dc7be90263 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +@@ -182,7 +182,7 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv); + void mlx5_attach_device(struct mlx5_core_dev *dev); + void mlx5_detach_device(struct mlx5_core_dev *dev); + bool mlx5_device_registered(struct mlx5_core_dev *dev); +-int mlx5_register_device(struct mlx5_core_dev *dev); ++void mlx5_register_device(struct mlx5_core_dev *dev); + void mlx5_unregister_device(struct mlx5_core_dev *dev); + void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); + void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); +-- +2.13.6 + diff --git 
a/SOURCES/0268-netdrv-net-mlx5-Simplify-mlx5_unload_one-and-its-cal.patch b/SOURCES/0268-netdrv-net-mlx5-Simplify-mlx5_unload_one-and-its-cal.patch new file mode 100644 index 0000000..224eff1 --- /dev/null +++ b/SOURCES/0268-netdrv-net-mlx5-Simplify-mlx5_unload_one-and-its-cal.patch @@ -0,0 +1,82 @@ +From 03928b4be345f0b0872a07929aa4844f2cef0e17 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:31 -0400 +Subject: [PATCH 268/312] [netdrv] net/mlx5: Simplify mlx5_unload_one() and its + callers + +Message-id: <20200519074934.6303-61-ahleihel@redhat.com> +Patchwork-id: 310559 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 60/63] net/mlx5: Simplify mlx5_unload_one() and its callers +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit f999b706b7ab5dae45a3ee5c2b3bc2b47c11b0c5 +Author: Parav Pandit +Date: Sun Mar 8 23:17:37 2020 -0500 + + net/mlx5: Simplify mlx5_unload_one() and its callers + + mlx5_unload_one() always returns 0. + Simplify callers of mlx5_unload_one() and remove the dead code. + + Reviewed-by: Moshe Shemesh + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +[ Conflicts: mlx5_unload_one was static as + "[netdrv] net/mlx5: Add devlink reload" is skipped. ] + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 10 ++-------- + 1 file changed, 2 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 8df92ccf6393..f575f684ad78 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1252,7 +1252,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + return err; + } + +-static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) ++static void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) + { + if (cleanup) { + mlx5_unregister_device(dev); +@@ -1281,7 +1281,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup) + mlx5_function_teardown(dev, cleanup); + out: + mutex_unlock(&dev->intf_state_mutex); +- return 0; + } + + static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) +@@ -1402,12 +1401,7 @@ static void remove_one(struct pci_dev *pdev) + mlx5_crdump_disable(dev); + mlx5_devlink_unregister(devlink); + +- if (mlx5_unload_one(dev, true)) { +- mlx5_core_err(dev, "mlx5_unload_one failed\n"); +- mlx5_health_flush(dev); +- return; +- } +- ++ mlx5_unload_one(dev, true); + mlx5_pci_close(dev); + mlx5_mdev_uninit(dev); + mlx5_devlink_free(devlink); +-- +2.13.6 + diff --git a/SOURCES/0269-netdrv-net-mlx5-Split-eswitch-mode-check-to-differen.patch b/SOURCES/0269-netdrv-net-mlx5-Split-eswitch-mode-check-to-differen.patch new file mode 100644 index 0000000..525debe --- /dev/null +++ b/SOURCES/0269-netdrv-net-mlx5-Split-eswitch-mode-check-to-differen.patch @@ -0,0 +1,139 @@ +From 9558c167e7bcfe7b5ef5a688054f274e2083c0ce Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:32 -0400 +Subject: [PATCH 269/312] [netdrv] net/mlx5: Split eswitch mode check to + different helper function + +Message-id: <20200519074934.6303-62-ahleihel@redhat.com> +Patchwork-id: 310565 +Patchwork-instance: 
patchwork +O-Subject: [RHEL8.3 BZ 1663246 61/63] net/mlx5: Split eswitch mode check to different helper function +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit ae24432cbc2b2d7f5a8d636194422604b0b4c4f7 +Author: Parav Pandit +Date: Sat Dec 14 04:09:04 2019 -0600 + + net/mlx5: Split eswitch mode check to different helper function + + In order to check eswitch state under a lock, prepare code to split + capability check and eswitch state check into two helper functions. + + Reviewed-by: Roi Dayan + Reviewed-by: Bodong Wang + Reviewed-by: Mark Bloch + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 37 +++++++++++++++++++--- + 1 file changed, 33 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 96a6eaf84a50..1b74a6f35df9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -2582,13 +2582,18 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) + if(!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; + +- if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE && +- !mlx5_core_is_ecpf_esw_manager(dev)) +- return -EOPNOTSUPP; +- + return 0; + } + ++static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) ++{ ++ /* devlink commands in NONE eswitch mode are currently supported only ++ * on ECPF. ++ */ ++ return (esw->mode == MLX5_ESWITCH_NONE && ++ !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? 
-EOPNOTSUPP : 0; ++} ++ + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) + { +@@ -2600,6 +2605,10 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + if (err) + return err; + ++ err = eswitch_devlink_esw_mode_check(dev->priv.eswitch); ++ if (err) ++ return err; ++ + cur_mlx5_mode = dev->priv.eswitch->mode; + + if (esw_mode_from_devlink(mode, &mlx5_mode)) +@@ -2625,6 +2634,10 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) + if (err) + return err; + ++ err = eswitch_devlink_esw_mode_check(dev->priv.eswitch); ++ if (err) ++ return err; ++ + return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); + } + +@@ -2640,6 +2653,10 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + if (err) + return err; + ++ err = eswitch_devlink_esw_mode_check(esw); ++ if (err) ++ return err; ++ + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) +@@ -2694,6 +2711,10 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) + if (err) + return err; + ++ err = eswitch_devlink_esw_mode_check(esw); ++ if (err) ++ return err; ++ + return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); + } + +@@ -2746,6 +2767,10 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, + if (err) + return err; + ++ err = eswitch_devlink_esw_mode_check(esw); ++ if (err) ++ return err; ++ + if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && + (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || + !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) +@@ -2795,6 +2820,10 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, + if (err) + return err; + ++ err = eswitch_devlink_esw_mode_check(esw); ++ if (err) ++ return err; ++ + *encap = esw->offloads.encap; + return 0; + } +-- +2.13.6 + diff --git a/SOURCES/0270-netdrv-net-mlx5-E-switch-Extend-eswitch-enable-to-ha.patch b/SOURCES/0270-netdrv-net-mlx5-E-switch-Extend-eswitch-enable-to-ha.patch new file mode 100644 index 0000000..df183db --- /dev/null +++ b/SOURCES/0270-netdrv-net-mlx5-E-switch-Extend-eswitch-enable-to-ha.patch @@ -0,0 +1,239 @@ +From c8627df5d03e52683da824e64b4330343da7948b Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:33 -0400 +Subject: [PATCH 270/312] [netdrv] net/mlx5: E-switch, Extend eswitch enable to + handle num_vfs change + +Message-id: <20200519074934.6303-63-ahleihel@redhat.com> +Patchwork-id: 310564 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 62/63] net/mlx5: E-switch, Extend eswitch enable to handle num_vfs change +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit ebf77bb83f635377ad7946b73490b18ecf50dc29 +Author: Parav Pandit +Date: Wed Dec 18 04:58:58 2019 -0600 + + net/mlx5: E-switch, Extend eswitch enable to handle num_vfs change + + Subsequent patch protects eswitch mode changes across sriov and devlink + interfaces. It is desirable for eswitch to provide thread safe eswitch + enable and disable APIs. + Hence, extend eswitch enable API to optionally update num_vfs when + requested. + + In subsequent patch, eswitch num_vfs are updated after all the eswitch + users eswitch drops its reference count. 
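The two calling conventions this introduces are easiest to see side by side: the SR-IOV path passes the real VF count, while devlink mode changes, which must not disturb the stored count, pass MLX5_ESWITCH_IGNORE_NUM_VFS. A sketch of both (the wrapper names are invented; the calls themselves match the hunks below):

    static int enable_for_sriov_sketch(struct mlx5_eswitch *esw, int num_vfs)
    {
            /* SR-IOV enable knows how many VF vports to set up */
            return mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY, num_vfs);
    }

    static int enable_for_mode_change_sketch(struct mlx5_eswitch *esw)
    {
            /* devlink mode switch keeps whatever VF count is already stored */
            return mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS,
                                       MLX5_ESWITCH_IGNORE_NUM_VFS);
    }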
+ + Reviewed-by: Roi Dayan + Reviewed-by: Bodong Wang + Reviewed-by: Mark Bloch + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 63 +++++++++++++++------- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10 ++-- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13 +++-- + drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 4 +- + 4 files changed, 58 insertions(+), 32 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 901466fa1041..19b887ad9392 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -2026,7 +2026,48 @@ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw) + } + } + +-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) ++static void ++mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) ++{ ++ const u32 *out; ++ ++ WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); ++ ++ if (num_vfs < 0) ++ return; ++ ++ if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { ++ esw->esw_funcs.num_vfs = num_vfs; ++ return; ++ } ++ ++ out = mlx5_esw_query_functions(esw->dev); ++ if (IS_ERR(out)) ++ return; ++ ++ esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, ++ host_params_context.host_num_of_vfs); ++ kvfree(out); ++} ++ ++/** ++ * mlx5_eswitch_enable - Enable eswitch ++ * @esw: Pointer to eswitch ++ * @mode: Eswitch mode to enable ++ * @num_vfs: Enable eswitch for given number of VFs. This is optional. ++ * Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS. ++ * Caller should pass num_vfs > 0 when enabling eswitch for ++ * vf vports. Caller should pass num_vfs = 0, when eswitch ++ * is enabled without sriov VFs or when caller ++ * is unaware of the sriov state of the host PF on ECPF based ++ * eswitch. Caller should pass < 0 when num_vfs should be ++ * completely ignored. This is typically the case when eswitch ++ * is enabled without sriov regardless of PF/ECPF system. ++ * mlx5_eswitch_enable() Enables eswitch in either legacy or offloads mode. ++ * If num_vfs >=0 is provided, it setup VF related eswitch vports. It returns ++ * 0 on success or error code on failure. 
++ */ ++int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs) + { + int err; + +@@ -2044,6 +2085,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) + + mlx5_eswitch_get_devlink_param(esw); + ++ mlx5_eswitch_update_num_of_vfs(esw, num_vfs); ++ + esw_create_tsar(esw); + + esw->mode = mode; +@@ -2772,22 +2815,4 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, + dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS); + } + +-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) +-{ +- const u32 *out; +- +- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE); +- +- if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) { +- esw->esw_funcs.num_vfs = num_vfs; +- return; +- } +- +- out = mlx5_esw_query_functions(esw->dev); +- if (IS_ERR(out)) +- return; + +- esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out, +- host_params_context.host_num_of_vfs); +- kvfree(out); +-} +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index dafeb2000269..5ec76f48e9b7 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -335,7 +335,9 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + /* E-Switch API */ + int mlx5_eswitch_init(struct mlx5_core_dev *dev); + void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); +-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode); ++ ++#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1) ++int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs); + void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); + int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]); +@@ -673,7 +675,6 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); + + bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num); + +-void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); + int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data); + + int +@@ -700,7 +701,7 @@ esw_get_max_restore_tag(struct mlx5_eswitch *esw); + /* eswitch API stubs */ + static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } + static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} +-static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { return 0; } ++static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs) { return 0; } + static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} + static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } + static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +@@ -709,14 +710,11 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) + return ERR_PTR(-EOPNOTSUPP); + } + +-static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {} +- + static inline struct mlx5_flow_handle * + esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) + { + return ERR_PTR(-EOPNOTSUPP); + } +- + #endif /* CONFIG_MLX5_ESWITCH */ + + #endif /* __MLX5_ESWITCH_H__ */ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 1b74a6f35df9..29280218518a 100644 +--- 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1550,12 +1550,13 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, + } + + mlx5_eswitch_disable(esw, false); +- mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); +- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); ++ err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS, ++ esw->dev->priv.sriov.num_vfs); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch to offloads"); +- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); ++ err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY, ++ MLX5_ESWITCH_IGNORE_NUM_VFS); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to legacy"); +@@ -2473,10 +2474,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, + int err, err1; + + mlx5_eswitch_disable(esw, false); +- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); ++ err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY, ++ MLX5_ESWITCH_IGNORE_NUM_VFS); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); +- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); ++ err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS, ++ MLX5_ESWITCH_IGNORE_NUM_VFS); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to offloads"); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +index 03f037811f1d..10a64b91d04c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +@@ -77,8 +77,8 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + +- mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs); +- err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY); ++ err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY, ++ num_vfs); + if (err) { + mlx5_core_warn(dev, + "failed to enable eswitch SRIOV (%d)\n", err); +-- +2.13.6 + diff --git a/SOURCES/0271-netdrv-net-mlx5-E-switch-Protect-eswitch-mode-change.patch b/SOURCES/0271-netdrv-net-mlx5-E-switch-Protect-eswitch-mode-change.patch new file mode 100644 index 0000000..7a6d102 --- /dev/null +++ b/SOURCES/0271-netdrv-net-mlx5-E-switch-Protect-eswitch-mode-change.patch @@ -0,0 +1,523 @@ +From 71d5ed76fabd2bd8135d8c34484842f6acad5a65 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:49:34 -0400 +Subject: [PATCH 271/312] [netdrv] net/mlx5: E-switch, Protect eswitch mode + changes + +Message-id: <20200519074934.6303-64-ahleihel@redhat.com> +Patchwork-id: 310566 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 63/63] net/mlx5: E-switch, Protect eswitch mode changes +Bugzilla: 1835595 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1835595 +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 +Conflicts: + - drivers/net/ethernet/mellanox/mlx5/core/eswitch.c + Diff due to missing commit: + 9ffbe8ac05db ("locking/lockdep: Rename lockdep_assert_held_exclusive() -> lockdep_assert_held_write()") + ---> Use lockdep_assert_held_exclusive instead of lockdep_assert_held_write. 
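The conflict note covers only a helper rename: on this tree the assertion that the caller holds the new mode_lock is spelled lockdep_assert_held_exclusive() rather than lockdep_assert_held_write(); either way the check only exists with CONFIG_LOCKDEP. A generic sketch of the assertion style (the lock here is a stand-in for esw->mode_lock):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    static DEFINE_MUTEX(mode_lock_demo);

    static void requires_mode_lock(void)
    {
            lockdep_assert_held(&mode_lock_demo);   /* splats if caller forgot */
            /* ... mode-changing work ... */
    }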
+ +commit 8e0aa4bc959c98c14ed0aaee522d77ca52690189 +Author: Parav Pandit +Date: Wed Dec 18 02:51:19 2019 -0600 + + net/mlx5: E-switch, Protect eswitch mode changes + + Currently eswitch mode change is occurring from 2 different execution + contexts as below. + 1. sriov sysfs enable/disable + 2. devlink eswitch set commands + + Both of them need to access eswitch related data structures in + synchronized manner. + Without any synchronization below race condition exist. + + SR-IOV enable/disable with devlink eswitch mode change: + + cpu-0 cpu-1 + ----- ----- + mlx5_device_disable_sriov() mlx5_devlink_eswitch_mode_set() + mlx5_eswitch_disable() esw_offloads_stop() + esw_offloads_disable() mlx5_eswitch_disable() + esw_offloads_disable() + + Hence, they are synchronized using a new mode_lock. + eswitch's state_lock is not used as it can lead to a deadlock scenario + below and state_lock is only for vport and fdb exclusive access. + + ip link set vf + netlink rcv_msg() - Lock A + rtnl_lock + vfinfo() + esw->state_lock() - Lock B + devlink eswitch_set + devlink_mutex + esw->state_lock() - Lock B + attach_netdev() + register_netdev() + rtnl_lock - Lock A + + Alternatives considered: + 1. Acquiring rtnl lock before taking esw->state_lock to follow similar + locking sequence as ip link flow during eswitch mode set. + rtnl lock is not good idea for two reasons. + (a) Holding rtnl lock for several hundred device commands is not good + idea. + (b) It leads to below and more similar deadlocks. + + devlink eswitch_set + devlink_mutex + rtnl_lock - Lock A + esw->state_lock() - Lock B + eswitch_disable() + reload() + ib_register_device() + ib_cache_setup_one() + rtnl_lock() + + 2. Exporting devlink lock may lead to undesired use of it in vendor + driver(s) in future. + + 3. Unloading representors outside of the mode_lock requires + serialization with other process trying to enable the eswitch. + + 4. Differing the representors life cycle to a different workqueue + requires synchronization with func_change_handler workqueue. + + Reviewed-by: Roi Dayan + Reviewed-by: Bodong Wang + Reviewed-by: Mark Bloch + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 54 +++++++++-- + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 13 ++- + .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 105 +++++++++++++-------- + drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 3 +- + 4 files changed, 125 insertions(+), 50 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +index 19b887ad9392..4126f9be46d3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +@@ -2051,7 +2051,7 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) + } + + /** +- * mlx5_eswitch_enable - Enable eswitch ++ * mlx5_eswitch_enable_locked - Enable eswitch + * @esw: Pointer to eswitch + * @mode: Eswitch mode to enable + * @num_vfs: Enable eswitch for given number of VFs. This is optional. +@@ -2063,16 +2063,17 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs) + * eswitch. Caller should pass < 0 when num_vfs should be + * completely ignored. This is typically the case when eswitch + * is enabled without sriov regardless of PF/ECPF system. +- * mlx5_eswitch_enable() Enables eswitch in either legacy or offloads mode. 
+- * If num_vfs >=0 is provided, it setup VF related eswitch vports. It returns +- * 0 on success or error code on failure. ++ * mlx5_eswitch_enable_locked() Enables eswitch in either legacy or offloads ++ * mode. If num_vfs >=0 is provided, it setup VF related eswitch vports. ++ * It returns 0 on success or error code on failure. + */ +-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs) ++int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) + { + int err; + +- if (!ESW_ALLOWED(esw) || +- !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { ++ lockdep_assert_held(&esw->mode_lock); ++ ++ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { + esw_warn(esw->dev, "FDB is not supported, aborting ...\n"); + return -EOPNOTSUPP; + } +@@ -2123,11 +2124,34 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs) + return err; + } + +-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) ++/** ++ * mlx5_eswitch_enable - Enable eswitch ++ * @esw: Pointer to eswitch ++ * @num_vfs: Enable eswitch swich for given number of VFs. ++ * Caller must pass num_vfs > 0 when enabling eswitch for ++ * vf vports. ++ * mlx5_eswitch_enable() returns 0 on success or error code on failure. ++ */ ++int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) ++{ ++ int ret; ++ ++ if (!ESW_ALLOWED(esw)) ++ return 0; ++ ++ mutex_lock(&esw->mode_lock); ++ ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); ++ mutex_unlock(&esw->mode_lock); ++ return ret; ++} ++ ++void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) + { + int old_mode; + +- if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE) ++ lockdep_assert_held_exclusive(&esw->mode_lock); ++ ++ if (esw->mode == MLX5_ESWITCH_NONE) + return; + + esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n", +@@ -2156,6 +2180,16 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) + mlx5_eswitch_clear_vf_vports_info(esw); + } + ++void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) ++{ ++ if (!ESW_ALLOWED(esw)) ++ return; ++ ++ mutex_lock(&esw->mode_lock); ++ mlx5_eswitch_disable_locked(esw, clear_vf); ++ mutex_unlock(&esw->mode_lock); ++} ++ + int mlx5_eswitch_init(struct mlx5_core_dev *dev) + { + struct mlx5_eswitch *esw; +@@ -2207,6 +2241,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) + hash_init(esw->offloads.mod_hdr.hlist); + atomic64_set(&esw->offloads.num_flows, 0); + mutex_init(&esw->state_lock); ++ mutex_init(&esw->mode_lock); + + mlx5_esw_for_all_vports(esw, i, vport) { + vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); +@@ -2241,6 +2276,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) + esw->dev->priv.eswitch = NULL; + destroy_workqueue(esw->work_queue); + esw_offloads_cleanup_reps(esw); ++ mutex_destroy(&esw->mode_lock); + mutex_destroy(&esw->state_lock); + mutex_destroy(&esw->offloads.mod_hdr.lock); + mutex_destroy(&esw->offloads.encap_tbl_lock); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +index 5ec76f48e9b7..05600fda987f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +@@ -295,6 +295,13 @@ struct mlx5_eswitch { + */ + struct mutex state_lock; + ++#ifndef __GENKSYMS__ ++ /* Protects eswitch mode change that occurs via one or more ++ * user commands, i.e. sriov state change, devlink commands. 
++ */ ++ struct mutex mode_lock; ++#endif ++ + struct { + bool enabled; + u32 root_tsar_id; +@@ -337,7 +344,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev); + void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); + + #define MLX5_ESWITCH_IGNORE_NUM_VFS (-1) +-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs); ++int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs); ++int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); ++void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf); + void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); + int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]); +@@ -701,7 +710,7 @@ esw_get_max_restore_tag(struct mlx5_eswitch *esw); + /* eswitch API stubs */ + static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } + static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} +-static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode, int num_vfs) { return 0; } ++static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } + static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} + static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } + static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index 29280218518a..f34a5160d104 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -1549,14 +1549,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, + return -EINVAL; + } + +- mlx5_eswitch_disable(esw, false); +- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS, +- esw->dev->priv.sriov.num_vfs); ++ mlx5_eswitch_disable_locked(esw, false); ++ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, ++ esw->dev->priv.sriov.num_vfs); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch to offloads"); +- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY, +- MLX5_ESWITCH_IGNORE_NUM_VFS); ++ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, ++ MLX5_ESWITCH_IGNORE_NUM_VFS); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to legacy"); +@@ -2473,13 +2473,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, + { + int err, err1; + +- mlx5_eswitch_disable(esw, false); +- err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY, +- MLX5_ESWITCH_IGNORE_NUM_VFS); ++ mlx5_eswitch_disable_locked(esw, false); ++ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, ++ MLX5_ESWITCH_IGNORE_NUM_VFS); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); +- err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS, +- MLX5_ESWITCH_IGNORE_NUM_VFS); ++ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, ++ MLX5_ESWITCH_IGNORE_NUM_VFS); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to offloads"); +@@ -2601,6 +2601,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) + { + struct mlx5_core_dev *dev = devlink_priv(devlink); ++ struct mlx5_eswitch *esw = dev->priv.eswitch; + u16 cur_mlx5_mode, mlx5_mode = 0; + int err; + +@@ -2608,40 +2609,50 @@ int 
mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + if (err) + return err; + +- err = eswitch_devlink_esw_mode_check(dev->priv.eswitch); +- if (err) +- return err; +- +- cur_mlx5_mode = dev->priv.eswitch->mode; +- + if (esw_mode_from_devlink(mode, &mlx5_mode)) + return -EINVAL; + ++ mutex_lock(&esw->mode_lock); ++ err = eswitch_devlink_esw_mode_check(esw); ++ if (err) ++ goto unlock; ++ ++ cur_mlx5_mode = esw->mode; ++ + if (cur_mlx5_mode == mlx5_mode) +- return 0; ++ goto unlock; + + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) +- return esw_offloads_start(dev->priv.eswitch, extack); ++ err = esw_offloads_start(esw, extack); + else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) +- return esw_offloads_stop(dev->priv.eswitch, extack); ++ err = esw_offloads_stop(esw, extack); + else +- return -EINVAL; ++ err = -EINVAL; ++ ++unlock: ++ mutex_unlock(&esw->mode_lock); ++ return err; + } + + int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) + { + struct mlx5_core_dev *dev = devlink_priv(devlink); ++ struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; + + err = mlx5_eswitch_check(dev); + if (err) + return err; + ++ mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(dev->priv.eswitch); + if (err) +- return err; ++ goto unlock; + +- return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); ++ err = esw_mode_to_devlink(esw->mode, mode); ++unlock: ++ mutex_unlock(&esw->mode_lock); ++ return err; + } + + int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, +@@ -2656,18 +2667,20 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + if (err) + return err; + ++ mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) +- return err; ++ goto out; + + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: + if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) +- return 0; ++ goto out; + /* fall through */ + case MLX5_CAP_INLINE_MODE_L2: + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); +- return -EOPNOTSUPP; ++ err = -EOPNOTSUPP; ++ goto out; + case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: + break; + } +@@ -2675,7 +2688,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + if (atomic64_read(&esw->offloads.num_flows) > 0) { + NL_SET_ERR_MSG_MOD(extack, + "Can't set inline mode when flows are configured"); +- return -EOPNOTSUPP; ++ err = -EOPNOTSUPP; ++ goto out; + } + + err = esw_inline_mode_from_devlink(mode, &mlx5_mode); +@@ -2692,6 +2706,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + } + + esw->offloads.inline_mode = mlx5_mode; ++ mutex_unlock(&esw->mode_lock); + return 0; + + revert_inline_mode: +@@ -2701,6 +2716,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + vport, + esw->offloads.inline_mode); + out: ++ mutex_unlock(&esw->mode_lock); + return err; + } + +@@ -2714,11 +2730,15 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) + if (err) + return err; + ++ mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) +- return err; ++ goto unlock; + +- return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); ++ err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); ++unlock: ++ mutex_unlock(&esw->mode_lock); ++ return err; + } + + int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) +@@ -2770,30 +2790,36 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink 
*devlink, + if (err) + return err; + ++ mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) +- return err; ++ goto unlock; + + if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && + (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) || +- !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) +- return -EOPNOTSUPP; ++ !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) { ++ err = -EOPNOTSUPP; ++ goto unlock; ++ } + +- if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) +- return -EOPNOTSUPP; ++ if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) { ++ err = -EOPNOTSUPP; ++ goto unlock; ++ } + + if (esw->mode == MLX5_ESWITCH_LEGACY) { + esw->offloads.encap = encap; +- return 0; ++ goto unlock; + } + + if (esw->offloads.encap == encap) +- return 0; ++ goto unlock; + + if (atomic64_read(&esw->offloads.num_flows) > 0) { + NL_SET_ERR_MSG_MOD(extack, + "Can't set encapsulation when flows are configured"); +- return -EOPNOTSUPP; ++ err = -EOPNOTSUPP; ++ goto unlock; + } + + esw_destroy_offloads_fdb_tables(esw); +@@ -2809,6 +2835,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, + (void)esw_create_offloads_fdb_tables(esw, esw->nvports); + } + ++unlock: ++ mutex_unlock(&esw->mode_lock); + return err; + } + +@@ -2823,11 +2851,14 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, + if (err) + return err; + ++ mutex_lock(&esw->mode_lock); + err = eswitch_devlink_esw_mode_check(esw); + if (err) +- return err; ++ goto unlock; + + *encap = esw->offloads.encap; ++unlock: ++ mutex_unlock(&esw->mode_lock); + return 0; + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +index 10a64b91d04c..3094d20297a9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +@@ -77,8 +77,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) + if (!MLX5_ESWITCH_MANAGER(dev)) + goto enable_vfs_hca; + +- err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY, +- num_vfs); ++ err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs); + if (err) { + mlx5_core_warn(dev, + "failed to enable eswitch SRIOV (%d)\n", err); +-- +2.13.6 + diff --git a/SOURCES/0272-netdrv-net-mlx5e-Rename-representor-get-devlink-port.patch b/SOURCES/0272-netdrv-net-mlx5e-Rename-representor-get-devlink-port.patch new file mode 100644 index 0000000..36c71e1 --- /dev/null +++ b/SOURCES/0272-netdrv-net-mlx5e-Rename-representor-get-devlink-port.patch @@ -0,0 +1,75 @@ +From 600067e1c77c62514d29e347a97a470429302ce5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:24 -0400 +Subject: [PATCH 272/312] [netdrv] net/mlx5e: Rename representor get devlink + port function + +Message-id: <20200601154102.25980-2-ahleihel@redhat.com> +Patchwork-id: 315706 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 01/39] net/mlx5e: Rename representor get devlink port function +Bugzilla: 1842258 1790226 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1790226 +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc1 + +commit ab8f963a11790ea0a04add0538c5e45887890d46 +Author: Vladyslav Tarasiuk +Date: Fri Jan 17 12:42:53 2020 +0200 + + net/mlx5e: Rename representor get devlink port function + + Rename representor's mlx5e_get_devlink_port() to + mlx5e_rep_get_devlink_port(). 
+ The downstream patch will add a non-representor mlx5e function called + mlx5e_get_devlink_phy_port(). + + Signed-off-by: Vladyslav Tarasiuk + Reviewed-by: Moshe Shemesh + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 797ecdb6a165..00213299e616 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1382,7 +1382,7 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan + return 0; + } + +-static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) ++static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; +@@ -1395,7 +1395,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { + .ndo_stop = mlx5e_rep_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_setup_tc = mlx5e_rep_setup_tc, +- .ndo_get_devlink_port = mlx5e_get_devlink_port, ++ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port, + .ndo_get_stats64 = mlx5e_rep_get_stats, + .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, + .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, +@@ -1408,7 +1408,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { + .ndo_start_xmit = mlx5e_xmit, + .ndo_set_mac_address = mlx5e_uplink_rep_set_mac, + .ndo_setup_tc = mlx5e_rep_setup_tc, +- .ndo_get_devlink_port = mlx5e_get_devlink_port, ++ .ndo_get_devlink_port = mlx5e_rep_get_devlink_port, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, + .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, +-- +2.13.6 + diff --git a/SOURCES/0273-netdrv-net-mlx5e-Add-support-for-devlink-port-in-non.patch b/SOURCES/0273-netdrv-net-mlx5e-Add-support-for-devlink-port-in-non.patch new file mode 100644 index 0000000..8474324 --- /dev/null +++ b/SOURCES/0273-netdrv-net-mlx5e-Add-support-for-devlink-port-in-non.patch @@ -0,0 +1,188 @@ +From 5fa5489fb414fc500bb9eeef1f1600d8a1e0e2aa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:25 -0400 +Subject: [PATCH 273/312] [netdrv] net/mlx5e: Add support for devlink-port in + non-representors mode + +Message-id: <20200601154102.25980-3-ahleihel@redhat.com> +Patchwork-id: 315707 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 02/39] net/mlx5e: Add support for devlink-port in non-representors mode +Bugzilla: 1842258 1790226 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1790226 +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc1 + +commit c6acd629eec754a9679f922d51f90e44c769b80c +Author: Vladyslav Tarasiuk +Date: Wed Nov 13 17:19:47 2019 +0200 + + net/mlx5e: Add support for devlink-port in non-representors mode + + Added devlink_port field to mlx5e_priv structure and a callback to + netdev ops to enable devlink to get info about the port. The port + registration happens at driver initialization. 
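A minimal sketch of the pattern described above, using a hypothetical driver "foo" and only the devlink calls that appear in the hunks below; this is not the mlx5e code itself:

#include <linux/netdevice.h>
#include <net/devlink.h>

struct foo_priv {
	struct net_device *netdev;
	struct devlink_port dl_port;	/* embedded in the driver private data */
};

/* .ndo_get_devlink_port callback: lets devlink expose info for this port */
static struct devlink_port *foo_get_devlink_port(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	return &priv->dl_port;
}

static int foo_devlink_port_register(struct foo_priv *priv,
				     struct devlink *devlink)
{
	int err;

	/* devlink_port_attrs_set() would be called here first, as in the
	 * hunk below, to pick the port flavour (physical vs. virtual).
	 */
	err = devlink_port_register(devlink, &priv->dl_port, 1 /* port index */);
	if (err)
		return err;
	devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
	return 0;
}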
+ + Signed-off-by: Vladyslav Tarasiuk + Reviewed-by: Moshe Shemesh + Reviewed-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + + .../net/ethernet/mellanox/mlx5/core/en/devlink.c | 38 ++++++++++++++++++++++ + .../net/ethernet/mellanox/mlx5/core/en/devlink.h | 15 +++++++++ + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 +++++++ + 5 files changed, 66 insertions(+), 1 deletion(-) + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c + create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +index 9e85def607b9..51d6cc94adba 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile +@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ + en_selftest.o en/port.o en/monitor_stats.o en/health.o \ + en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \ +- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o ++ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o + + # + # Netdev extra +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 2e3a4ba96793..db0f2d9936cd 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -872,6 +872,7 @@ struct mlx5e_priv { + #endif + struct devlink_health_reporter *tx_reporter; + struct devlink_health_reporter *rx_reporter; ++ struct devlink_port dl_phy_port; + struct mlx5e_xsk xsk; + struct mlx5e_scratchpad scratchpad; + }; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +new file mode 100644 +index 000000000000..1a87a3fc6b44 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +@@ -0,0 +1,38 @@ ++// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB ++/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ ++ ++#include "en/devlink.h" ++ ++int mlx5e_devlink_phy_port_register(struct net_device *dev) ++{ ++ struct mlx5e_priv *priv; ++ struct devlink *devlink; ++ int err; ++ ++ priv = netdev_priv(dev); ++ devlink = priv_to_devlink(priv->mdev); ++ ++ devlink_port_attrs_set(&priv->dl_phy_port, ++ DEVLINK_PORT_FLAVOUR_PHYSICAL, ++ PCI_FUNC(priv->mdev->pdev->devfn), ++ false, 0, ++ NULL, 0); ++ err = devlink_port_register(devlink, &priv->dl_phy_port, 1); ++ if (err) ++ return err; ++ devlink_port_type_eth_set(&priv->dl_phy_port, dev); ++ return 0; ++} ++ ++void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv) ++{ ++ devlink_port_unregister(&priv->dl_phy_port); ++} ++ ++struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev) ++{ ++ struct mlx5e_priv *priv = netdev_priv(dev); ++ ++ return &priv->dl_phy_port; ++} ++ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +new file mode 100644 +index 000000000000..b8cd63b88688 +--- /dev/null ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ ++/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ ++ ++#ifndef __MLX5E_EN_DEVLINK_H ++#define __MLX5E_EN_DEVLINK_H ++ ++#include ++#include "en.h" ++ ++int mlx5e_devlink_phy_port_register(struct net_device *dev); ++void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv); ++struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev); ++ ++#endif ++ +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 22298f67fbd2..0f48804f7fd2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -62,6 +62,7 @@ + #include "en/xsk/setup.h" + #include "en/xsk/rx.h" + #include "en/xsk/tx.h" ++#include "en/devlink.h" + + + bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) +@@ -4634,6 +4635,7 @@ const struct net_device_ops mlx5e_netdev_ops = { + .ndo_set_vf_link_state = mlx5e_set_vf_link_state, + .ndo_get_vf_stats = mlx5e_get_vf_stats, + #endif ++ .ndo_get_devlink_port = mlx5e_get_devlink_phy_port, + }; + + static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) +@@ -5497,11 +5499,19 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) + goto err_detach; + } + ++ err = mlx5e_devlink_phy_port_register(netdev); ++ if (err) { ++ mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err); ++ goto err_unregister_netdev; ++ } ++ + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_init_app(priv); + #endif + return priv; + ++err_unregister_netdev: ++ unregister_netdev(netdev); + err_detach: + mlx5e_detach(mdev, priv); + err_destroy_netdev: +@@ -5523,6 +5533,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_delete_app(priv); + #endif ++ mlx5e_devlink_phy_port_unregister(priv); + unregister_netdev(priv->netdev); + mlx5e_detach(mdev, vpriv); + mlx5e_destroy_netdev(priv); +-- +2.13.6 + diff --git a/SOURCES/0274-netdrv-net-mlx5e-Use-devlink-virtual-flavour-for-VF-.patch b/SOURCES/0274-netdrv-net-mlx5e-Use-devlink-virtual-flavour-for-VF-.patch new file mode 100644 index 0000000..6a08b69 --- /dev/null +++ b/SOURCES/0274-netdrv-net-mlx5e-Use-devlink-virtual-flavour-for-VF-.patch @@ -0,0 +1,172 @@ +From 3b05c8231e7a2cdcd4ec6931dd535b6b1e92f8e5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:26 -0400 +Subject: [PATCH 274/312] [netdrv] net/mlx5e: Use devlink virtual flavour for + VF devlink port + +Message-id: <20200601154102.25980-4-ahleihel@redhat.com> +Patchwork-id: 315709 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 03/39] net/mlx5e: Use devlink virtual flavour for VF devlink port +Bugzilla: 1842258 1790226 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1790226 +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc1 + +commit 162add8cbae4635cf0598c640a24d5ed2849774f +Author: Parav Pandit +Date: Tue Mar 3 08:12:43 2020 -0600 + + net/mlx5e: Use devlink virtual flavour for VF devlink port + + Use newly introduce 'virtual' port flavour for devlink + port of PCI VF devlink device in non-representors mode. + + While at it, remove recently introduced empty lines at end of the file. + + Reviewed-by: Jiri Pirko + Signed-off-by: Parav Pandit + Signed-off-by: David S. 
Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- + .../net/ethernet/mellanox/mlx5/core/en/devlink.c | 39 +++++++++++++--------- + .../net/ethernet/mellanox/mlx5/core/en/devlink.h | 7 ++-- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 ++-- + 4 files changed, 30 insertions(+), 24 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index db0f2d9936cd..09c9f8c0ef48 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -872,7 +872,7 @@ struct mlx5e_priv { + #endif + struct devlink_health_reporter *tx_reporter; + struct devlink_health_reporter *rx_reporter; +- struct devlink_port dl_phy_port; ++ struct devlink_port dl_port; + struct mlx5e_xsk xsk; + struct mlx5e_scratchpad scratchpad; + }; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +index 1a87a3fc6b44..e38495e4aa42 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +@@ -3,36 +3,43 @@ + + #include "en/devlink.h" + +-int mlx5e_devlink_phy_port_register(struct net_device *dev) ++int mlx5e_devlink_port_register(struct net_device *netdev) + { ++ struct mlx5_core_dev *dev; + struct mlx5e_priv *priv; + struct devlink *devlink; + int err; + +- priv = netdev_priv(dev); +- devlink = priv_to_devlink(priv->mdev); +- +- devlink_port_attrs_set(&priv->dl_phy_port, +- DEVLINK_PORT_FLAVOUR_PHYSICAL, +- PCI_FUNC(priv->mdev->pdev->devfn), +- false, 0, +- NULL, 0); +- err = devlink_port_register(devlink, &priv->dl_phy_port, 1); ++ priv = netdev_priv(netdev); ++ dev = priv->mdev; ++ ++ if (mlx5_core_is_pf(dev)) ++ devlink_port_attrs_set(&priv->dl_port, ++ DEVLINK_PORT_FLAVOUR_PHYSICAL, ++ PCI_FUNC(dev->pdev->devfn), ++ false, 0, ++ NULL, 0); ++ else ++ devlink_port_attrs_set(&priv->dl_port, ++ DEVLINK_PORT_FLAVOUR_VIRTUAL, ++ 0, false, 0, NULL, 0); ++ ++ devlink = priv_to_devlink(dev); ++ err = devlink_port_register(devlink, &priv->dl_port, 1); + if (err) + return err; +- devlink_port_type_eth_set(&priv->dl_phy_port, dev); ++ devlink_port_type_eth_set(&priv->dl_port, netdev); + return 0; + } + +-void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv) ++void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) + { +- devlink_port_unregister(&priv->dl_phy_port); ++ devlink_port_unregister(&priv->dl_port); + } + +-struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev) ++struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) + { + struct mlx5e_priv *priv = netdev_priv(dev); + +- return &priv->dl_phy_port; ++ return &priv->dl_port; + } +- +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +index b8cd63b88688..3e5393a0901f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +@@ -7,9 +7,8 @@ + #include + #include "en.h" + +-int mlx5e_devlink_phy_port_register(struct net_device *dev); +-void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv); +-struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev); ++int mlx5e_devlink_port_register(struct net_device *dev); ++void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv); ++struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev); + + 
#endif +- +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 0f48804f7fd2..5e52f415ef35 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4635,7 +4635,7 @@ const struct net_device_ops mlx5e_netdev_ops = { + .ndo_set_vf_link_state = mlx5e_set_vf_link_state, + .ndo_get_vf_stats = mlx5e_get_vf_stats, + #endif +- .ndo_get_devlink_port = mlx5e_get_devlink_phy_port, ++ .ndo_get_devlink_port = mlx5e_get_devlink_port, + }; + + static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) +@@ -5499,7 +5499,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) + goto err_detach; + } + +- err = mlx5e_devlink_phy_port_register(netdev); ++ err = mlx5e_devlink_port_register(netdev); + if (err) { + mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err); + goto err_unregister_netdev; +@@ -5533,7 +5533,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_delete_app(priv); + #endif +- mlx5e_devlink_phy_port_unregister(priv); ++ mlx5e_devlink_port_unregister(priv); + unregister_netdev(priv->netdev); + mlx5e_detach(mdev, vpriv); + mlx5e_destroy_netdev(priv); +-- +2.13.6 + diff --git a/SOURCES/0275-netdrv-net-mlx5e-Fix-devlink-port-register-sequence.patch b/SOURCES/0275-netdrv-net-mlx5e-Fix-devlink-port-register-sequence.patch new file mode 100644 index 0000000..1d395e4 --- /dev/null +++ b/SOURCES/0275-netdrv-net-mlx5e-Fix-devlink-port-register-sequence.patch @@ -0,0 +1,156 @@ +From eaef6191478050c3c5a4ef24955d741d1002261e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:27 -0400 +Subject: [PATCH 275/312] [netdrv] net/mlx5e: Fix devlink port register + sequence + +Message-id: <20200601154102.25980-5-ahleihel@redhat.com> +Patchwork-id: 315708 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 04/39] net/mlx5e: Fix devlink port register sequence +Bugzilla: 1842258 1790226 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1790226 +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc1 + +commit 31e87b39ba9d47cf31f5a91dd3cc9680f5987d12 +Author: Vladyslav Tarasiuk +Date: Wed Mar 4 13:33:50 2020 +0200 + + net/mlx5e: Fix devlink port register sequence + + If udevd is configured to rename interfaces according to persistent + naming rules and if a network interface has phys_port_name in sysfs, + its contents will be appended to the interface name. + However, register_netdev creates device in sysfs and if + devlink_port_register is called after that, there is a timeframe in + which udevd may read an empty phys_port_name value. The consequence is + that the interface will lose this suffix and its name will not be + really persistent. + + The solution is to register the port before registering a netdev. 
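The ordering this fix establishes, sketched against the same hypothetical "foo" driver (not the mlx5e probe path itself):

static int foo_register(struct foo_priv *priv, struct devlink *devlink)
{
	int err;

	/* 1. Create the devlink port first, so phys_port_name is readable
	 *    by the time udev sees the netdev appear in sysfs.
	 */
	err = devlink_port_register(devlink, &priv->dl_port, 1);
	if (err)
		return err;

	/* 2. Only now expose the netdev. */
	err = register_netdev(priv->netdev);
	if (err)
		goto err_port_unregister;

	/* 3. Finally bind port and netdev for "devlink port show". */
	devlink_port_type_eth_set(&priv->dl_port, priv->netdev);
	return 0;

err_port_unregister:
	devlink_port_unregister(&priv->dl_port);
	return err;
}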
+ + Fixes: c6acd629eec7 ("net/mlx5e: Add support for devlink-port in non-representors mode") + Signed-off-by: Vladyslav Tarasiuk + Reviewed-by: Maxim Mikityanskiy + Reviewed-by: Jiri Pirko + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + .../net/ethernet/mellanox/mlx5/core/en/devlink.c | 26 +++++++++------------- + .../net/ethernet/mellanox/mlx5/core/en/devlink.h | 3 ++- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 16 +++++++------ + 3 files changed, 21 insertions(+), 24 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +index e38495e4aa42..f8b2de4b04be 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +@@ -3,20 +3,14 @@ + + #include "en/devlink.h" + +-int mlx5e_devlink_port_register(struct net_device *netdev) ++int mlx5e_devlink_port_register(struct mlx5e_priv *priv) + { +- struct mlx5_core_dev *dev; +- struct mlx5e_priv *priv; +- struct devlink *devlink; +- int err; ++ struct devlink *devlink = priv_to_devlink(priv->mdev); + +- priv = netdev_priv(netdev); +- dev = priv->mdev; +- +- if (mlx5_core_is_pf(dev)) ++ if (mlx5_core_is_pf(priv->mdev)) + devlink_port_attrs_set(&priv->dl_port, + DEVLINK_PORT_FLAVOUR_PHYSICAL, +- PCI_FUNC(dev->pdev->devfn), ++ PCI_FUNC(priv->mdev->pdev->devfn), + false, 0, + NULL, 0); + else +@@ -24,12 +18,12 @@ int mlx5e_devlink_port_register(struct net_device *netdev) + DEVLINK_PORT_FLAVOUR_VIRTUAL, + 0, false, 0, NULL, 0); + +- devlink = priv_to_devlink(dev); +- err = devlink_port_register(devlink, &priv->dl_port, 1); +- if (err) +- return err; +- devlink_port_type_eth_set(&priv->dl_port, netdev); +- return 0; ++ return devlink_port_register(devlink, &priv->dl_port, 1); ++} ++ ++void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv) ++{ ++ devlink_port_type_eth_set(&priv->dl_port, priv->netdev); + } + + void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +index 3e5393a0901f..83123a801adc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h +@@ -7,8 +7,9 @@ + #include + #include "en.h" + +-int mlx5e_devlink_port_register(struct net_device *dev); ++int mlx5e_devlink_port_register(struct mlx5e_priv *priv); + void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv); ++void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv); + struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev); + + #endif +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 5e52f415ef35..7698167f6dab 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -5493,25 +5493,27 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) + goto err_destroy_netdev; + } + +- err = register_netdev(netdev); ++ err = mlx5e_devlink_port_register(priv); + if (err) { +- mlx5_core_err(mdev, "register_netdev failed, %d\n", err); ++ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); + goto err_detach; + } + +- err = mlx5e_devlink_port_register(netdev); ++ err = register_netdev(netdev); + if (err) { +- mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err); +- goto err_unregister_netdev; ++ 
mlx5_core_err(mdev, "register_netdev failed, %d\n", err); ++ goto err_devlink_port_unregister; + } + ++ mlx5e_devlink_port_type_eth_set(priv); ++ + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_init_app(priv); + #endif + return priv; + +-err_unregister_netdev: +- unregister_netdev(netdev); ++err_devlink_port_unregister: ++ mlx5e_devlink_port_unregister(priv); + err_detach: + mlx5e_detach(mdev, priv); + err_destroy_netdev: +-- +2.13.6 + diff --git a/SOURCES/0276-netdrv-net-mlx5e-Fix-devlink-port-netdev-unregistrat.patch b/SOURCES/0276-netdrv-net-mlx5e-Fix-devlink-port-netdev-unregistrat.patch new file mode 100644 index 0000000..003c6f8 --- /dev/null +++ b/SOURCES/0276-netdrv-net-mlx5e-Fix-devlink-port-netdev-unregistrat.patch @@ -0,0 +1,59 @@ +From f1954e485506f27fcc4ba60d0fe6d95d25f7c419 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:28 -0400 +Subject: [PATCH 276/312] [netdrv] net/mlx5e: Fix devlink port netdev + unregistration sequence + +Message-id: <20200601154102.25980-6-ahleihel@redhat.com> +Patchwork-id: 315710 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 05/39] net/mlx5e: Fix devlink port netdev unregistration sequence +Bugzilla: 1842258 1790226 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1790226 +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc2 + +commit 230a1bc2470c5554a8c2bfe14774863897dc9386 +Author: Parav Pandit +Date: Fri Apr 3 02:35:46 2020 -0500 + + net/mlx5e: Fix devlink port netdev unregistration sequence + + In cited commit netdevice is registered after devlink port. + + Unregistration flow should be mirror sequence of registration flow. + Hence, unregister netdevice before devlink port. 
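Mirrored teardown for the same hypothetical driver, matching the ordering this fix restores:

static void foo_unregister(struct foo_priv *priv)
{
	unregister_netdev(priv->netdev);	  /* registered last, removed first */
	devlink_port_unregister(&priv->dl_port);  /* registered first, removed last */
}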
+ + Fixes: 31e87b39ba9d ("net/mlx5e: Fix devlink port register sequence") + Reviewed-by: Jiri Pirko + Signed-off-by: Parav Pandit + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 7698167f6dab..15102b5b8d4a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -5535,8 +5535,8 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_delete_app(priv); + #endif +- mlx5e_devlink_port_unregister(priv); + unregister_netdev(priv->netdev); ++ mlx5e_devlink_port_unregister(priv); + mlx5e_detach(mdev, vpriv); + mlx5e_destroy_netdev(priv); + } +-- +2.13.6 + diff --git a/SOURCES/0277-netdrv-net-mlx5-Fix-crash-upon-suspend-resume.patch b/SOURCES/0277-netdrv-net-mlx5-Fix-crash-upon-suspend-resume.patch new file mode 100644 index 0000000..e9f3524 --- /dev/null +++ b/SOURCES/0277-netdrv-net-mlx5-Fix-crash-upon-suspend-resume.patch @@ -0,0 +1,81 @@ +From 0af2635530b4c17d9ce60f26d0f0285851c22ec1 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:29 -0400 +Subject: [PATCH 277/312] [netdrv] net/mlx5: Fix crash upon suspend/resume + +Message-id: <20200601154102.25980-7-ahleihel@redhat.com> +Patchwork-id: 315711 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 06/39] net/mlx5: Fix crash upon suspend/resume +Bugzilla: 1842258 1841973 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Bugzilla: http://bugzilla.redhat.com/1841973 +Upstream: v5.7 + +commit 8fc3e29be9248048f449793502c15af329f35c6e +Author: Mark Bloch +Date: Wed May 20 17:32:08 2020 +0000 + + net/mlx5: Fix crash upon suspend/resume + + Currently a Linux system with the mlx5 NIC always crashes upon + hibernation - suspend/resume. + + Add basic callbacks so the NIC could be suspended and resumed. 
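The shape of the legacy PCI power-management hooks added below, sketched for a hypothetical driver; foo_unload()/foo_load() stand in for whatever teardown and re-init path the driver already has:

#include <linux/pci.h>

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct foo_dev *fdev = pci_get_drvdata(pdev);

	foo_unload(fdev);	/* hypothetical: quiesce HW, free resources */
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	struct foo_dev *fdev = pci_get_drvdata(pdev);

	return foo_load(fdev);	/* hypothetical: bring the device back up */
}

static struct pci_driver foo_driver = {
	.name	  = "foo",
	.suspend  = foo_suspend,
	.resume	  = foo_resume,
	/* .probe, .remove, .id_table as usual */
};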
+ + Fixes: 9603b61de1ee ("mlx5: Move pci device handling from mlx5_ib to mlx5_core") + Tested-by: Dexuan Cui + Signed-off-by: Mark Bloch + Reviewed-by: Moshe Shemesh + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index f575f684ad78..880bc53d0b1b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1562,6 +1562,22 @@ static void shutdown(struct pci_dev *pdev) + mlx5_pci_disable_device(dev); + } + ++static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) ++{ ++ struct mlx5_core_dev *dev = pci_get_drvdata(pdev); ++ ++ mlx5_unload_one(dev, false); ++ ++ return 0; ++} ++ ++static int mlx5_resume(struct pci_dev *pdev) ++{ ++ struct mlx5_core_dev *dev = pci_get_drvdata(pdev); ++ ++ return mlx5_load_one(dev, false); ++} ++ + static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, + { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ +@@ -1605,6 +1621,8 @@ static struct pci_driver mlx5_core_driver = { + .id_table = mlx5_core_pci_table, + .probe = init_one, + .remove = remove_one, ++ .suspend = mlx5_suspend, ++ .resume = mlx5_resume, + .shutdown = shutdown, + .err_handler = &mlx5_err_handler, + .sriov_configure = mlx5_core_sriov_configure, +-- +2.13.6 + diff --git a/SOURCES/0278-netdrv-net-mlx5-Add-command-entry-handling-completio.patch b/SOURCES/0278-netdrv-net-mlx5-Add-command-entry-handling-completio.patch new file mode 100644 index 0000000..35d25af --- /dev/null +++ b/SOURCES/0278-netdrv-net-mlx5-Add-command-entry-handling-completio.patch @@ -0,0 +1,123 @@ +From c751e5890724beeb0b41017f54ac5509f50697e8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:31 -0400 +Subject: [PATCH 278/312] [netdrv] net/mlx5: Add command entry handling + completion + +Message-id: <20200601154102.25980-9-ahleihel@redhat.com> +Patchwork-id: 315713 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 08/39] net/mlx5: Add command entry handling completion +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit 17d00e839d3b592da9659c1977d45f85b77f986a +Author: Moshe Shemesh +Date: Fri Dec 27 07:01:53 2019 +0200 + + net/mlx5: Add command entry handling completion + + When FW response to commands is very slow and all command entries in + use are waiting for completion we can have a race where commands can get + timeout before they get out of the queue and handled. Timeout + completion on uninitialized command will cause releasing command's + buffers before accessing it for initialization and then we will get NULL + pointer exception while trying access it. It may also cause releasing + buffers of another command since we may have timeout completion before + even allocating entry index for this command. + Add entry handling completion to avoid this race. 
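The idea behind the new "handling" completion, as a generic sketch (hypothetical foo_cmd code, not the mlx5 command interface itself): the waiter may only declare a timeout after it knows the work item either started running or was cancelled before it ever touched the entry.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

struct foo_cmd_ent {
	struct completion handling;	/* signalled as soon as the handler runs */
	struct completion done;		/* signalled when FW answers */
	struct work_struct work;	/* init_completion()/INIT_WORK() done at submit */
};

static void foo_cmd_work_handler(struct work_struct *work)
{
	struct foo_cmd_ent *ent = container_of(work, struct foo_cmd_ent, work);

	complete(&ent->handling);	/* "this entry is now owned by the handler" */
	/* ... allocate an index and post the command to FW ... */
}

static int foo_cmd_wait(struct foo_cmd_ent *ent, unsigned long timeout)
{
	/* If the handler never started and the work could still be cancelled,
	 * the entry was never exposed to FW and can be failed safely.
	 */
	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
	    cancel_work_sync(&ent->work))
		return -ECANCELED;

	wait_for_completion(&ent->done);
	return 0;
}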
+ + Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") + Signed-off-by: Moshe Shemesh + Signed-off-by: Eran Ben Elisha + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 14 ++++++++++++++ + include/linux/mlx5/driver.h | 3 +++ + 2 files changed, 17 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 23acec5a31d4..50783828d2e8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -861,6 +861,7 @@ static void cmd_work_handler(struct work_struct *work) + int alloc_ret; + int cmd_mode; + ++ complete(&ent->handling); + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { +@@ -978,6 +979,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) + struct mlx5_cmd *cmd = &dev->cmd; + int err; + ++ if (!wait_for_completion_timeout(&ent->handling, timeout) && ++ cancel_work_sync(&ent->work)) { ++ ent->ret = -ECANCELED; ++ goto out_err; ++ } + if (cmd->mode == CMD_MODE_POLLING || ent->polling) { + wait_for_completion(&ent->done); + } else if (!wait_for_completion_timeout(&ent->done, timeout)) { +@@ -985,12 +991,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) + mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + } + ++out_err: + err = ent->ret; + + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); ++ } else if (err == -ECANCELED) { ++ mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", ++ mlx5_command_str(msg_to_opcode(ent->in)), ++ msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", + err, deliv_status_to_str(ent->status), ent->status); +@@ -1026,6 +1037,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + ent->token = token; + ent->polling = force_polling; + ++ init_completion(&ent->handling); + if (!callback) + init_completion(&ent->done); + +@@ -1045,6 +1057,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; ++ if (err == -ECANCELED) ++ goto out_free; + + ds = ent->ts2 - ent->ts1; + op = MLX5_GET(mbox_in, in->first.data, opcode); +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index 0d728007078c..df47476d6fca 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -761,6 +761,9 @@ struct mlx5_cmd_work_ent { + struct delayed_work cb_timeout_work; + void *context; + int idx; ++#ifndef __GENKSYMS__ ++ struct completion handling; ++#endif + struct completion done; + struct mlx5_cmd *cmd; + struct work_struct work; +-- +2.13.6 + diff --git a/SOURCES/0279-netdrv-net-mlx5-Fix-a-race-when-moving-command-inter.patch b/SOURCES/0279-netdrv-net-mlx5-Fix-a-race-when-moving-command-inter.patch new file mode 100644 index 0000000..0d90983 --- /dev/null +++ b/SOURCES/0279-netdrv-net-mlx5-Fix-a-race-when-moving-command-inter.patch @@ -0,0 +1,193 @@ +From f4df233157339147539324ac3e86b5ec44513e83 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:32 -0400 +Subject: [PATCH 279/312] [netdrv] net/mlx5: Fix a race when moving command + interface to events mode + +Message-id: 
<20200601154102.25980-10-ahleihel@redhat.com> +Patchwork-id: 315714 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 09/39] net/mlx5: Fix a race when moving command interface to events mode +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit d43b7007dbd1195a5b6b83213e49b1516aaf6f5e +Author: Eran Ben Elisha +Date: Wed Mar 18 21:44:32 2020 +0200 + + net/mlx5: Fix a race when moving command interface to events mode + + After driver creates (via FW command) an EQ for commands, the driver will + be informed on new commands completion by EQE. However, due to a race in + driver's internal command mode metadata update, some new commands will + still be miss-handled by driver as if we are in polling mode. Such commands + can get two non forced completion, leading to already freed command entry + access. + + CREATE_EQ command, that maps EQ to the command queue must be posted to the + command queue while it is empty and no other command should be posted. + + Add SW mechanism that once the CREATE_EQ command is about to be executed, + all other commands will return error without being sent to the FW. Allow + sending other commands only after successfully changing the driver's + internal command mode metadata. + We can safely return error to all other commands while creating the command + EQ, as all other commands might be sent from the user/application during + driver load. Application can rerun them later after driver's load was + finished. + + Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") + Signed-off-by: Eran Ben Elisha + Signed-off-by: Moshe Shemesh + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 35 ++++++++++++++++++++++++--- + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 +++ + include/linux/mlx5/driver.h | 8 ++++++ + 3 files changed, 42 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 50783828d2e8..2280bb7e748d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); + static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg); + ++static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) ++{ ++ if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) ++ return true; ++ ++ return cmd->allowed_opcode == opcode; ++} ++ + static void cmd_work_handler(struct work_struct *work) + { + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); +@@ -914,7 +922,8 @@ static void cmd_work_handler(struct work_struct *work) + + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || +- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { ++ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || ++ !opcode_allowed(&dev->cmd, ent->op)) { + u8 status = 0; + u32 drv_synd; + +@@ -1405,6 +1414,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) + mlx5_cmdif_debugfs_init(dev); + } + ++void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) ++{ ++ struct mlx5_cmd *cmd = &dev->cmd; ++ int i; ++ ++ for (i = 0; i < cmd->max_reg_cmds; i++) ++ down(&cmd->sem); ++ 
down(&cmd->pages_sem); ++ ++ cmd->allowed_opcode = opcode; ++ ++ up(&cmd->pages_sem); ++ for (i = 0; i < cmd->max_reg_cmds; i++) ++ up(&cmd->sem); ++} ++ + static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) + { + struct mlx5_cmd *cmd = &dev->cmd; +@@ -1681,12 +1706,13 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int err; + u8 status = 0; + u32 drv_synd; ++ u16 opcode; + u8 token; + ++ opcode = MLX5_GET(mbox_in, in, opcode); + if (pci_channel_offline(dev->pdev) || +- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { +- u16 opcode = MLX5_GET(mbox_in, in, opcode); +- ++ dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || ++ !opcode_allowed(&dev->cmd, opcode)) { + err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); + MLX5_SET(mbox_out, out, status, status); + MLX5_SET(mbox_out, out, syndrome, drv_synd); +@@ -1988,6 +2014,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) + mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); + + cmd->mode = CMD_MODE_POLLING; ++ cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; + + create_msg_cache(dev); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +index cccea3a8eddd..ce6c621af043 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c +@@ -611,11 +611,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev) + .nent = MLX5_NUM_CMD_EQE, + .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, + }; ++ mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ); + err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd"); + if (err) + goto err1; + + mlx5_cmd_use_events(dev); ++ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); + + param = (struct mlx5_eq_param) { + .irq_index = 0, +@@ -645,6 +647,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev) + mlx5_cmd_use_polling(dev); + cleanup_async_eq(dev, &table->cmd_eq, "cmd"); + err1: ++ mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); + mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); + return err; + } +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index df47476d6fca..c2009064805a 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -300,6 +300,9 @@ struct mlx5_cmd { + */ + spinlock_t token_lock; + u8 token; ++#ifndef __GENKSYMS__ ++ u16 allowed_opcode; ++#endif + unsigned long bitmask; + char wq_name[MLX5_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; +@@ -895,10 +898,15 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); + } + ++enum { ++ CMD_ALLOWED_OPCODE_ALL, ++}; ++ + int mlx5_cmd_init(struct mlx5_core_dev *dev); + void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); + void mlx5_cmd_use_events(struct mlx5_core_dev *dev); + void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); ++void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); + + struct mlx5_async_ctx { + struct mlx5_core_dev *dev; +-- +2.13.6 + diff --git a/SOURCES/0280-netdrv-net-mlx5-Avoid-processing-commands-before-cmd.patch b/SOURCES/0280-netdrv-net-mlx5-Avoid-processing-commands-before-cmd.patch new file mode 100644 index 0000000..2625728 --- /dev/null +++ b/SOURCES/0280-netdrv-net-mlx5-Avoid-processing-commands-before-cmd.patch @@ -0,0 +1,149 @@ +From e1a165d97b043db33a647577da3cac42d9653202 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:33 
-0400 +Subject: [PATCH 280/312] [netdrv] net/mlx5: Avoid processing commands before + cmdif is ready + +Message-id: <20200601154102.25980-11-ahleihel@redhat.com> +Patchwork-id: 315715 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 10/39] net/mlx5: Avoid processing commands before cmdif is ready +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit f7936ddd35d8b849daf0372770c7c9dbe7910fca +Author: Eran Ben Elisha +Date: Thu Mar 19 21:43:13 2020 +0200 + + net/mlx5: Avoid processing commands before cmdif is ready + + When driver is reloading during recovery flow, it can't get new commands + till command interface is up again. Otherwise we may get to null pointer + trying to access non initialized command structures. + + Add cmdif state to avoid processing commands while cmdif is not ready. + + Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") + Signed-off-by: Eran Ben Elisha + Signed-off-by: Moshe Shemesh + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 10 ++++++++++ + drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 ++++ + include/linux/mlx5/driver.h | 9 +++++++++ + 3 files changed, 23 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +index 2280bb7e748d..3745fcd9a99e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +@@ -923,6 +923,7 @@ static void cmd_work_handler(struct work_struct *work) + /* Skip sending command to fw if internal error */ + if (pci_channel_offline(dev->pdev) || + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || ++ cmd->state != MLX5_CMDIF_STATE_UP || + !opcode_allowed(&dev->cmd, ent->op)) { + u8 status = 0; + u32 drv_synd; +@@ -1712,6 +1713,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + opcode = MLX5_GET(mbox_in, in, opcode); + if (pci_channel_offline(dev->pdev) || + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || ++ dev->cmd.state != MLX5_CMDIF_STATE_UP || + !opcode_allowed(&dev->cmd, opcode)) { + err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); + MLX5_SET(mbox_out, out, status, status); +@@ -1977,6 +1979,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) + goto err_free_page; + } + ++ cmd->state = MLX5_CMDIF_STATE_DOWN; + cmd->checksum_disabled = 1; + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; +@@ -2054,3 +2057,10 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) + dma_pool_destroy(cmd->pool); + } + EXPORT_SYMBOL(mlx5_cmd_cleanup); ++ ++void mlx5_cmd_set_state(struct mlx5_core_dev *dev, ++ enum mlx5_cmdif_state cmdif_state) ++{ ++ dev->cmd.state = cmdif_state; ++} ++EXPORT_SYMBOL(mlx5_cmd_set_state); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index 880bc53d0b1b..fdc0c0f7da96 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -993,6 +993,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) + goto err_cmd_cleanup; + } + ++ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); ++ + err = mlx5_core_enable_hca(dev, 0); + if (err) { + mlx5_core_err(dev, "enable hca failed\n"); +@@ -1056,6 
+1058,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) + err_disable_hca: + mlx5_core_disable_hca(dev, 0); + err_cmd_cleanup: ++ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); + mlx5_cmd_cleanup(dev); + + return err; +@@ -1073,6 +1076,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) + } + mlx5_reclaim_startup_pages(dev); + mlx5_core_disable_hca(dev, 0); ++ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); + mlx5_cmd_cleanup(dev); + + return 0; +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h +index c2009064805a..013786924596 100644 +--- a/include/linux/mlx5/driver.h ++++ b/include/linux/mlx5/driver.h +@@ -228,6 +228,12 @@ struct mlx5_bfreg_info { + u32 num_dyn_bfregs; + }; + ++enum mlx5_cmdif_state { ++ MLX5_CMDIF_STATE_UNINITIALIZED, ++ MLX5_CMDIF_STATE_UP, ++ MLX5_CMDIF_STATE_DOWN, ++}; ++ + struct mlx5_cmd_first { + __be32 data[4]; + }; +@@ -301,6 +307,7 @@ struct mlx5_cmd { + spinlock_t token_lock; + u8 token; + #ifndef __GENKSYMS__ ++ u8 /* enum mlx5_cmdif_state */ state; + u16 allowed_opcode; + #endif + unsigned long bitmask; +@@ -904,6 +911,8 @@ enum { + + int mlx5_cmd_init(struct mlx5_core_dev *dev); + void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); ++void mlx5_cmd_set_state(struct mlx5_core_dev *dev, ++ enum mlx5_cmdif_state cmdif_state); + void mlx5_cmd_use_events(struct mlx5_core_dev *dev); + void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); + void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); +-- +2.13.6 + diff --git a/SOURCES/0281-netdrv-net-mlx5e-Fix-allowed-tc-redirect-merged-eswi.patch b/SOURCES/0281-netdrv-net-mlx5e-Fix-allowed-tc-redirect-merged-eswi.patch new file mode 100644 index 0000000..acea16d --- /dev/null +++ b/SOURCES/0281-netdrv-net-mlx5e-Fix-allowed-tc-redirect-merged-eswi.patch @@ -0,0 +1,169 @@ +From 117363bb0ad6ef81b058a6406b657cb9d78c5578 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:34 -0400 +Subject: [PATCH 281/312] [netdrv] net/mlx5e: Fix allowed tc redirect merged + eswitch offload cases + +Message-id: <20200601154102.25980-12-ahleihel@redhat.com> +Patchwork-id: 315716 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 11/39] net/mlx5e: Fix allowed tc redirect merged eswitch offload cases +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit 321348475d544aa6705dcfac2135deeccb8dc0bb +Author: Maor Dickman +Date: Thu Apr 23 15:16:17 2020 +0300 + + net/mlx5e: Fix allowed tc redirect merged eswitch offload cases + + After changing the parent_id to be the same for both NICs of same + The cited commit wrongly allow offload of tc redirect flows from + VF to uplink and vice versa when devcies are on different eswitch, + these cases aren't supported by HW. + + Disallow the above offloads when devcies are on different eswitch + and VF LAG is not configured. 
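As a reading aid for this patch: the rule being enforced can be pictured with a small, self-contained C sketch. The struct and helper below are hypothetical stand-ins, not the driver's real types; only the function names that appear in the hunks further down (such as mlx5e_is_valid_eswitch_fwd_dev()) come from the patch itself.

/* sketch only: hypothetical types, not the mlx5 driver's structures */
#include <stdbool.h>
#include <stdio.h>

struct rep {
    int esw_id;          /* eswitch this representor belongs to */
    int port_id;         /* physical port */
    bool is_vf_rep;
    bool lag_configured; /* VF LAG / multipath bond active */
};

/* Forwarding is valid only between VF reps behind the same (merged)
 * eswitch, between reps of a configured LAG, or between reps that sit
 * on the same port; roughly the checks done by
 * mlx5e_is_valid_eswitch_fwd_dev() in the hunks below.
 */
static bool fwd_allowed(const struct rep *in, const struct rep *out)
{
    if (in->is_vf_rep && out->is_vf_rep && in->esw_id == out->esw_id)
        return true;
    if (in->lag_configured && out->lag_configured)
        return true;
    return in->port_id == out->port_id;
}

int main(void)
{
    struct rep vf = { .esw_id = 0, .port_id = 0, .is_vf_rep = true };
    struct rep uplink = { .esw_id = 1, .port_id = 1 };

    /* different eswitch, no LAG: must be rejected */
    printf("%d\n", fwd_allowed(&vf, &uplink));
    return 0;
}
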
+ + Fixes: f6dc1264f1c0 ("net/mlx5e: Disallow tc redirect offload cases we don't support") + Signed-off-by: Maor Dickman + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 8 ++--- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 7 ++++- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 40 +++++++++++++++++++----- + 3 files changed, 41 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 00213299e616..681003317271 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1429,13 +1429,9 @@ bool mlx5e_eswitch_uplink_rep(struct net_device *netdev) + return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; + } + +-bool mlx5e_eswitch_rep(struct net_device *netdev) ++bool mlx5e_eswitch_vf_rep(struct net_device *netdev) + { +- if (netdev->netdev_ops == &mlx5e_netdev_ops_rep || +- netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep) +- return true; +- +- return false; ++ return netdev->netdev_ops == &mlx5e_netdev_ops_rep; + } + + static void mlx5e_build_rep_params(struct net_device *netdev) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +index eccf61e1ac96..425b151ca032 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +@@ -209,8 +209,13 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, + + void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); + +-bool mlx5e_eswitch_rep(struct net_device *netdev); ++bool mlx5e_eswitch_vf_rep(struct net_device *netdev); + bool mlx5e_eswitch_uplink_rep(struct net_device *netdev); ++static inline bool mlx5e_eswitch_rep(struct net_device *netdev) ++{ ++ return mlx5e_eswitch_vf_rep(netdev) || ++ mlx5e_eswitch_uplink_rep(netdev); ++} + + #else /* CONFIG_MLX5_ESWITCH */ + static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index c0e06114d328..bc8878b82078 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3073,6 +3073,11 @@ static bool actions_match_supported(struct mlx5e_priv *priv, + return true; + } + ++static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) ++{ ++ return priv->mdev == peer_priv->mdev; ++} ++ + static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) + { + struct mlx5_core_dev *fmdev, *pmdev; +@@ -3290,7 +3295,7 @@ static inline int hash_encap_info(struct encap_key *key) + } + + +-static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, ++static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, + struct net_device *peer_netdev) + { + struct mlx5e_priv *peer_priv; +@@ -3298,13 +3303,11 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, + peer_priv = netdev_priv(peer_netdev); + + return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && +- mlx5e_eswitch_rep(priv->netdev) && +- mlx5e_eswitch_rep(peer_netdev) && ++ mlx5e_eswitch_vf_rep(priv->netdev) && ++ mlx5e_eswitch_vf_rep(peer_netdev) && + same_hw_devs(priv, peer_priv)); + } + +- +- + bool mlx5e_encap_take(struct mlx5e_encap_entry *e) + { + return refcount_inc_not_zero(&e->refcnt); 
+@@ -3574,14 +3577,37 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv, + return err; + } + ++static bool same_hw_reps(struct mlx5e_priv *priv, ++ struct net_device *peer_netdev) ++{ ++ struct mlx5e_priv *peer_priv; ++ ++ peer_priv = netdev_priv(peer_netdev); ++ ++ return mlx5e_eswitch_rep(priv->netdev) && ++ mlx5e_eswitch_rep(peer_netdev) && ++ same_hw_devs(priv, peer_priv); ++} ++ ++static bool is_lag_dev(struct mlx5e_priv *priv, ++ struct net_device *peer_netdev) ++{ ++ return ((mlx5_lag_is_sriov(priv->mdev) || ++ mlx5_lag_is_multipath(priv->mdev)) && ++ same_hw_reps(priv, peer_netdev)); ++} ++ + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, + struct net_device *out_dev) + { +- if (is_merged_eswitch_dev(priv, out_dev)) ++ if (is_merged_eswitch_vfs(priv, out_dev)) ++ return true; ++ ++ if (is_lag_dev(priv, out_dev)) + return true; + + return mlx5e_eswitch_rep(out_dev) && +- same_hw_devs(priv, netdev_priv(out_dev)); ++ same_port_devs(priv, netdev_priv(out_dev)); + } + + static bool is_duplicated_output_device(struct net_device *dev, +-- +2.13.6 + diff --git a/SOURCES/0282-netdrv-net-mlx5e-kTLS-Destroy-key-object-after-destr.patch b/SOURCES/0282-netdrv-net-mlx5e-kTLS-Destroy-key-object-after-destr.patch new file mode 100644 index 0000000..3dbb151 --- /dev/null +++ b/SOURCES/0282-netdrv-net-mlx5e-kTLS-Destroy-key-object-after-destr.patch @@ -0,0 +1,59 @@ +From 28b45723942ac89a12419ce9c85a6bdb297d01a4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:35 -0400 +Subject: [PATCH 282/312] [netdrv] net/mlx5e: kTLS, Destroy key object after + destroying the TIS + +Message-id: <20200601154102.25980-13-ahleihel@redhat.com> +Patchwork-id: 315717 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 12/39] net/mlx5e: kTLS, Destroy key object after destroying the TIS +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit 16736e11f43b80a38f98f6add54fab3b8c297df3 +Author: Tariq Toukan +Date: Mon Apr 27 16:56:59 2020 +0300 + + net/mlx5e: kTLS, Destroy key object after destroying the TIS + + The TLS TIS object contains the dek/key ID. + By destroying the key first, the TIS would contain an invalid + non-existing key ID. + Reverse the destroy order, this also acheives the desired assymetry + between the destroy and the create flows. 
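The principle behind this fix is plain reverse-order teardown: the object that holds a reference (the TIS) goes away before the object it references (the key). A minimal user-space sketch of that ordering, using made-up types rather than the driver's API:

/* sketch only: generic create/teardown symmetry, no mlx5 APIs */
#include <stdlib.h>

struct key     { int id; };
struct session { struct key *key; };    /* plays the role of the TIS */

int main(void)
{
    struct key *k = malloc(sizeof(*k));
    struct session *s = malloc(sizeof(*s));

    if (!k || !s) {
        free(k);
        free(s);
        return 1;
    }
    k->id = 42;
    s->key = k;

    /* Teardown mirrors creation in reverse: release the referencing
     * object first, then its dependency, so nothing live ever points
     * at an already-destroyed object.
     */
    free(s);
    free(k);
    return 0;
}
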
+ + Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support") + Signed-off-by: Tariq Toukan + Reviewed-by: Boris Pismenny + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +index 46725cd743a3..7d1985fa0d4f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +@@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev, + struct mlx5e_ktls_offload_context_tx *tx_priv = + mlx5e_get_ktls_tx_priv_ctx(tls_ctx); + +- mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); + mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); ++ mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); + kvfree(tx_priv); + } + +-- +2.13.6 + diff --git a/SOURCES/0283-netdrv-net-mlx5e-Fix-inner-tirs-handling.patch b/SOURCES/0283-netdrv-net-mlx5e-Fix-inner-tirs-handling.patch new file mode 100644 index 0000000..cf01703 --- /dev/null +++ b/SOURCES/0283-netdrv-net-mlx5e-Fix-inner-tirs-handling.patch @@ -0,0 +1,157 @@ +From ed01318b69be279cdb29591d04d8751d28e0b5b8 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:36 -0400 +Subject: [PATCH 283/312] [netdrv] net/mlx5e: Fix inner tirs handling + +Message-id: <20200601154102.25980-14-ahleihel@redhat.com> +Patchwork-id: 315719 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 13/39] net/mlx5e: Fix inner tirs handling +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit a16b8e0dcf7043bee46174bed0553cc9e36b63a5 +Author: Roi Dayan +Date: Thu Apr 30 09:16:01 2020 +0300 + + net/mlx5e: Fix inner tirs handling + + In the cited commit inner_tirs argument was added to create and destroy + inner tirs, and no indication was added to mlx5e_modify_tirs_hash() + function. In order to have a consistent handling, use + inner_indir_tir[0].tirn in tirs destroy/modify function as an indication + to whether inner tirs are created. + Inner tirs are not created for representors and before this commit, + a call to mlx5e_modify_tirs_hash() was sending HW commands to + modify non-existent inner tirs. 
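The pattern here is to key the modify and destroy paths off the stored resource handle itself instead of a separately threaded boolean. A small sketch with invented names (the real code tests inner_indir_tir[0].tirn):

/* sketch only: a zero handle means "never created" */
#include <stdio.h>

#define NUM_TIRS 4

struct tir  { unsigned int tirn; };              /* 0 == not created */
struct priv { struct tir inner_tir[NUM_TIRS]; };

static void modify_inner_tirs(struct priv *p)
{
    int i;

    if (!p->inner_tir[0].tirn)   /* nothing was created, issue nothing */
        return;
    for (i = 0; i < NUM_TIRS; i++)
        printf("modify tir %u\n", p->inner_tir[i].tirn);
}

int main(void)
{
    struct priv rep = { { { 0 } } };   /* e.g. a representor: no inner TIRs */

    modify_inner_tirs(&rep);           /* prints nothing */
    return 0;
}
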
+ + Fixes: 46dc933cee82 ("net/mlx5e: Provide explicit directive if to create inner indirect tirs") + Signed-off-by: Roi Dayan + Reviewed-by: Vlad Buslov + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 +++++++----- + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4 ++-- + 4 files changed, 12 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 09c9f8c0ef48..ab0985099444 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1109,7 +1109,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); + int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); + + int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); +-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); ++void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); + + int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); + void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 15102b5b8d4a..5c347f179c01 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -2718,7 +2718,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) + mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); + } + +- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) ++ /* Verify inner tirs resources allocated */ ++ if (!priv->inner_indir_tir[0].tirn) + return; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { +@@ -3410,14 +3411,15 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) + return err; + } + +-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) ++void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) + { + int i; + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); + +- if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) ++ /* Verify inner tirs resources allocated */ ++ if (!priv->inner_indir_tir[0].tirn) + return; + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) +@@ -5130,7 +5132,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + err_destroy_indirect_tirs: +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + err_destroy_indirect_rqts: +@@ -5149,7 +5151,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) + mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); + mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index 681003317271..b228762357ee 100644 +--- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -1688,7 +1688,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + err_destroy_indirect_tirs: +- mlx5e_destroy_indirect_tirs(priv, false); ++ mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + err_destroy_indirect_rqts: +@@ -1706,7 +1706,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) + mlx5e_destroy_rep_root_ft(priv); + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); +- mlx5e_destroy_indirect_tirs(priv, false); ++ mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +index 673aaa815f57..505cf6eeae25 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +@@ -396,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); + err_destroy_indirect_tirs: +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + err_destroy_indirect_rqts: +@@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) + { + mlx5i_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv, priv->direct_tir); +- mlx5e_destroy_indirect_tirs(priv, true); ++ mlx5e_destroy_indirect_tirs(priv); + mlx5e_destroy_direct_rqts(priv, priv->direct_tir); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); +-- +2.13.6 + diff --git a/SOURCES/0284-netdrv-net-mlx5-Fix-memory-leak-in-mlx5_events_init.patch b/SOURCES/0284-netdrv-net-mlx5-Fix-memory-leak-in-mlx5_events_init.patch new file mode 100644 index 0000000..4a1cc63 --- /dev/null +++ b/SOURCES/0284-netdrv-net-mlx5-Fix-memory-leak-in-mlx5_events_init.patch @@ -0,0 +1,59 @@ +From 45f622a9a7b5f51f041e8871ebc9ab21bff2dfaa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:37 -0400 +Subject: [PATCH 284/312] [netdrv] net/mlx5: Fix memory leak in + mlx5_events_init + +Message-id: <20200601154102.25980-15-ahleihel@redhat.com> +Patchwork-id: 315718 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 14/39] net/mlx5: Fix memory leak in mlx5_events_init +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit df14ad1eccb04a4a28c90389214dbacab085b244 +Author: Moshe Shemesh +Date: Wed Apr 29 23:56:58 2020 +0300 + + net/mlx5: Fix memory leak in mlx5_events_init + + Fix memory leak in mlx5_events_init(), in case + create_single_thread_workqueue() fails, events + struct should be freed. 
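The fix follows the standard error-unwind rule: if a later allocation step fails, everything allocated earlier in the same init function must be released before returning. A self-contained sketch of that rule, where plain calloc() stands in for the workqueue allocation:

/* sketch only: release earlier allocations when a later step fails */
#include <stdlib.h>

struct events { void *wq; };

static int events_init(struct events **out)
{
    struct events *ev = calloc(1, sizeof(*ev));

    if (!ev)
        return -1;

    ev->wq = calloc(1, 64);   /* stands in for create_singlethread_workqueue() */
    if (!ev->wq) {
        free(ev);             /* the fix: do not leak 'ev' on this path */
        return -1;
    }

    *out = ev;
    return 0;
}

int main(void)
{
    struct events *ev = NULL;

    if (events_init(&ev))
        return 1;
    free(ev->wq);
    free(ev);
    return 0;
}
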
+ + Fixes: 5d3c537f9070 ("net/mlx5: Handle event of power detection in the PCIE slot") + Signed-off-by: Moshe Shemesh + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/events.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c +index 8bcf3426b9c6..3ce17c3d7a00 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c +@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev) + events->dev = dev; + dev->priv.events = events; + events->wq = create_singlethread_workqueue("mlx5_events"); +- if (!events->wq) ++ if (!events->wq) { ++ kfree(events); + return -ENOMEM; ++ } + INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); + + return 0; +-- +2.13.6 + diff --git a/SOURCES/0285-netdrv-net-mlx5-Fix-cleaning-unmanaged-flow-tables.patch b/SOURCES/0285-netdrv-net-mlx5-Fix-cleaning-unmanaged-flow-tables.patch new file mode 100644 index 0000000..b0f225d --- /dev/null +++ b/SOURCES/0285-netdrv-net-mlx5-Fix-cleaning-unmanaged-flow-tables.patch @@ -0,0 +1,77 @@ +From db6d49db1b50825fc1d5efde0d113fea05ab412a Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:38 -0400 +Subject: [PATCH 285/312] [netdrv] net/mlx5: Fix cleaning unmanaged flow tables + +Message-id: <20200601154102.25980-16-ahleihel@redhat.com> +Patchwork-id: 315720 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 15/39] net/mlx5: Fix cleaning unmanaged flow tables +Bugzilla: 1842258 1840408 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Bugzilla: http://bugzilla.redhat.com/1840408 +Upstream: v5.7-rc7 + +commit aee37f3d940ca732df71c3df49347bccaafc0b24 +Author: Roi Dayan +Date: Mon May 11 16:32:09 2020 +0300 + + net/mlx5: Fix cleaning unmanaged flow tables + + Unmanaged flow tables doesn't have a parent and tree_put_node() + assume there is always a parent if cleaning is needed. fix that. + + Fixes: 5281a0c90919 ("net/mlx5: fs_core: Introduce unmanaged flow tables") + Signed-off-by: Roi Dayan + Reviewed-by: Mark Bloch + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 11 ++++++----- + 1 file changed, 6 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index f44e366ecfa8..6927d9a1c910 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -323,14 +323,13 @@ static void tree_put_node(struct fs_node *node, bool locked) + if (node->del_hw_func) + node->del_hw_func(node); + if (parent_node) { +- /* Only root namespace doesn't have parent and we just +- * need to free its node. 
+- */ + down_write_ref_node(parent_node, locked); + list_del_init(&node->list); + if (node->del_sw_func) + node->del_sw_func(node); + up_write_ref_node(parent_node, locked); ++ } else if (node->del_sw_func) { ++ node->del_sw_func(node); + } else { + kfree(node); + } +@@ -447,8 +446,10 @@ static void del_sw_flow_table(struct fs_node *node) + fs_get_obj(ft, node); + + rhltable_destroy(&ft->fgs_hash); +- fs_get_obj(prio, ft->node.parent); +- prio->num_ft--; ++ if (ft->node.parent) { ++ fs_get_obj(prio, ft->node.parent); ++ prio->num_ft--; ++ } + kfree(ft); + } + +-- +2.13.6 + diff --git a/SOURCES/0286-netdrv-net-mlx5-Don-t-maintain-a-case-of-del_sw_func.patch b/SOURCES/0286-netdrv-net-mlx5-Don-t-maintain-a-case-of-del_sw_func.patch new file mode 100644 index 0000000..de9a34d --- /dev/null +++ b/SOURCES/0286-netdrv-net-mlx5-Don-t-maintain-a-case-of-del_sw_func.patch @@ -0,0 +1,87 @@ +From fb95b280014de1cc4cb10e5743b83b00817ac7be Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:39 -0400 +Subject: [PATCH 286/312] [netdrv] net/mlx5: Don't maintain a case of + del_sw_func being null + +Message-id: <20200601154102.25980-17-ahleihel@redhat.com> +Patchwork-id: 315721 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 16/39] net/mlx5: Don't maintain a case of del_sw_func being null +Bugzilla: 1842258 1840408 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Bugzilla: http://bugzilla.redhat.com/1840408 +Upstream: v5.7-rc7 + +commit 6eb7a268a99bad8346d4baa148a14456d061c1c3 +Author: Roi Dayan +Date: Mon May 11 16:37:11 2020 +0300 + + net/mlx5: Don't maintain a case of del_sw_func being null + + Add del_sw_func cb for root ns. Now there is no need to + maintain a case of del_sw_func being null when freeing the node. 
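The simplification is a common one: install a trivial callback on the one object that had none (the root namespace) so every caller may assume the callback exists, instead of special-casing NULL at each call site. A hypothetical sketch, not the flow-steering code itself:

/* sketch only: a trivial callback removes the NULL special case */
#include <stdlib.h>

struct node {
    void (*del_sw)(struct node *n);
};

static void del_sw_default(struct node *n)
{
    free(n);                  /* what the old NULL branch did by hand */
}

static void put_node(struct node *n)
{
    n->del_sw(n);             /* callers no longer test for NULL */
}

int main(void)
{
    struct node *root = malloc(sizeof(*root));

    if (!root)
        return 1;
    root->del_sw = del_sw_default;   /* set at init, like del_sw_root_ns() */
    put_node(root);
    return 0;
}
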
+ + Fixes: 2cc43b494a6c ("net/mlx5_core: Managing root flow table") + Signed-off-by: Roi Dayan + Reviewed-by: Mark Bloch + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 17 +++++++++-------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 6927d9a1c910..6343d5df787d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -325,14 +325,10 @@ static void tree_put_node(struct fs_node *node, bool locked) + if (parent_node) { + down_write_ref_node(parent_node, locked); + list_del_init(&node->list); +- if (node->del_sw_func) +- node->del_sw_func(node); +- up_write_ref_node(parent_node, locked); +- } else if (node->del_sw_func) { +- node->del_sw_func(node); +- } else { +- kfree(node); + } ++ node->del_sw_func(node); ++ if (parent_node) ++ up_write_ref_node(parent_node, locked); + node = NULL; + } + if (!node && parent_node) +@@ -2360,6 +2356,11 @@ static int init_root_tree(struct mlx5_flow_steering *steering, + return 0; + } + ++static void del_sw_root_ns(struct fs_node *node) ++{ ++ kfree(node); ++} ++ + static struct mlx5_flow_root_namespace + *create_root_ns(struct mlx5_flow_steering *steering, + enum fs_flow_table_type table_type) +@@ -2386,7 +2387,7 @@ static struct mlx5_flow_root_namespace + ns = &root_ns->ns; + fs_init_namespace(ns); + mutex_init(&root_ns->chain_lock); +- tree_init_node(&ns->node, NULL, NULL); ++ tree_init_node(&ns->node, NULL, del_sw_root_ns); + tree_add_node(&ns->node, NULL); + + return root_ns; +-- +2.13.6 + diff --git a/SOURCES/0287-netdrv-net-mlx5-Annotate-mutex-destroy-for-root-ns.patch b/SOURCES/0287-netdrv-net-mlx5-Annotate-mutex-destroy-for-root-ns.patch new file mode 100644 index 0000000..102c464 --- /dev/null +++ b/SOURCES/0287-netdrv-net-mlx5-Annotate-mutex-destroy-for-root-ns.patch @@ -0,0 +1,58 @@ +From 35c46ad4062e1cffb1bc2df6e44b4cc75000e119 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:40 -0400 +Subject: [PATCH 287/312] [netdrv] net/mlx5: Annotate mutex destroy for root ns + +Message-id: <20200601154102.25980-18-ahleihel@redhat.com> +Patchwork-id: 315722 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 17/39] net/mlx5: Annotate mutex destroy for root ns +Bugzilla: 1842258 1840408 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Bugzilla: http://bugzilla.redhat.com/1840408 +Upstream: v5.7-rc7 + +commit 9ca415399dae133b00273a4283ef31d003a6818d +Author: Roi Dayan +Date: Thu May 14 23:44:38 2020 +0300 + + net/mlx5: Annotate mutex destroy for root ns + + Invoke mutex_destroy() to catch any errors. 
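The idea generalizes to any lock API: pair every init with a destroy in the object's teardown so the debug machinery has a chance to flag misuse. A user-space analogue using POSIX mutexes (not the kernel's mutex_destroy()):

/* sketch only: pair _init with _destroy in the object's teardown */
#include <pthread.h>

struct root_ns {
    pthread_mutex_t chain_lock;
};

static void root_ns_init(struct root_ns *ns)
{
    pthread_mutex_init(&ns->chain_lock, NULL);
}

static void root_ns_free(struct root_ns *ns)
{
    /* Destroying the mutex gives the library a chance to report
     * misuse (for example, a still-locked mutex), which is the same
     * intent as the mutex_destroy() annotation added below.
     */
    pthread_mutex_destroy(&ns->chain_lock);
}

int main(void)
{
    struct root_ns ns;

    root_ns_init(&ns);
    root_ns_free(&ns);
    return 0;
}
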
+ + Fixes: 2cc43b494a6c ("net/mlx5_core: Managing root flow table") + Signed-off-by: Roi Dayan + Reviewed-by: Mark Bloch + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 6343d5df787d..cbf8126242fc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2358,6 +2358,12 @@ static int init_root_tree(struct mlx5_flow_steering *steering, + + static void del_sw_root_ns(struct fs_node *node) + { ++ struct mlx5_flow_root_namespace *root_ns; ++ struct mlx5_flow_namespace *ns; ++ ++ fs_get_obj(ns, node); ++ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns); ++ mutex_destroy(&root_ns->chain_lock); + kfree(node); + } + +-- +2.13.6 + diff --git a/SOURCES/0288-netdrv-net-mlx5e-Update-netdev-txq-on-completions-du.patch b/SOURCES/0288-netdrv-net-mlx5e-Update-netdev-txq-on-completions-du.patch new file mode 100644 index 0000000..9d7ab7f --- /dev/null +++ b/SOURCES/0288-netdrv-net-mlx5e-Update-netdev-txq-on-completions-du.patch @@ -0,0 +1,77 @@ +From cf5f57fff37c1669cb2813543f8270d884088cba Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:41 -0400 +Subject: [PATCH 288/312] [netdrv] net/mlx5e: Update netdev txq on completions + during closure + +Message-id: <20200601154102.25980-19-ahleihel@redhat.com> +Patchwork-id: 315724 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 18/39] net/mlx5e: Update netdev txq on completions during closure +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit 5e911e2c06bd8c17df29147a5e2d4b17fafda024 +Author: Moshe Shemesh +Date: Tue Apr 7 17:38:28 2020 +0300 + + net/mlx5e: Update netdev txq on completions during closure + + On sq closure when we free its descriptors, we should also update netdev + txq on completions which would not arrive. Otherwise if we reopen sqs + and attach them back, for example on fw fatal recovery flow, we may get + tx timeout. 
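The underlying rule: when a send queue is drained without completions, the drain loop must report the skipped packets and bytes to the stack's queue accounting itself, exactly as a completion would have, or the queue looks permanently busy. A tiny counter-based sketch (the real code calls netdev_tx_completed_queue()):

/* sketch only: a drain path must account work like a completion does */
#include <stdio.h>

static unsigned int sent, completed;     /* BQL-style packet counters */

static void on_completion(unsigned int n)
{
    completed += n;
}

static void drain_on_close(unsigned int pending)
{
    /* These descriptors will never raise a completion, so account
     * them here; otherwise the stack thinks they are still in flight
     * and a later reopen can end in a tx timeout.
     */
    on_completion(pending);
}

int main(void)
{
    sent = 10;
    on_completion(7);                  /* normal completions */
    drain_on_close(3);                 /* freed while closing the SQ */
    printf("in flight: %u\n", sent - completed);   /* 0 */
    return 0;
}
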
+ + Fixes: 29429f3300a3 ("net/mlx5e: Timeout if SQ doesn't flush during close") + Signed-off-by: Moshe Shemesh + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +index bb73d9ea131e..a0ffd97e8319 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +@@ -538,10 +538,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) + void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) + { + struct mlx5e_tx_wqe_info *wi; ++ u32 dma_fifo_cc, nbytes = 0; ++ u16 ci, sqcc, npkts = 0; + struct sk_buff *skb; +- u32 dma_fifo_cc; +- u16 sqcc; +- u16 ci; + int i; + + sqcc = sq->cc; +@@ -566,11 +565,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) + } + + dev_kfree_skb_any(skb); ++ npkts++; ++ nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + } + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; ++ ++ netdev_tx_completed_queue(sq->txq, npkts, nbytes); + } + + #ifdef CONFIG_MLX5_CORE_IPOIB +-- +2.13.6 + diff --git a/SOURCES/0289-netdrv-net-mlx5e-CT-Correctly-get-flow-rule.patch b/SOURCES/0289-netdrv-net-mlx5e-CT-Correctly-get-flow-rule.patch new file mode 100644 index 0000000..13f05df --- /dev/null +++ b/SOURCES/0289-netdrv-net-mlx5e-CT-Correctly-get-flow-rule.patch @@ -0,0 +1,88 @@ +From 5fc5946d699899ae403c7bb0fa6ff621d8d5b968 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:42 -0400 +Subject: [PATCH 289/312] [netdrv] net/mlx5e: CT: Correctly get flow rule + +Message-id: <20200601154102.25980-20-ahleihel@redhat.com> +Patchwork-id: 315723 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 19/39] net/mlx5e: CT: Correctly get flow rule +Bugzilla: 1842258 1840408 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Bugzilla: http://bugzilla.redhat.com/1840408 +Upstream: v5.7-rc7 + +commit d37bd5e81ed0d58f0ebe2e01658c26722e0c033e +Author: Roi Dayan +Date: Mon May 18 20:21:11 2020 +0300 + + net/mlx5e: CT: Correctly get flow rule + + The correct way is to us the flow_cls_offload_flow_rule() wrapper + instead of f->rule directly. 
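The change boils down to using the layer's accessor rather than poking at the struct field, so callers keep working if the offload object's layout changes. A generic sketch with invented types (the real accessor is flow_cls_offload_flow_rule()):

/* sketch only: go through an accessor instead of f->rule directly */
#include <stdio.h>

struct rule    { int id; };
struct offload { struct rule *rule; };

/* the single place that knows how the rule is stored */
static struct rule *offload_flow_rule(const struct offload *f)
{
    return f->rule;
}

int main(void)
{
    struct rule r = { .id = 7 };
    struct offload f = { .rule = &r };

    printf("rule id %d\n", offload_flow_rule(&f)->id);
    return 0;
}
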
+ + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Signed-off-by: Roi Dayan + Reviewed-by: Oz Shlomo + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 5 +++-- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 4 +++- + 2 files changed, 6 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 8f94a4dde2bf..5ad72232dce9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -713,6 +713,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + struct netlink_ext_ack *extack) + { + struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector_key_ct *mask, *key; + bool trk, est, untrk, unest, new; + u32 ctstate = 0, ctstate_mask = 0; +@@ -720,7 +721,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + u16 ct_state, ct_state_mask; + struct flow_match_ct match; + +- if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) + return 0; + + if (!ct_priv) { +@@ -729,7 +730,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + return -EOPNOTSUPP; + } + +- flow_rule_match_ct(f->rule, &match); ++ flow_rule_match_ct(rule, &match); + + key = match.key; + mask = match.mask; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +index 091d305b633e..626f6c04882e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +@@ -130,7 +130,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, + struct flow_cls_offload *f, + struct netlink_ext_ack *extack) + { +- if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) ++ struct flow_rule *rule = flow_cls_offload_flow_rule(f); ++ ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) + return 0; + + NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled."); +-- +2.13.6 + diff --git a/SOURCES/0290-netdrv-net-mlx5-Fix-error-flow-in-case-of-function_s.patch b/SOURCES/0290-netdrv-net-mlx5-Fix-error-flow-in-case-of-function_s.patch new file mode 100644 index 0000000..ea7e5d3 --- /dev/null +++ b/SOURCES/0290-netdrv-net-mlx5-Fix-error-flow-in-case-of-function_s.patch @@ -0,0 +1,64 @@ +From ff053ba86c5373334906f06a0208695dc38a6006 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:43 -0400 +Subject: [PATCH 290/312] [netdrv] net/mlx5: Fix error flow in case of + function_setup failure + +Message-id: <20200601154102.25980-21-ahleihel@redhat.com> +Patchwork-id: 315725 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 20/39] net/mlx5: Fix error flow in case of function_setup failure +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc7 + +commit 4f7400d5cbaef676e00cdffb0565bf731c6bb09e +Author: Shay Drory +Date: Wed May 6 14:52:04 2020 +0300 + + net/mlx5: Fix error flow in case of function_setup failure + + Currently, if an error occurred during mlx5_function_setup(), we + keep dev->state as DEVICE_STATE_UP. + Fixing it by adding a goto label. 
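The shape of the fix is the usual goto-unwind pattern: state mutated early in a function must be rolled back on every failure exit, so each error path jumps to a label that performs the rollback. A compact sketch with invented state names:

/* sketch only: every error exit funnels through one rollback label */
#include <stdio.h>

enum dev_state { DEV_DOWN, DEV_UP, DEV_ERROR };

static int load_one(int make_setup_fail)
{
    enum dev_state state = DEV_UP;    /* changed early, must be undone on error */
    int err = 0;

    if (make_setup_fail) {
        err = -1;
        goto err_function;            /* the label this patch adds */
    }

    printf("loaded, state=%d\n", state);
    return 0;

err_function:
    state = DEV_ERROR;                /* rollback that the old 'goto out' skipped */
    printf("failed, state=%d\n", state);
    return err;
}

int main(void)
{
    load_one(1);                      /* exercise the failure path */
    return 0;
}
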
+ + Fixes: e161105e58da ("net/mlx5: Function setup/teardown procedures") + Signed-off-by: Shay Drory + Reviewed-by: Moshe Shemesh + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/main.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c +index fdc0c0f7da96..79e4bfeae70b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +@@ -1212,7 +1212,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + + err = mlx5_function_setup(dev, boot); + if (err) +- goto out; ++ goto err_function; + + if (boot) { + err = mlx5_init_once(dev); +@@ -1250,6 +1250,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) + mlx5_cleanup_once(dev); + function_teardown: + mlx5_function_teardown(dev, boot); ++err_function: + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + mutex_unlock(&dev->intf_state_mutex); + +-- +2.13.6 + diff --git a/SOURCES/0291-netdrv-net-mlx5e-IPoIB-Enable-loopback-packets-for-I.patch b/SOURCES/0291-netdrv-net-mlx5e-IPoIB-Enable-loopback-packets-for-I.patch new file mode 100644 index 0000000..77acbfd --- /dev/null +++ b/SOURCES/0291-netdrv-net-mlx5e-IPoIB-Enable-loopback-packets-for-I.patch @@ -0,0 +1,183 @@ +From 03a1dd382c4e2dee654161b656f6ff2528e198aa Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:44 -0400 +Subject: [PATCH 291/312] [netdrv] net/mlx5e: IPoIB, Enable loopback packets + for IPoIB interfaces + +Message-id: <20200601154102.25980-22-ahleihel@redhat.com> +Patchwork-id: 315729 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 21/39] net/mlx5e: IPoIB, Enable loopback packets for IPoIB interfaces +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 80639b199c9ca87444da218ba0e7511946452dd4 +Author: Erez Shitrit +Date: Sun May 3 13:01:37 2020 +0300 + + net/mlx5e: IPoIB, Enable loopback packets for IPoIB interfaces + + Enable loopback of unicast and multicast traffic for IPoIB enhanced + mode. + This will allow interfaces with the same pkey to communicate between + them e.g cloned interfaces that located in different namespaces. 
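Mechanically, the patch below folds two independent knobs into one TIR field. The sketch keeps only that folding pattern; the bit names are invented and say nothing about the hardware's actual self_lb_block semantics:

/* sketch only: fold two independent booleans into one field value */
#include <stdio.h>

#define LB_FLAG_UNICAST   (1u << 0)
#define LB_FLAG_MULTICAST (1u << 1)

static unsigned int build_lb_flags(int uc, int mc)
{
    unsigned int flags = 0;

    if (uc)
        flags |= LB_FLAG_UNICAST;
    if (mc)
        flags |= LB_FLAG_MULTICAST;
    return flags;
}

int main(void)
{
    printf("0x%x\n", build_lb_flags(1, 1));   /* what the IPoIB profile passes */
    printf("0x%x\n", build_lb_flags(0, 0));   /* what the plain NIC profile passes */
    return 0;
}
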
+ + Signed-off-by: Erez Shitrit + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++- + drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13 ++++++++++--- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 7 ++++++- + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 2 ++ + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2 +- + 7 files changed, 24 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index ab0985099444..9832ac9a55dc 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1097,7 +1097,8 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir); + int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); + void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); +-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb); ++int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, ++ bool enable_mc_lb); + + /* common netdev helpers */ + void mlx5e_create_q_counters(struct mlx5e_priv *priv); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +index f7890e0ce96c..03ca68708f1a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +@@ -142,10 +142,12 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) + memset(res, 0, sizeof(*res)); + } + +-int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) ++int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, ++ bool enable_mc_lb) + { + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_tir *tir; ++ u8 lb_flags = 0; + int err = 0; + u32 tirn = 0; + int inlen; +@@ -159,8 +161,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) + } + + if (enable_uc_lb) +- MLX5_SET(modify_tir_in, in, ctx.self_lb_block, +- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST); ++ lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; ++ ++ if (enable_mc_lb) ++ lb_flags |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST; ++ ++ if (lb_flags) ++ MLX5_SET(modify_tir_in, in, ctx.self_lb_block, lb_flags); + + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index 5c347f179c01..e0bd700634c8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -5235,7 +5235,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) + + int mlx5e_update_nic_rx(struct mlx5e_priv *priv) + { +- return mlx5e_refresh_tirs(priv, false); ++ return mlx5e_refresh_tirs(priv, false, false); + } + + static const struct mlx5e_profile mlx5e_nic_profile = { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +index bbff8d8ded76..46790216ce86 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +@@ -234,7 +234,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, + return err; + } + +- err = 
mlx5e_refresh_tirs(priv, true); ++ err = mlx5e_refresh_tirs(priv, true, false); + if (err) + goto out; + +@@ -263,7 +263,7 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, + mlx5_nic_vport_update_local_lb(priv->mdev, false); + + dev_remove_pack(&lbtp->pt); +- mlx5e_refresh_tirs(priv, false); ++ mlx5e_refresh_tirs(priv, false, false); + } + + #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +index 505cf6eeae25..e4253ba7a861 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +@@ -256,6 +256,11 @@ void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp * + mlx5_core_destroy_qp(mdev, qp); + } + ++int mlx5i_update_nic_rx(struct mlx5e_priv *priv) ++{ ++ return mlx5e_refresh_tirs(priv, true, true); ++} ++ + int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn) + { + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; +@@ -450,7 +455,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = { + .cleanup_rx = mlx5i_cleanup_rx, + .enable = NULL, /* mlx5i_enable */ + .disable = NULL, /* mlx5i_disable */ +- .update_rx = mlx5e_update_nic_rx, ++ .update_rx = mlx5i_update_nic_rx, + .update_stats = NULL, /* mlx5i_update_stats */ + .update_carrier = NULL, /* no HW update in IB link */ + .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +index c87962cab921..99ad77ed1010 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +@@ -92,6 +92,8 @@ int mlx5i_init(struct mlx5_core_dev *mdev, + void *ppriv); + void mlx5i_cleanup(struct mlx5e_priv *priv); + ++int mlx5i_update_nic_rx(struct mlx5e_priv *priv); ++ + /* Get child interface nic profile */ + const struct mlx5e_profile *mlx5i_pkey_get_profile(void); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +index 96e64187c089..1a25ef296021 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +@@ -350,7 +350,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { + .cleanup_rx = mlx5i_pkey_cleanup_rx, + .enable = NULL, + .disable = NULL, +- .update_rx = mlx5e_update_nic_rx, ++ .update_rx = mlx5i_update_nic_rx, + .update_stats = NULL, + .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, + .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ +-- +2.13.6 + diff --git a/SOURCES/0292-netdrv-net-mlx5e-IPoIB-Drop-multicast-packets-that-t.patch b/SOURCES/0292-netdrv-net-mlx5e-IPoIB-Drop-multicast-packets-that-t.patch new file mode 100644 index 0000000..8d8c1c5 --- /dev/null +++ b/SOURCES/0292-netdrv-net-mlx5e-IPoIB-Drop-multicast-packets-that-t.patch @@ -0,0 +1,91 @@ +From e0043c18d6c5280d8dbc999700454fafee04baec Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:45 -0400 +Subject: [PATCH 292/312] [netdrv] net/mlx5e: IPoIB, Drop multicast packets + that this interface sent + +Message-id: <20200601154102.25980-23-ahleihel@redhat.com> +Patchwork-id: 315726 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 22/39] net/mlx5e: IPoIB, Drop multicast packets that this interface sent +Bugzilla: 1842258 
+RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 8b46d424a743ddfef8056d5167f13ee7ebd1dcad +Author: Erez Shitrit +Date: Mon May 4 11:46:25 2020 +0300 + + net/mlx5e: IPoIB, Drop multicast packets that this interface sent + + After enabled loopback packets for IPoIB, we need to drop these packets + that this HCA has replicated and came back to the same interface that + sent them. + + Fixes: 4c6c615e3f30 ("net/mlx5e: IPoIB, Add PKEY child interface nic profile") + Signed-off-by: Erez Shitrit + Reviewed-by: Alex Vesker + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +index 7aad59376ff4..aee120af6e1f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +@@ -1496,6 +1496,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) + + #ifdef CONFIG_MLX5_CORE_IPOIB + ++#define MLX5_IB_GRH_SGID_OFFSET 8 + #define MLX5_IB_GRH_DGID_OFFSET 24 + #define MLX5_GID_SIZE 16 + +@@ -1509,6 +1510,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + struct net_device *netdev; + struct mlx5e_priv *priv; + char *pseudo_header; ++ u32 flags_rqpn; + u32 qpn; + u8 *dgid; + u8 g; +@@ -1530,7 +1532,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + tstamp = &priv->tstamp; + stats = &priv->channel_stats[rq->ix].rq; + +- g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; ++ flags_rqpn = be32_to_cpu(cqe->flags_rqpn); ++ g = (flags_rqpn >> 28) & 3; + dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; + if ((!g) || dgid[0] != 0xff) + skb->pkt_type = PACKET_HOST; +@@ -1539,9 +1542,15 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, + else + skb->pkt_type = PACKET_MULTICAST; + +- /* TODO: IB/ipoib: Allow mcast packets from other VFs +- * 68996a6e760e5c74654723eeb57bf65628ae87f4 ++ /* Drop packets that this interface sent, ie multicast packets ++ * that the HCA has replicated. 
+ */ ++ if (g && (qpn == (flags_rqpn & 0xffffff)) && ++ (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET, ++ MLX5_GID_SIZE) == 0)) { ++ skb->dev = NULL; ++ return; ++ } + + skb_pull(skb, MLX5_IB_GRH_BYTES); + +-- +2.13.6 + diff --git a/SOURCES/0293-netdrv-net-mlx5-DR-Fix-incorrect-type-in-argument.patch b/SOURCES/0293-netdrv-net-mlx5-DR-Fix-incorrect-type-in-argument.patch new file mode 100644 index 0000000..8643ef9 --- /dev/null +++ b/SOURCES/0293-netdrv-net-mlx5-DR-Fix-incorrect-type-in-argument.patch @@ -0,0 +1,58 @@ +From 94516ebcf10eb8e9395e656fe7a241e16ed37ef5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:46 -0400 +Subject: [PATCH 293/312] [netdrv] net/mlx5: DR: Fix incorrect type in argument + +Message-id: <20200601154102.25980-24-ahleihel@redhat.com> +Patchwork-id: 315727 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 23/39] net/mlx5: DR: Fix incorrect type in argument +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 618f88c4c40a9621a3105f3ff957a91a148e7d94 +Author: Saeed Mahameed +Date: Thu May 28 01:02:08 2020 -0700 + + net/mlx5: DR: Fix incorrect type in argument + + HW spec objects should receive a void ptr to work on, the MLX5_SET/GET + macro will know how to handle it. + + No need to provide explicit or wrong pointer type in this case. + + warning: incorrect type in argument 1 (different base types) + expected unsigned long long const [usertype] *sw_action + got restricted __be64 [usertype] *[assigned] sw_action + + Signed-off-by: Saeed Mahameed + Reviewed-by: Mark Bloch + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +index 1d90378b155c..c3cc01f93672 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +@@ -1644,7 +1644,7 @@ dr_action_modify_check_field_limitation(struct mlx5dr_action *action, + } + + static bool +-dr_action_modify_check_is_ttl_modify(const u64 *sw_action) ++dr_action_modify_check_is_ttl_modify(const void *sw_action) + { + u16 sw_field = MLX5_GET(set_action_in, sw_action, field); + +-- +2.13.6 + diff --git a/SOURCES/0294-netdrv-net-mlx5-DR-Fix-cast-to-restricted-__be32.patch b/SOURCES/0294-netdrv-net-mlx5-DR-Fix-cast-to-restricted-__be32.patch new file mode 100644 index 0000000..d053f47 --- /dev/null +++ b/SOURCES/0294-netdrv-net-mlx5-DR-Fix-cast-to-restricted-__be32.patch @@ -0,0 +1,55 @@ +From c9729182a9808a78e34f42701efd067c13d4e75e Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:47 -0400 +Subject: [PATCH 294/312] [netdrv] net/mlx5: DR: Fix cast to restricted __be32 + +Message-id: <20200601154102.25980-25-ahleihel@redhat.com> +Patchwork-id: 315728 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 24/39] net/mlx5: DR: Fix cast to restricted __be32 +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: 
git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit c2ba2c2287698bac36bf71e5c4f3be423371bee0 +Author: Saeed Mahameed +Date: Thu May 28 01:11:37 2020 -0700 + + net/mlx5: DR: Fix cast to restricted __be32 + + raw_ip actual type is __be32 and not u32. + Fix that and get rid of the warning. + + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c:906:31: + warning: cast to restricted __be32 + + Signed-off-by: Saeed Mahameed + Reviewed-by: Mark Bloch + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +index c0e3a1e7389d..e13ac84b56c6 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +@@ -869,7 +869,7 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec) + + static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec) + { +- u32 raw_ip[4]; ++ __be32 raw_ip[4]; + + spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16); + +-- +2.13.6 + diff --git a/SOURCES/0295-netdrv-net-mlx5-DR-Fix-incorrect-type-in-return-expr.patch b/SOURCES/0295-netdrv-net-mlx5-DR-Fix-incorrect-type-in-return-expr.patch new file mode 100644 index 0000000..0ae0a6b --- /dev/null +++ b/SOURCES/0295-netdrv-net-mlx5-DR-Fix-incorrect-type-in-return-expr.patch @@ -0,0 +1,59 @@ +From dadbc13a9755555cbcbb92f39404308d3457b2f5 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:48 -0400 +Subject: [PATCH 295/312] [netdrv] net/mlx5: DR: Fix incorrect type in return + expression + +Message-id: <20200601154102.25980-26-ahleihel@redhat.com> +Patchwork-id: 315730 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 25/39] net/mlx5: DR: Fix incorrect type in return expression +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 9ff2e92c466dc1aa4d970e5027dfd66b1f32b7bc +Author: Saeed Mahameed +Date: Thu May 28 01:14:31 2020 -0700 + + net/mlx5: DR: Fix incorrect type in return expression + + dr_ste_crc32_calc() calculates crc32 and should return it in HW format. 
+ It is being used to calculate a u32 index, hence we force the return value + of u32 to avoid the sparse warning: + + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c:115:16: + warning: incorrect type in return expression (different base types) + expected unsigned int + got restricted __be32 [usertype] + + Signed-off-by: Saeed Mahameed + Reviewed-by: Mark Bloch + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +index e13ac84b56c6..a422a83ae541 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +@@ -112,7 +112,7 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length) + { + u32 crc = crc32(0, input_data, length); + +- return htonl(crc); ++ return (__force u32)htonl(crc); + } + + u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl) +-- +2.13.6 + diff --git a/SOURCES/0296-netdrv-net-mlx5-Accel-fpga-tls-fix-cast-to-__be64-an.patch b/SOURCES/0296-netdrv-net-mlx5-Accel-fpga-tls-fix-cast-to-__be64-an.patch new file mode 100644 index 0000000..a025185 --- /dev/null +++ b/SOURCES/0296-netdrv-net-mlx5-Accel-fpga-tls-fix-cast-to-__be64-an.patch @@ -0,0 +1,135 @@ +From d5a140965c323fce16afc0e91596eaa3480ea1f0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:50 -0400 +Subject: [PATCH 296/312] [netdrv] net/mlx5: Accel: fpga tls fix cast to __be64 + and incorrect argument types + +Message-id: <20200601154102.25980-28-ahleihel@redhat.com> +Patchwork-id: 315732 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 27/39] net/mlx5: Accel: fpga tls fix cast to __be64 and incorrect argument types +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit aee3e9c457f172870bdb87e675faf6c4528190b1 +Author: Saeed Mahameed +Date: Thu May 28 18:42:40 2020 -0700 + + net/mlx5: Accel: fpga tls fix cast to __be64 and incorrect argument types + + tls handle and rcd_sn are actually big endian and not in host format. + Fix that. 
+ + Fix the following sparse warnings: + drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c:177:21: + warning: cast to restricted __be64 + + drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c:178:52: + warning: incorrect type in argument 2 (different base types) + expected unsigned int [usertype] handle + got restricted __be32 [usertype] handle + + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 8 ++++---- + drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 2 +- + drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 4 ++-- + drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 4 ++-- + 5 files changed, 11 insertions(+), 11 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +index cab708af3422..cbf3d76c05a8 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +@@ -56,8 +56,8 @@ void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); + } + +-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, +- u64 rcd_sn) ++int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, ++ u32 seq, __be64 rcd_sn) + { + return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +index e09bc3858d57..aefea467f7b3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +@@ -109,8 +109,8 @@ int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + bool direction_sx); + void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx); +-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, +- u64 rcd_sn); ++int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, ++ u32 seq, __be64 rcd_sn); + bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); + u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); + int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); +@@ -125,8 +125,8 @@ mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + bool direction_sx) { return -ENOTSUPP; } + static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, + bool direction_sx) { } +-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, +- u32 seq, u64 rcd_sn) { return 0; } ++static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, ++ u32 seq, __be64 rcd_sn) { return 0; } + static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) + { + return mlx5_accel_is_ktls_device(mdev); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +index fba561ffe1d4..c01c17a5c6de 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +@@ -167,7 +167,7 @@ static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk, + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_tls_offload_context_rx *rx_ctx; +- u64 rcd_sn = *(u64 
*)rcd_sn_data; ++ __be64 rcd_sn = *(__be64 *)rcd_sn_data; + + if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX)) + return -EINVAL; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +index 22a2ef111514..29b7339ebfa3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +@@ -194,8 +194,8 @@ static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) + MLX5_GET(tls_flow, flow, direction_sx)); + } + +-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, +- u64 rcd_sn) ++int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, ++ u32 seq, __be64 rcd_sn) + { + struct mlx5_fpga_dma_buf *buf; + int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +index 3b2e37bf76fe..5714cf391d1b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +@@ -68,7 +68,7 @@ static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) + return mdev->fpga->tls->caps; + } + +-int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, +- u64 rcd_sn); ++int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, ++ u32 seq, __be64 rcd_sn); + + #endif /* __MLX5_FPGA_TLS_H__ */ +-- +2.13.6 + diff --git a/SOURCES/0297-netdrv-net-mlx5e-Allow-partial-data-mask-for-tunnel-.patch b/SOURCES/0297-netdrv-net-mlx5e-Allow-partial-data-mask-for-tunnel-.patch new file mode 100644 index 0000000..895eb3c --- /dev/null +++ b/SOURCES/0297-netdrv-net-mlx5e-Allow-partial-data-mask-for-tunnel-.patch @@ -0,0 +1,134 @@ +From a021f5288acb2261683c6e044b2a3729963ee62d Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:52 -0400 +Subject: [PATCH 297/312] [netdrv] net/mlx5e: Allow partial data mask for + tunnel options + +Message-id: <20200601154102.25980-30-ahleihel@redhat.com> +Patchwork-id: 315734 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 29/39] net/mlx5e: Allow partial data mask for tunnel options +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit d7a42ad062cc6b20b2c2a8c09dc61df2d4f5751f +Author: Roi Dayan +Date: Wed Mar 25 11:32:56 2020 +0200 + + net/mlx5e: Allow partial data mask for tunnel options + + We use mapping to save and restore the tunnel options. + Save also the tunnel options mask. 
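A minimal sketch of the key-plus-mask bookkeeping described here, assuming invented struct and field names (the change itself introduces struct tunnel_match_enc_opts holding a flow_dissector key/mask pair): storing the mask next to the key is what lets a partial match be reconstructed after the mapping lookup instead of being treated as a full match.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct opts { uint8_t data[4]; uint8_t len; };

    struct opts_match {                      /* key and mask travel together */
        struct opts key;
        struct opts mask;
    };

    int main(void)
    {
        struct opts key  = { .data = { 0xaa, 0xbb }, .len = 2 };
        struct opts mask = { .data = { 0xff, 0x0f }, .len = 2 };   /* partial mask */
        struct opts_match saved;

        memset(&saved, 0, sizeof(saved));            /* zero first, as the patch does */
        memcpy(&saved.key,  &key,  sizeof(saved.key));
        memcpy(&saved.mask, &mask, sizeof(saved.mask));

        /* Saving only the key (the old behaviour) would drop the 0x0f partial
         * mask, leaving no way to rebuild the original match exactly. */
        printf("restored: key[1]=0x%02x mask[1]=0x%02x\n",
               saved.key.data[1], saved.mask.data[1]);
        return 0;
    }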
+ + Signed-off-by: Roi Dayan + Reviewed-by: Paul Blakey + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 32 +++++++++++++++++-------- + 1 file changed, 22 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index bc8878b82078..9ee982366893 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -171,6 +171,11 @@ struct tunnel_match_key { + int filter_ifindex; + }; + ++struct tunnel_match_enc_opts { ++ struct flow_dissector_key_enc_opts key; ++ struct flow_dissector_key_enc_opts mask; ++}; ++ + /* Tunnel_id mapping is TUNNEL_INFO_BITS + ENC_OPTS_BITS. + * Upper TUNNEL_INFO_BITS for general tunnel info. + * Lower ENC_OPTS_BITS bits for enc_opts. +@@ -1824,9 +1829,7 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, + *dont_care = false; + + if (opt->opt_class != U16_MAX || +- opt->type != U8_MAX || +- memchr_inv(opt->opt_data, 0xFF, +- opt->length * 4)) { ++ opt->type != U8_MAX) { + NL_SET_ERR_MSG(extack, + "Partial match of tunnel options in chain > 0 isn't supported"); + netdev_warn(priv->netdev, +@@ -1863,6 +1866,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv, + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts; + struct flow_match_enc_opts enc_opts_match; ++ struct tunnel_match_enc_opts tun_enc_opts; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct tunnel_match_key tunnel_key; +@@ -1905,8 +1909,14 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv, + goto err_enc_opts; + + if (!enc_opts_is_dont_care) { ++ memset(&tun_enc_opts, 0, sizeof(tun_enc_opts)); ++ memcpy(&tun_enc_opts.key, enc_opts_match.key, ++ sizeof(*enc_opts_match.key)); ++ memcpy(&tun_enc_opts.mask, enc_opts_match.mask, ++ sizeof(*enc_opts_match.mask)); ++ + err = mapping_add(uplink_priv->tunnel_enc_opts_mapping, +- enc_opts_match.key, &enc_opts_id); ++ &tun_enc_opts, &enc_opts_id); + if (err) + goto err_enc_opts; + } +@@ -4691,7 +4701,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) + + int mlx5e_tc_esw_init(struct rhashtable *tc_ht) + { +- const size_t sz_enc_opts = sizeof(struct flow_dissector_key_enc_opts); ++ const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts); + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *priv; + struct mapping_ctx *mapping; +@@ -4786,7 +4796,7 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, + u32 tunnel_id) + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; +- struct flow_dissector_key_enc_opts enc_opts = {}; ++ struct tunnel_match_enc_opts enc_opts = {}; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct metadata_dst *tun_dst; +@@ -4824,7 +4834,7 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, + } + } + +- tun_dst = tun_rx_dst(enc_opts.len); ++ tun_dst = tun_rx_dst(enc_opts.key.len); + if (!tun_dst) { + WARN_ON_ONCE(true); + return false; +@@ -4838,9 +4848,11 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, + key32_to_tunnel_id(key.enc_key_id.keyid), + TUNNEL_KEY); + +- if (enc_opts.len) +- ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.data, +- enc_opts.len, enc_opts.dst_opt_type); ++ if (enc_opts.key.len) ++ 
ip_tunnel_info_opts_set(&tun_dst->u.tun_info, ++ enc_opts.key.data, ++ enc_opts.key.len, ++ enc_opts.key.dst_opt_type); + + skb_dst_set(skb, (struct dst_entry *)tun_dst); + dev = dev_get_by_index(&init_net, key.filter_ifindex); +-- +2.13.6 + diff --git a/SOURCES/0298-netdrv-net-mlx5e-en_tc-Fix-incorrect-type-in-initial.patch b/SOURCES/0298-netdrv-net-mlx5e-en_tc-Fix-incorrect-type-in-initial.patch new file mode 100644 index 0000000..329cb36 --- /dev/null +++ b/SOURCES/0298-netdrv-net-mlx5e-en_tc-Fix-incorrect-type-in-initial.patch @@ -0,0 +1,53 @@ +From 0af83e561e05f8ce72b4f67057d54c10d1a1c396 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:53 -0400 +Subject: [PATCH 298/312] [netdrv] net/mlx5e: en_tc: Fix incorrect type in + initializer warnings + +Message-id: <20200601154102.25980-31-ahleihel@redhat.com> +Patchwork-id: 315735 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 30/39] net/mlx5e: en_tc: Fix incorrect type in initializer warnings +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit c51323ee7ab4132c80db198b7d0956fef957e6ab +Author: Saeed Mahameed +Date: Wed May 27 23:41:03 2020 -0700 + + net/mlx5e: en_tc: Fix incorrect type in initializer warnings + + Fix some trivial warnings of the type: + warning: incorrect type in initializer (different base types) + + Signed-off-by: Saeed Mahameed + Reviewed-by: Mark Bloch + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 9ee982366893..5a15e4630171 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1828,7 +1828,7 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, + memchr_inv(opt->opt_data, 0, opt->length * 4)) { + *dont_care = false; + +- if (opt->opt_class != U16_MAX || ++ if (opt->opt_class != htons(U16_MAX) || + opt->type != U8_MAX) { + NL_SET_ERR_MSG(extack, + "Partial match of tunnel options in chain > 0 isn't supported"); +-- +2.13.6 + diff --git a/SOURCES/0299-netdrv-net-mlx5e-en_tc-Fix-cast-to-restricted-__be32.patch b/SOURCES/0299-netdrv-net-mlx5e-en_tc-Fix-cast-to-restricted-__be32.patch new file mode 100644 index 0000000..5be7dfa --- /dev/null +++ b/SOURCES/0299-netdrv-net-mlx5e-en_tc-Fix-cast-to-restricted-__be32.patch @@ -0,0 +1,69 @@ +From ceab994789c9455111c509858a17cc0513196b03 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:54 -0400 +Subject: [PATCH 299/312] [netdrv] net/mlx5e: en_tc: Fix cast to restricted + __be32 warning + +Message-id: <20200601154102.25980-32-ahleihel@redhat.com> +Patchwork-id: 315736 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 31/39] net/mlx5e: en_tc: Fix cast to restricted __be32 warning +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 58ff18e12c9b3bb860b32e9cac4dc8e12aec2695 +Author: Saeed Mahameed +Date: Thu May 28 00:22:12 2020 -0700 + + 
net/mlx5e: en_tc: Fix cast to restricted __be32 warning + + Fixes sparse warnings: + warning: cast to restricted __be32 + warning: restricted __be32 degrades to integer + + Signed-off-by: Saeed Mahameed + Reviewed-by: Mark Bloch + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 5a15e4630171..d4f2697d84d0 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -225,8 +225,8 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, + fmask = headers_c + soffset; + fval = headers_v + soffset; + +- mask = cpu_to_be32(mask) >> (32 - (match_len * 8)); +- data = cpu_to_be32(data) >> (32 - (match_len * 8)); ++ mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8)); ++ data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8)); + + memcpy(fmask, &mask, match_len); + memcpy(fval, &data, match_len); +@@ -2725,10 +2725,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, + continue; + + if (f->field_bsize == 32) { +- mask_be32 = (__be32)mask; ++ mask_be32 = (__force __be32)(mask); + mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); + } else if (f->field_bsize == 16) { +- mask_be32 = (__be32)mask; ++ mask_be32 = (__force __be32)(mask); + mask_be16 = *(__be16 *)&mask_be32; + mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); + } +-- +2.13.6 + diff --git a/SOURCES/0300-netdrv-net-sched-expose-HW-stats-types-per-action-us.patch b/SOURCES/0300-netdrv-net-sched-expose-HW-stats-types-per-action-us.patch new file mode 100644 index 0000000..d3d8f3b --- /dev/null +++ b/SOURCES/0300-netdrv-net-sched-expose-HW-stats-types-per-action-us.patch @@ -0,0 +1,73 @@ +From a5b2bc6cac5aa14e5003fb867391560f1cba7444 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:55 -0400 +Subject: [PATCH 300/312] [netdrv] net: sched: expose HW stats types per action + used by drivers + +Message-id: <20200601154102.25980-33-ahleihel@redhat.com> +Patchwork-id: 315737 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 32/39] net: sched: expose HW stats types per action used by drivers +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7-rc1 +Conflicts: + - Add the tc_ct.c hunks that were dropped when this patch was backported + as part of CNB work. + +commit 93a129eb8c520b032e1823447b2e1badcc650666 +Author: Jiri Pirko +Date: Sat Mar 28 16:37:43 2020 +0100 + + net: sched: expose HW stats types per action used by drivers + + It may be up to the driver (in case ANY HW stats is passed) to select + which type of HW stats he is going to use. Add an infrastructure to + expose this information to user. 
+ + $ tc filter add dev enp3s0np1 ingress proto ip handle 1 pref 1 flower dst_ip 192.168.1.1 action drop + $ tc -s filter show dev enp3s0np1 ingress + filter protocol ip pref 1 flower chain 0 + filter protocol ip pref 1 flower chain 0 handle 0x1 + eth_type ipv4 + dst_ip 192.168.1.1 + in_hw in_hw_count 2 + action order 1: gact action drop + random type none pass val 0 + index 1 ref 1 bind 1 installed 10 sec used 10 sec + Action statistics: + Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0) + backlog 0b 0p requeues 0 + used_hw_stats immediate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 5ad72232dce9..eb1adac0a79e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -677,7 +677,8 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, + return -ENOENT; + + mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse); +- flow_stats_update(&f->stats, bytes, packets, lastuse); ++ flow_stats_update(&f->stats, bytes, packets, lastuse, ++ FLOW_ACTION_HW_STATS_DELAYED); + + return 0; + } +-- +2.13.6 + diff --git a/SOURCES/0301-netdrv-net-mlx5e-Fix-stats-update-for-matchall-class.patch b/SOURCES/0301-netdrv-net-mlx5e-Fix-stats-update-for-matchall-class.patch new file mode 100644 index 0000000..39b1752 --- /dev/null +++ b/SOURCES/0301-netdrv-net-mlx5e-Fix-stats-update-for-matchall-class.patch @@ -0,0 +1,53 @@ +From d8752233d9a5fd952b8da8022adf494d835e81b0 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:56 -0400 +Subject: [PATCH 301/312] [netdrv] net/mlx5e: Fix stats update for matchall + classifier + +Message-id: <20200601154102.25980-34-ahleihel@redhat.com> +Patchwork-id: 315738 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 33/39] net/mlx5e: Fix stats update for matchall classifier +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7 + +commit 0a2a6f498fa060cc0d592d56148da856e9d77bd8 +Author: Roi Dayan +Date: Wed May 27 21:46:09 2020 +0300 + + net/mlx5e: Fix stats update for matchall classifier + + It's bytes, packets, lastused. 
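The one-line summary above is the entire bug: flow_stats_update() takes bytes before packets, and the call site passed them the other way around. Because both arguments are plain 64-bit counters, the compiler cannot notice the swap; the hedged userspace sketch below (invented names and numbers) shows how such a transposition only surfaces in the reported values:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    static void stats_update(uint64_t *total_bytes, uint64_t *total_pkts,
                             uint64_t bytes, uint64_t pkts)
    {
        *total_bytes += bytes;
        *total_pkts  += pkts;
    }

    int main(void)
    {
        uint64_t tb = 0, tp = 0, dbytes = 1500, dpkts = 1;

        stats_update(&tb, &tp, dpkts, dbytes);   /* swapped: 1 "byte", 1500 "packets" */
        printf("swapped: bytes=%" PRIu64 " pkts=%" PRIu64 "\n", tb, tp);

        tb = tp = 0;
        stats_update(&tb, &tp, dbytes, dpkts);   /* intended order */
        printf("correct: bytes=%" PRIu64 " pkts=%" PRIu64 "\n", tb, tp);
        return 0;
    }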
+ + Fixes: fcb64c0f5640 ("net/mlx5: E-Switch, add ingress rate support") + Signed-off-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index d4f2697d84d0..22b67563412d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -4588,7 +4588,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; + dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; + rpriv->prev_vf_vport_stats = cur_stats; +- flow_stats_update(&ma->stats, dpkts, dbytes, jiffies); ++ flow_stats_update(&ma->stats, dbytes, dpkts, jiffies); + } + + static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, +-- +2.13.6 + diff --git a/SOURCES/0302-netdrv-net-mlx5e-Properly-set-default-values-when-di.patch b/SOURCES/0302-netdrv-net-mlx5e-Properly-set-default-values-when-di.patch new file mode 100644 index 0000000..7a3e43c --- /dev/null +++ b/SOURCES/0302-netdrv-net-mlx5e-Properly-set-default-values-when-di.patch @@ -0,0 +1,163 @@ +From 43b996d1219f2ea80cc56d91384e00948bc71174 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:57 -0400 +Subject: [PATCH 302/312] [netdrv] net/mlx5e: Properly set default values when + disabling adaptive moderation + +Message-id: <20200601154102.25980-35-ahleihel@redhat.com> +Patchwork-id: 315739 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 34/39] net/mlx5e: Properly set default values when disabling adaptive moderation +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7 + +commit ebeaf084ad5c0eeaf8ea3314f62cc28cb79d529f +Author: Tal Gilboa +Date: Thu Apr 23 13:23:06 2020 +0300 + + net/mlx5e: Properly set default values when disabling adaptive moderation + + Add a call to mlx5e_reset_rx/tx_moderation() when enabling/disabling + adaptive moderation, in order to select the proper default values. + + In order to do so, we separate the logic of selecting the moderation values + and setting moderion mode (CQE/EQE based). 
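A rough sketch of the split being described, using invented names and default numbers (in the driver the defaults come from mlx5e_get_def_rx/tx_moderation() and the mode is recorded in a priv-flag, as the hunks below show): "reset" only recomputes default values for a given mode, while "set" additionally records the mode flag, so disabling adaptive moderation can run the reset step alone and land on proper defaults.

    #include <stdio.h>
    #include <stdbool.h>

    struct moderation { int usec; bool cqe_based; };

    static int def_usec(bool adaptive, bool cqe_based)
    {
        return adaptive ? 8 : (cqe_based ? 16 : 32);   /* invented defaults */
    }

    static void reset_moderation(struct moderation *m, bool cqe_based, bool adaptive)
    {
        m->usec = def_usec(adaptive, cqe_based);       /* values only */
    }

    static void set_moderation(struct moderation *m, bool cqe_based, bool adaptive)
    {
        reset_moderation(m, cqe_based, adaptive);
        m->cqe_based = cqe_based;                      /* only "set" records the mode */
    }

    int main(void)
    {
        struct moderation m = { 0 };

        set_moderation(&m, true, true);                /* adaptive enabled at open time */
        reset_moderation(&m, m.cqe_based, false);      /* user turns adaptive off */
        printf("usec=%d cqe_based=%d\n", m.usec, m.cqe_based);
        return 0;
    }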
+ + Fixes: 0088cbbc4b66 ("net/mlx5e: Enable CQE based moderation on TX CQ") + Fixes: 9908aa292971 ("net/mlx5e: CQE based moderation") + Signed-off-by: Tal Gilboa + Reviewed-by: Tariq Toukan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 ++++++---- + .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 21 +++++++++++++++++---- + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 20 ++++++++++++++------ + 3 files changed, 37 insertions(+), 14 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h +index 9832ac9a55dc..75f4b5a29ae3 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +@@ -1056,10 +1056,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); + + void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels); +-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, +- u8 cq_period_mode); +-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, +- u8 cq_period_mode); ++ ++void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); ++void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); ++void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); ++void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); ++ + void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); + void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, + struct mlx5e_params *params); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index cae5da83e793..a6fadcfae236 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, + struct dim_cq_moder *rx_moder, *tx_moder; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; ++ bool reset_rx, reset_tx; + int err = 0; +- bool reset; + + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + return -EOPNOTSUPP; +@@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, + } + /* we are opened */ + +- reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || +- (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); ++ reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; ++ reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; + +- if (!reset) { ++ if (!reset_rx && !reset_tx) { + mlx5e_set_priv_channels_coalesce(priv, coal); + priv->channels.params = new_channels.params; + goto out; + } + ++ if (reset_rx) { ++ u8 mode = MLX5E_GET_PFLAG(&new_channels.params, ++ MLX5E_PFLAG_RX_CQE_BASED_MODER); ++ ++ mlx5e_reset_rx_moderation(&new_channels.params, mode); ++ } ++ if (reset_tx) { ++ u8 mode = MLX5E_GET_PFLAG(&new_channels.params, ++ MLX5E_PFLAG_TX_CQE_BASED_MODER); ++ ++ mlx5e_reset_tx_moderation(&new_channels.params, mode); ++ } ++ + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); + + out: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +index e0bd700634c8..98f153022f2d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c 
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +@@ -4723,7 +4723,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + +-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) ++void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) + { + if (params->tx_dim_enabled) { + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); +@@ -4732,13 +4732,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) + } else { + params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); + } +- +- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, +- params->tx_cq_moderation.cq_period_mode == +- MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + } + +-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) ++void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) + { + if (params->rx_dim_enabled) { + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); +@@ -4747,7 +4743,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) + } else { + params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); + } ++} ++ ++void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) ++{ ++ mlx5e_reset_tx_moderation(params, cq_period_mode); ++ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, ++ params->tx_cq_moderation.cq_period_mode == ++ MLX5_CQ_PERIOD_MODE_START_FROM_CQE); ++} + ++void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) ++{ ++ mlx5e_reset_rx_moderation(params, cq_period_mode); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, + params->rx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); +-- +2.13.6 + diff --git a/SOURCES/0303-netdrv-net-mlx5e-Fix-MLX5_TC_CT-dependencies.patch b/SOURCES/0303-netdrv-net-mlx5e-Fix-MLX5_TC_CT-dependencies.patch new file mode 100644 index 0000000..3f7ca61 --- /dev/null +++ b/SOURCES/0303-netdrv-net-mlx5e-Fix-MLX5_TC_CT-dependencies.patch @@ -0,0 +1,56 @@ +From 043b6f1cc76176fae641bd0da12c4d7339f70fa4 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:58 -0400 +Subject: [PATCH 303/312] [netdrv] net/mlx5e: Fix MLX5_TC_CT dependencies + +Message-id: <20200601154102.25980-36-ahleihel@redhat.com> +Patchwork-id: 315741 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 35/39] net/mlx5e: Fix MLX5_TC_CT dependencies +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7 + +commit cb9a0641b531ac11cd7e3076de23ceada19b892e +Author: Vlad Buslov +Date: Mon May 25 16:57:51 2020 +0300 + + net/mlx5e: Fix MLX5_TC_CT dependencies + + Change MLX5_TC_CT config dependencies to include MLX5_ESWITCH instead of + MLX5_CORE_EN && NET_SWITCHDEV, which are already required by MLX5_ESWITCH. + Without this change mlx5 fails to compile if user disables MLX5_ESWITCH + without also manually disabling MLX5_TC_CT. 
+ + Fixes: 4c3844d9e97e ("net/mlx5e: CT: Introduce connection tracking") + Signed-off-by: Vlad Buslov + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +index e1dff89804f6..fa877f81b034 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig ++++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +@@ -79,7 +79,7 @@ config MLX5_ESWITCH + + config MLX5_TC_CT + bool "MLX5 TC connection tracking offload support" +- depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT ++ depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT + default y + help + Say Y here if you want to support offloading connection tracking rules +-- +2.13.6 + diff --git a/SOURCES/0304-netdrv-net-mlx5e-replace-EINVAL-in-mlx5e_flower_pars.patch b/SOURCES/0304-netdrv-net-mlx5e-replace-EINVAL-in-mlx5e_flower_pars.patch new file mode 100644 index 0000000..b8023e5 --- /dev/null +++ b/SOURCES/0304-netdrv-net-mlx5e-replace-EINVAL-in-mlx5e_flower_pars.patch @@ -0,0 +1,71 @@ +From 4fedc08dc4ec7fcbcc3311a2fb10af94b10bbb55 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:59 -0400 +Subject: [PATCH 304/312] [netdrv] net/mlx5e: replace EINVAL in + mlx5e_flower_parse_meta() + +Message-id: <20200601154102.25980-37-ahleihel@redhat.com> +Patchwork-id: 315740 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 36/39] net/mlx5e: replace EINVAL in mlx5e_flower_parse_meta() +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7 + +commit a683012a8e77675a1947cc8f11f97cdc1d5bb769 +Author: Pablo Neira Ayuso +Date: Sun Apr 19 14:12:35 2020 +0200 + + net/mlx5e: replace EINVAL in mlx5e_flower_parse_meta() + + The drivers reports EINVAL to userspace through netlink on invalid meta + match. This is confusing since EINVAL is usually reserved for malformed + netlink messages. Replace it by more meaningful codes. 
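A hedged userspace rendering of the error-code convention being applied (constants from <errno.h>; the kernel variant returns the negated values and also fills in the extack message, as the hunks below do): EOPNOTSUPP for a well-formed request the driver cannot honour, ENOENT for a referenced object that is missing, and EINVAL left for genuinely malformed input.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static int parse_meta(unsigned int ifindex_mask, bool ingress_dev_found,
                          bool ingress_dev_matches)
    {
        if (ifindex_mask != 0xFFFFFFFFu)
            return -EOPNOTSUPP;     /* understood, but partial masks unsupported */
        if (!ingress_dev_found)
            return -ENOENT;         /* the ingress port to match on is missing */
        if (!ingress_dev_matches)
            return -EOPNOTSUPP;     /* cannot match on this ingress filter port */
        return 0;
    }

    int main(void)
    {
        int err = parse_meta(0xFFFFFFFFu, false, true);
        printf("err=%d (%s)\n", err, strerror(-err));
        return 0;
    }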
+ + Fixes: 6d65bc64e232 ("net/mlx5e: Add mlx5e_flower_parse_meta support") + Signed-off-by: Pablo Neira Ayuso + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 22b67563412d..2df19165a78a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2078,7 +2078,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, + flow_rule_match_meta(rule, &match); + if (match.mask->ingress_ifindex != 0xFFFFFFFF) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + ingress_dev = __dev_get_by_index(dev_net(filter_dev), +@@ -2086,13 +2086,13 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev, + if (!ingress_dev) { + NL_SET_ERR_MSG_MOD(extack, + "Can't find the ingress port to match on"); +- return -EINVAL; ++ return -ENOENT; + } + + if (ingress_dev != filter_dev) { + NL_SET_ERR_MSG_MOD(extack, + "Can't match on the ingress filter port"); +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + return 0; +-- +2.13.6 + diff --git a/SOURCES/0305-netdrv-net-mlx5e-Remove-warning-devices-are-not-on-s.patch b/SOURCES/0305-netdrv-net-mlx5e-Remove-warning-devices-are-not-on-s.patch new file mode 100644 index 0000000..0623c18 --- /dev/null +++ b/SOURCES/0305-netdrv-net-mlx5e-Remove-warning-devices-are-not-on-s.patch @@ -0,0 +1,62 @@ +From d651ca00b9162eb20eb11e0fb40b80db9face395 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:41:01 -0400 +Subject: [PATCH 305/312] [netdrv] net/mlx5e: Remove warning "devices are not + on same switch HW" + +Message-id: <20200601154102.25980-39-ahleihel@redhat.com> +Patchwork-id: 315743 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 38/39] net/mlx5e: Remove warning "devices are not on same switch HW" +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: v5.7 + +commit 20300aafa7a2719f71d50f97a8846459d9869b75 +Author: Maor Dickman +Date: Sun May 24 09:45:44 2020 +0300 + + net/mlx5e: Remove warning "devices are not on same switch HW" + + On tunnel decap rule insertion, the indirect mechanism will attempt to + offload the rule on all uplink representors which will trigger the + "devices are not on same switch HW, can't offload forwarding" message + for the uplink which isn't on the same switch HW as the VF representor. + + The above flow is valid and shouldn't cause warning message, + fix by removing the warning and only report this flow using extack. 
+ + Fixes: 321348475d54 ("net/mlx5e: Fix allowed tc redirect merged eswitch offload cases") + Signed-off-by: Maor Dickman + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 2df19165a78a..c5966589625f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -3830,10 +3830,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); +- netdev_warn(priv->netdev, +- "devices %s %s not on same switch HW, can't offload forwarding\n", +- priv->netdev->name, +- out_dev->name); + return -EOPNOTSUPP; + } + +-- +2.13.6 + diff --git a/SOURCES/0306-include-net-mlx5-HW-bit-for-goto-chain-offload-suppo.patch b/SOURCES/0306-include-net-mlx5-HW-bit-for-goto-chain-offload-suppo.patch new file mode 100644 index 0000000..71eaac0 --- /dev/null +++ b/SOURCES/0306-include-net-mlx5-HW-bit-for-goto-chain-offload-suppo.patch @@ -0,0 +1,56 @@ +From 8f5565930abea792ddc7c4cff6642a7137a48738 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Tue, 19 May 2020 07:48:43 -0400 +Subject: [PATCH 306/312] [include] net/mlx5: HW bit for goto chain offload + support + +Message-id: <20200519074934.6303-13-ahleihel@redhat.com> +Patchwork-id: 310521 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1663246 12/63] net/mlx5: HW bit for goto chain offload support +Bugzilla: 1663246 +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson +RH-Acked-by: John Linville +RH-Acked-by: Ivan Vecera +RH-Acked-by: Tony Camuso +RH-Acked-by: Kamal Heib + +Bugzilla: http://bugzilla.redhat.com/1663246 +Upstream: v5.7-rc1 + +commit e0ebd8eb36ed850a22a9a0ca83edc4a40ad67c16 +Author: Eli Cohen +Date: Mon Mar 2 16:15:22 2020 -0800 + + net/mlx5: HW bit for goto chain offload support + + Add the HW bit definition indecating goto chain offload support. 
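For readers new to the mlx5_ifc convention used in the hunk below: every "u8 name[0xN]" entry stands for an N-bit field, reserved fields are named after their starting bit offset, and carving a named bit out of a reserved run must leave the overall width untouched. The small stand-alone checker below is only a sketch; its table simply mirrors the two fields from this hunk and verifies that property.

    #include <stdio.h>

    struct field { const char *name; unsigned int start_bit; unsigned int width; };

    int main(void)
    {
        /* before: u8 reserved_at_19[0x7];  after: the two entries below */
        struct field after[] = {
            { "reformat_and_fwd_to_table", 0x19, 0x1 },
            { "reserved_at_1a",            0x1a, 0x6 },
        };
        unsigned int next = 0x19;       /* where reserved_at_19 used to start */
        unsigned int i;

        for (i = 0; i < sizeof(after) / sizeof(after[0]); i++) {
            if (after[i].start_bit != next)
                printf("gap or overlap before %s\n", after[i].name);
            next = after[i].start_bit + after[i].width;
        }
        printf("layout ends at bit 0x%x (0x20 leaves the rest of the struct unchanged)\n", next);
        return 0;
    }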
+ + Signed-off-by: Eli Cohen + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/mlx5_ifc.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index a8adb6e7d1fd..989a4d3d8034 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -414,7 +414,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { + u8 reserved_at_16[0x1]; + u8 table_miss_action_domain[0x1]; + u8 termination_table[0x1]; +- u8 reserved_at_19[0x7]; ++ u8 reformat_and_fwd_to_table[0x1]; ++ u8 reserved_at_1a[0x6]; + u8 reserved_at_20[0x2]; + u8 log_max_ft_size[0x6]; + u8 log_max_modify_header_context[0x8]; +-- +2.13.6 + diff --git a/SOURCES/0307-include-netfilter-add-include-guard-to-xt_connlabel..patch b/SOURCES/0307-include-netfilter-add-include-guard-to-xt_connlabel..patch new file mode 100644 index 0000000..12e0e43 --- /dev/null +++ b/SOURCES/0307-include-netfilter-add-include-guard-to-xt_connlabel..patch @@ -0,0 +1,59 @@ +From 2a64d48496c728bef101e32c3b8697c9eecbe85d Mon Sep 17 00:00:00 2001 +From: Marcelo Leitner +Date: Thu, 21 May 2020 03:55:12 -0400 +Subject: [PATCH 307/312] [include] netfilter: add include guard to + xt_connlabel.h + +Message-id: <75f3f7ff23efae20be98ea07eb9cd44e6ea2942b.1590033302.git.mleitner@redhat.com> +Patchwork-id: 313057 +Patchwork-instance: patchwork +O-Subject: [RHEL-8.3 net 1/2] netfilter: add include guard to xt_connlabel.h +Bugzilla: 1837856 +RH-Acked-by: Ivan Vecera +RH-Acked-by: Jarod Wilson +RH-Acked-by: Florian Westphal + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1837856 +Build Info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=28732649 + +commit 91826ba13855f73e252fef68369b3b0e1ed25253 +Author: Masahiro Yamada +Date: Mon Jul 29 00:51:38 2019 +0900 + + netfilter: add include guard to xt_connlabel.h + + Add a header include guard just in case. + + Signed-off-by: Masahiro Yamada + Acked-by: Florian Westphal + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Marcelo Ricardo Leitner +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + include/uapi/linux/netfilter/xt_connlabel.h | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/include/uapi/linux/netfilter/xt_connlabel.h b/include/uapi/linux/netfilter/xt_connlabel.h +index 2312f0ec07b2..323f0dfc2a4e 100644 +--- a/include/uapi/linux/netfilter/xt_connlabel.h ++++ b/include/uapi/linux/netfilter/xt_connlabel.h +@@ -1,4 +1,8 @@ + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++ ++#ifndef _UAPI_XT_CONNLABEL_H ++#define _UAPI_XT_CONNLABEL_H ++ + #include + + #define XT_CONNLABEL_MAXBIT 127 +@@ -11,3 +15,5 @@ struct xt_connlabel_mtinfo { + __u16 bit; + __u16 options; + }; ++ ++#endif /* _UAPI_XT_CONNLABEL_H */ +-- +2.13.6 + diff --git a/SOURCES/0308-include-netfilter-fix-include-guards.patch b/SOURCES/0308-include-netfilter-fix-include-guards.patch new file mode 100644 index 0000000..4c693d3 --- /dev/null +++ b/SOURCES/0308-include-netfilter-fix-include-guards.patch @@ -0,0 +1,70 @@ +From 09fcb7d18730125efb79e138f424e4fa5dea6b67 Mon Sep 17 00:00:00 2001 +From: Marcelo Leitner +Date: Thu, 21 May 2020 03:55:13 -0400 +Subject: [PATCH 308/312] [include] netfilter: fix include guards. 
+ +Message-id: <6d269112c4be0c94fc1c7ae4aaf220187c783ea6.1590033302.git.mleitner@redhat.com> +Patchwork-id: 313056 +Patchwork-instance: patchwork +O-Subject: [RHEL-8.3 net 2/2] netfilter: fix include guards. +Bugzilla: 1837856 +RH-Acked-by: Ivan Vecera +RH-Acked-by: Jarod Wilson +RH-Acked-by: Florian Westphal + +Godzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1837856 +Build Info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=28732649 +Conflicts: missing chunk was backported as part of bz1811193 + +commit 0286fbc624e2842ececb853e74645b479b55f0a3 +Author: Jeremy Sowden +Date: Fri Sep 13 09:13:01 2019 +0100 + + netfilter: fix include guards. + + nf_conntrack_labels.h has no include guard. Add it. + + The comment following the #endif in the nf_flow_table.h include guard + referred to the wrong macro. Fix it. + + Signed-off-by: Jeremy Sowden + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Marcelo Ricardo Leitner +Signed-off-by: Timothy Redaelli +Signed-off-by: Frantisek Hrbata +--- + include/net/netfilter/nf_conntrack_labels.h | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h +index 4eacce6f3bcc..ba916411c4e1 100644 +--- a/include/net/netfilter/nf_conntrack_labels.h ++++ b/include/net/netfilter/nf_conntrack_labels.h +@@ -1,11 +1,14 @@ + /* SPDX-License-Identifier: GPL-2.0 */ +-#include +-#include ++ ++#ifndef _NF_CONNTRACK_LABELS_H ++#define _NF_CONNTRACK_LABELS_H ++ + #include + #include ++#include ++#include + #include + #include +- + #include + + #define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE) +@@ -51,3 +54,5 @@ static inline void nf_conntrack_labels_fini(void) {} + static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; } + static inline void nf_connlabels_put(struct net *net) {} + #endif ++ ++#endif /* _NF_CONNTRACK_LABELS_H */ +-- +2.13.6 + diff --git a/SOURCES/0310-include-net-mlx5-IPSec-Fix-incorrect-type-for-spi.patch b/SOURCES/0310-include-net-mlx5-IPSec-Fix-incorrect-type-for-spi.patch new file mode 100644 index 0000000..36a44d4 --- /dev/null +++ b/SOURCES/0310-include-net-mlx5-IPSec-Fix-incorrect-type-for-spi.patch @@ -0,0 +1,58 @@ +From 6e13ba7f4e12b25459c2d9e792ca56d84f9c8b52 Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Mon, 1 Jun 2020 15:40:51 -0400 +Subject: [PATCH 310/312] [include] net/mlx5: IPSec: Fix incorrect type for spi + +Message-id: <20200601154102.25980-29-ahleihel@redhat.com> +Patchwork-id: 315733 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1842258 28/39] net/mlx5: IPSec: Fix incorrect type for spi +Bugzilla: 1842258 +RH-Acked-by: Honggang Li +RH-Acked-by: Kamal Heib +RH-Acked-by: Marcelo Leitner +RH-Acked-by: Jarod Wilson + +Bugzilla: http://bugzilla.redhat.com/1842258 +Upstream: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git , branch: master + +commit 44345c4c130ee3df9b9fbc366d59ab3ac707d7f8 +Author: Saeed Mahameed +Date: Fri May 29 00:47:12 2020 -0700 + + net/mlx5: IPSec: Fix incorrect type for spi + + spi is __be32, fix that. 
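As a hedged aside on why an SPI is naturally a __be32: the value sits at the start of the ESP header in network byte order and only becomes a host-order number after ntohl(). The userspace sketch below (buffer contents invented) makes the distinction visible on a little-endian machine:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <string.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* first four bytes of an ESP header: the SPI, big endian on the wire */
        const uint8_t esp_hdr[8] = { 0x00, 0x00, 0x10, 0x01, 0x00, 0x00, 0x00, 0x2a };
        uint32_t spi_raw;

        memcpy(&spi_raw, esp_hdr, sizeof(spi_raw));      /* still wire (big-endian) order */
        printf("raw wire value read natively: 0x%08" PRIx32 "\n", spi_raw);
        printf("host-order SPI via ntohl():   0x%08" PRIx32 "\n", ntohl(spi_raw));
        return 0;
    }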
+ + Fixes sparse warning: + drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c:74:64 + warning: incorrect type + + Signed-off-by: Saeed Mahameed + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + include/linux/mlx5/accel.h | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h +index 5613e677a5f9..146d7dfe730b 100644 +--- a/include/linux/mlx5/accel.h ++++ b/include/linux/mlx5/accel.h +@@ -76,7 +76,11 @@ struct aes_gcm_keymat { + struct mlx5_accel_esp_xfrm_attrs { + enum mlx5_accel_esp_action action; + u32 esn; +- u32 spi; ++#ifndef __GENKSYMS__ ++ __be32 spi; ++#else ++ u32 spi; ++#endif + u32 seq; + u32 tfc_pad; + u32 flags; +-- +2.13.6 + diff --git a/SOURCES/0313-netdrv-net-mlx5e-Disable-devlink-port-support-for-no.patch b/SOURCES/0313-netdrv-net-mlx5e-Disable-devlink-port-support-for-no.patch new file mode 100644 index 0000000..47683e6 --- /dev/null +++ b/SOURCES/0313-netdrv-net-mlx5e-Disable-devlink-port-support-for-no.patch @@ -0,0 +1,82 @@ +From 0bdc004c006f8c9aae2809e747b5d6025315e69c Mon Sep 17 00:00:00 2001 +From: Alaa Hleihel +Date: Wed, 22 Jul 2020 13:57:11 -0400 +Subject: [PATCH 313/313] [netdrv] net/mlx5e: Disable devlink port support for + non-switchdev mode + +Message-id: <20200722135711.29389-1-ahleihel@redhat.com> +Patchwork-id: 322598 +Patchwork-instance: patchwork +O-Subject: [RHEL8.3 BZ 1849623] net/mlx5e: Disable devlink port support for non-switchdev mode +Bugzilla: 1858501 1852904 1849623 +RH-Acked-by: Jarod Wilson +RH-Acked-by: Marcelo Leitner +RH-Acked-by: John Linville + +Bugzilla: http://bugzilla.redhat.com/1849623 +Bugzilla: http://bugzilla.redhat.com/1852904 +Bugzilla: http://bugzilla.redhat.com/1858501 +Brew: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=30252169 +Upstream: RHEL-only +Tested: Sanity tests and verified that netdev interface naming scheme + is now the same as we had in RHEL-8.2. + +After the discussion in BZ 1849623, it was decided to disable the +'devlink port' support for non-switchdev mode in RHEL-8.3 since this +feature changed the netdev interface naming scheme; the interfaces +started to get an 'npX' suffix, which could cause issues for users +who relied on driver-given names and did not set the names in ifcfg +files or udev rules, etc. 
+ +This patch essentially reverts the following commits but with minimal +code changes to avoid future conflicts: + 2c1f000844a5 [netdrv] net/mlx5e: Fix devlink port register sequence + f30a3e5bd818 [netdrv] net/mlx5e: Use devlink virtual flavour for VF devlink port + 7712d03e7e53 [netdrv] net/mlx5e: Add support for devlink-port in non-representors mode + +Signed-off-by: Alaa Hleihel +Signed-off-by: Frantisek Hrbata +--- + drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +index f8b2de4b04be..d31f5d0c29ee 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +@@ -7,6 +7,8 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv) + { + struct devlink *devlink = priv_to_devlink(priv->mdev); + ++ return 0; /* RHEL-only: Disable 'devlink port' support for non-switchdev mode*/ ++ + if (mlx5_core_is_pf(priv->mdev)) + devlink_port_attrs_set(&priv->dl_port, + DEVLINK_PORT_FLAVOUR_PHYSICAL, +@@ -23,11 +25,15 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv) + + void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv) + { ++ return; /* RHEL-only: Disable 'devlink port' support for non-switchdev mode*/ ++ + devlink_port_type_eth_set(&priv->dl_port, priv->netdev); + } + + void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) + { ++ return; /* RHEL-only: Disable 'devlink port' support for non-switchdev mode*/ ++ + devlink_port_unregister(&priv->dl_port); + } + +@@ -35,5 +41,7 @@ struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) + { + struct mlx5e_priv *priv = netdev_priv(dev); + ++ return NULL; /* RHEL-only: Disable 'devlink port' support for non-switchdev mode*/ ++ + return &priv->dl_port; + } +-- +2.13.6 + diff --git a/SOURCES/9001-Bump-driver-version.patch b/SOURCES/9001-Bump-driver-version.patch new file mode 100644 index 0000000..f06e5d2 --- /dev/null +++ b/SOURCES/9001-Bump-driver-version.patch @@ -0,0 +1,13 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 2020-07-14 03:26:20.193719944 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 2020-07-14 03:32:06.851933944 +0200 +@@ -44,7 +44,7 @@ + #include + + #define DRIVER_NAME "mlx5_core" +-#define DRIVER_VERSION "5.0-0" ++#define DRIVER_VERSION "5.0-0_dup8.2" + + extern uint mlx5_core_debug_mask; + diff --git a/SOURCES/9002-Add-mlx_backport_compat-h.patch b/SOURCES/9002-Add-mlx_backport_compat-h.patch new file mode 100644 index 0000000..6704c82 --- /dev/null +++ b/SOURCES/9002-Add-mlx_backport_compat-h.patch @@ -0,0 +1,9 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-14 15:58:11.554035920 +0200 +@@ -0,0 +1,4 @@ ++#ifndef MLX5_BACKPORT_COMPAT_H ++#define MLX5_BACKPORT_COMPAT_H ++ ++#endif diff --git a/SOURCES/9003-Add-xsk_umem_adjust_offset.patch b/SOURCES/9003-Add-xsk_umem_adjust_offset.patch new file mode 100644 index 0000000..6f15b0b --- /dev/null +++ b/SOURCES/9003-Add-xsk_umem_adjust_offset.patch @@ -0,0 +1,46 @@ +Index: 
src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-14 20:07:10.942801484 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-14 20:07:49.481861209 +0200 +@@ -1,4 +1,17 @@ + #ifndef MLX5_BACKPORT_COMPAT_H + #define MLX5_BACKPORT_COMPAT_H + ++#include ++ ++ ++/* ++ * A stub for xsk_umem_adjust_offset; since only unaligned mode is supported ++ * in RHEL 8.2; the code is somewhat simplified. ++ */ ++static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, ++ u64 offset) ++{ ++ return address + offset; ++} ++ + #endif +Index: src/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 2020-07-14 20:07:07.866796717 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 2020-07-14 20:07:10.954801503 +0200 +@@ -34,6 +34,7 @@ + #include + #include "en/xdp.h" + #include "en/params.h" ++#include "backport_compat.h" + + int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) + { +Index: src/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 2020-07-14 20:07:07.809796629 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 2020-07-14 20:07:55.423870417 +0200 +@@ -4,6 +4,7 @@ + #include "rx.h" + #include "en/xdp.h" + #include ++#include "../../backport_compat.h" + + /* RX data path */ + diff --git a/SOURCES/9005-reporter_rx-strip-extack-parameter.patch b/SOURCES/9005-reporter_rx-strip-extack-parameter.patch new file mode 100644 index 0000000..9599865 --- /dev/null +++ b/SOURCES/9005-reporter_rx-strip-extack-parameter.patch @@ -0,0 +1,24 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 2020-07-14 20:14:17.355462304 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 2020-07-14 20:14:58.124525484 +0200 +@@ -222,8 +222,7 @@ + } + + static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter, +- void *context, +- struct netlink_ext_ack *extack) ++ void *context) + { + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_err_ctx *err_ctx = context; +@@ -302,8 +301,7 @@ + } + + static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, +- struct devlink_fmsg *fmsg, +- struct netlink_ext_ack *extack) ++ struct devlink_fmsg *fmsg) + { + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_params *params = &priv->channels.params; diff --git a/SOURCES/9006-Provide-DEVLINK_PORT_FLAVOUR_VIRTUAL-stub-value.patch b/SOURCES/9006-Provide-DEVLINK_PORT_FLAVOUR_VIRTUAL-stub-value.patch new file mode 100644 index 0000000..1cb4b23 --- /dev/null +++ b/SOURCES/9006-Provide-DEVLINK_PORT_FLAVOUR_VIRTUAL-stub-value.patch @@ -0,0 +1,24 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-15 10:38:30.270947813 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 
2020-07-15 10:38:30.289947822 +0200 +@@ -14,4 +14,7 @@ + return address + offset; + } + ++ ++#define DEVLINK_PORT_FLAVOUR_VIRTUAL DEVLINK_PORT_FLAVOUR_PHYSICAL ++ + #endif +Index: src/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c 2020-07-15 10:38:29.905947639 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c 2020-07-15 10:38:30.290947823 +0200 +@@ -2,6 +2,7 @@ + /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + + #include "en/devlink.h" ++#include "../backport_compat.h" + + int mlx5e_devlink_port_register(struct mlx5e_priv *priv) + { diff --git a/SOURCES/9007-Provide-TC_SETUP_FT-definition.patch b/SOURCES/9007-Provide-TC_SETUP_FT-definition.patch new file mode 100644 index 0000000..c0d4d08 --- /dev/null +++ b/SOURCES/9007-Provide-TC_SETUP_FT-definition.patch @@ -0,0 +1,24 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-14 20:20:22.753886784 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-14 20:23:18.712075566 +0200 +@@ -23,4 +23,7 @@ + + #define DEVLINK_PORT_FLAVOUR_VIRTUAL DEVLINK_PORT_FLAVOUR_PHYSICAL + ++ ++#define TC_SETUP_FT 14 ++ + #endif +Index: src/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 2020-07-14 20:17:21.652692483 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 2020-07-14 20:24:05.088125322 +0200 +@@ -50,6 +50,7 @@ + #include "lib/port_tun.h" + #define CREATE_TRACE_POINTS + #include "diag/en_rep_tracepoint.h" ++#include "backport_compat.h" + + #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ + max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) diff --git a/SOURCES/9008-Add-flow_action_basic_hw_stats_types_check b/SOURCES/9008-Add-flow_action_basic_hw_stats_types_check new file mode 100644 index 0000000..26c1657 --- /dev/null +++ b/SOURCES/9008-Add-flow_action_basic_hw_stats_types_check @@ -0,0 +1,37 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-15 10:38:36.891950970 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-15 10:38:44.900954789 +0200 +@@ -1,6 +1,7 @@ + #ifndef MLX5_BACKPORT_COMPAT_H + #define MLX5_BACKPORT_COMPAT_H + ++#include + #include + + +@@ -20,4 +21,12 @@ + + #define TC_SETUP_FT 14 + ++ ++static inline bool ++flow_action_basic_hw_stats_types_check(const struct flow_action *action, ++ struct netlink_ext_ack *extack) ++{ ++ return true; ++} ++ + #endif +Index: src/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 2020-07-15 10:38:30.214947786 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 2020-07-15 10:38:36.905950977 +0200 +@@ -60,6 +60,7 @@ + #include "lib/devcom.h" + #include "lib/geneve.h" + #include "diag/en_tc_tracepoint.h" ++#include "backport_compat.h" + + #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) + diff --git a/SOURCES/9009-add-NUM_FLOW_ACTIONS.patch 
b/SOURCES/9009-add-NUM_FLOW_ACTIONS.patch new file mode 100644 index 0000000..2cfd4ab --- /dev/null +++ b/SOURCES/9009-add-NUM_FLOW_ACTIONS.patch @@ -0,0 +1,24 @@ +Index: src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-15 10:38:44.900954789 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/backport_compat.h 2020-07-15 10:38:50.070957254 +0200 +@@ -29,4 +29,7 @@ + return true; + } + ++ ++#define NUM_FLOW_ACTIONS (FLOW_ACTION_MPLS_MANGLE + 1) ++ + #endif +Index: src/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c +=================================================================== +--- src.orig/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c 2020-07-15 10:38:44.900954789 +0200 ++++ src/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c 2020-07-15 10:38:50.070957254 +0200 +@@ -3,6 +3,7 @@ + + #define CREATE_TRACE_POINTS + #include "en_tc_tracepoint.h" ++#include "../backport_compat.h" + + void put_ids_to_array(int *ids, + const struct flow_action_entry *entries, diff --git a/SPECS/kmod-redhat-mlx5_core.spec b/SPECS/kmod-redhat-mlx5_core.spec new file mode 100644 index 0000000..9daf9ae --- /dev/null +++ b/SPECS/kmod-redhat-mlx5_core.spec @@ -0,0 +1,932 @@ +%define kmod_name mlx5_core +%define kmod_vendor redhat +%define kmod_rpm_name kmod-redhat-mlx5_core +%define kmod_driver_version 5.0_0_dup8.2 +%define kmod_driver_epoch %{nil} +%define kmod_rpm_release 2 +%define kmod_kernel_version 4.18.0-193.el8 +%define kmod_kernel_version_min %{nil} +%define kmod_kernel_version_dep %{nil} +%define kmod_kbuild_dir drivers/net/ethernet/mellanox/mlx5/core +%define kmod_dependencies %{nil} +%define kmod_dist_build_deps %{nil} +%define kmod_build_dependencies %{nil} +%define kmod_devel_package 1 +%define kmod_devel_src_paths include +%define kmod_install_path extra/kmod-redhat-mlx5_core +%define kernel_pkg kernel +%define kernel_devel_pkg kernel-devel +%define kernel_modules_pkg kernel-modules + +%{!?dist: %define dist .el8_2} +%{!?make_build: %define make_build make} + +%if "%{kmod_kernel_version_dep}" == "" +%define kmod_kernel_version_dep %{kmod_kernel_version} +%endif + +%if "%{kmod_dist_build_deps}" == "" +%if (0%{?rhel} > 7) || (0%{?centos} > 7) +%define kmod_dist_build_deps redhat-rpm-config kernel-abi-whitelists elfutils-libelf-devel kernel-rpm-macros kmod +%else +%define kmod_dist_build_deps redhat-rpm-config kernel-abi-whitelists +%endif +%endif + +Source0: %{kmod_name}-%{kmod_vendor}-%{kmod_driver_version}.tar.bz2 +# Source code patches +Patch0: 0001-netdrv-mlx5e-allow-TSO-on-VXLAN-over-VLAN-topologies.patch +Patch1: 0002-netdrv-net-reject-PTP-periodic-output-requests-with-.patch +Patch2: 0003-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch +Patch3: 0004-netdrv-mlx5e-Reorder-mirrer-action-parsing-to-check-.patch +Patch4: 0005-netdrv-net-mlx5e-Move-the-SW-XSK-code-from-NAPI-poll.patch +Patch5: 0006-netdrv-mlx5e-Allow-XSK-frames-smaller-than-a-page.patch +Patch6: 0007-netdrv-net-Use-skb-accessors-in-network-drivers.patch +Patch7: 0008-netdrv-net-mlx5e-xsk-dynamically-allocate-mlx5e_chan.patch +Patch8: 0009-netdrv-net-mlx5-E-Switch-add-ingress-rate-support.patch +Patch9: 0010-netdrv-net-mlx5e-Tx-Strict-the-room-needed-for-SQ-ed.patch +Patch10: 0011-netdrv-net-mlx5e-XDP-Close-TX-MPWQE-session-when-no-.patch +Patch11: 0012-netdrv-net-mlx5e-XDP-Slight-enhancement-for-WQE-fetc.patch 
+Patch12: 0013-netdrv-net-mlx5e-Tx-Soften-inline-mode-VLAN-dependen.patch +Patch13: 0014-netdrv-net-mlx5e-Rx-checksum-handling-refactoring.patch +Patch14: 0015-netdrv-net-mlx5e-Set-tx-reporter-only-on-successful-.patch +Patch15: 0016-netdrv-net-mlx5e-TX-reporter-cleanup.patch +Patch16: 0017-netdrv-net-mlx5e-Allow-dropping-specific-tunnel-pack.patch +Patch17: 0018-netdrv-mlx5-no-need-to-check-return-value-of-debugfs.patch +Patch18: 0019-netdrv-net-mlx5-Use-debug-message-instead-of-warn.patch +Patch19: 0020-netdrv-net-mlx5-Add-XRQ-legacy-commands-opcodes.patch +Patch20: 0021-netdrv-net-mlx5e-Rename-reporter-header-file.patch +Patch21: 0022-netdrv-net-mlx5e-Change-naming-convention-for-report.patch +Patch22: 0023-netdrv-net-mlx5e-Generalize-tx-reporter-s-functional.patch +Patch23: 0024-netdrv-net-mlx5e-Extend-tx-diagnose-function.patch +Patch24: 0025-netdrv-net-mlx5e-Extend-tx-reporter-diagnostics-outp.patch +Patch25: 0026-netdrv-net-mlx5e-Add-cq-info-to-tx-reporter-diagnose.patch +Patch26: 0027-netdrv-net-mlx5e-Add-helper-functions-for-reporter-s.patch +Patch27: 0028-netdrv-net-mlx5e-Add-support-to-rx-reporter-diagnose.patch +Patch28: 0029-netdrv-net-mlx5e-Split-open-close-ICOSQ-into-stages.patch +Patch29: 0030-netdrv-net-mlx5e-Report-and-recover-from-CQE-error-o.patch +Patch30: 0031-netdrv-net-mlx5e-Report-and-recover-from-rx-timeout.patch +Patch31: 0032-netdrv-net-mlx5e-RX-Handle-CQE-with-error-at-the-ear.patch +Patch32: 0033-netdrv-net-mlx5e-Report-and-recover-from-CQE-with-er.patch +Patch33: 0034-netdrv-net-mlx5-Improve-functions-documentation.patch +Patch34: 0035-include-net-mlx5-Expose-IP-in-IP-capability-bit.patch +Patch35: 0036-netdrv-net-mlx5-Add-per-namespace-flow-table-default.patch +Patch36: 0037-netdrv-net-mlx5-Create-bypass-and-loopback-flow-stee.patch +Patch37: 0038-netdrv-net-mlx5e-Add-tc-flower-tracepoints.patch +Patch38: 0039-netdrv-net-mlx5e-Add-trace-point-for-neigh-used-valu.patch +Patch39: 0040-netdrv-net-mlx5e-Add-trace-point-for-neigh-update.patch +Patch40: 0041-netdrv-net-mlx5-Add-wrappers-for-HyperV-PCIe-operati.patch +Patch41: 0042-netdrv-net-mlx5-Fix-return-code-in-case-of-hyperv-wr.patch +Patch42: 0043-netdrv-net-mlx5-Set-ODP-capabilities-for-DC-transpor.patch +Patch43: 0044-netdrv-net-mlx5e-Change-function-s-position-to-a-mor.patch +Patch44: 0045-netdrv-net-mlx5e-Support-RSS-for-IP-in-IP-and-IPv6-t.patch +Patch45: 0046-netdrv-net-mlx5e-Improve-stateless-offload-capabilit.patch +Patch46: 0047-netdrv-net-mlx5e-Support-TSO-and-TX-checksum-offload.patch +Patch47: 0048-netdrv-net-mlx5e-Remove-unlikely-from-WARN-condition.patch +Patch48: 0049-netdrv-net-mlx5-Kconfig-Fix-MLX5_CORE-dependency-wit.patch +Patch49: 0050-netdrv-net-mlx5e-Use-ipv6_stub-to-avoid-dependency-w.patch +Patch50: 0051-netdrv-net-mlx5-Use-PTR_ERR_OR_ZERO-rather-than-its-.patch +Patch51: 0052-netdrv-net-mlx5e-kTLS-Remove-unused-function-paramet.patch +Patch52: 0053-netdrv-net-mlx5-DR-Remove-useless-set-memory-to-zero.patch +Patch53: 0054-netdrv-net-mlx5-DR-Remove-redundant-dev_name-print-f.patch +Patch54: 0055-netdrv-drivers-net-Fix-Kconfig-indentation.patch +Patch55: 0056-netdrv-net-mlx5e-kTLS-Release-reference-on-DUMPed-fr.patch +Patch56: 0057-netdrv-net-mlx5e-kTLS-Size-of-a-Dump-WQE-is-fixed.patch +Patch57: 0058-netdrv-net-mlx5e-kTLS-Save-only-the-frag-page-to-rel.patch +Patch58: 0059-netdrv-net-mlx5e-kTLS-Save-by-value-copy-of-the-reco.patch +Patch59: 0060-netdrv-net-mlx5e-kTLS-Fix-page-refcnt-leak-in-TX-res.patch +Patch60: 0061-netdrv-net-mlx5e-kTLS-Fix-missing-SQ-edge-fill.patch +Patch61: 
0062-netdrv-net-mlx5e-kTLS-Limit-DUMP-wqe-size.patch +Patch62: 0063-netdrv-net-mlx5e-kTLS-Remove-unneeded-cipher-type-ch.patch +Patch63: 0064-netdrv-net-mlx5e-kTLS-Save-a-copy-of-the-crypto-info.patch +Patch64: 0065-netdrv-net-mlx5e-kTLS-Enhance-TX-resync-flow.patch +Patch65: 0066-netdrv-net-mlx5e-Remove-incorrect-match-criteria-ass.patch +Patch66: 0067-netdrv-mlx5-reject-unsupported-external-timestamp-fl.patch +Patch67: 0068-netdrv-net-mlx5e-Fix-ingress-rate-configuration-for-.patch +Patch68: 0069-netdrv-net-mlx5e-Add-missing-capability-bit-check-fo.patch +Patch69: 0070-include-net-mlx5-Expose-optimal-performance-scatter-.patch +Patch70: 0071-netdrv-net-Fix-misspellings-of-configure-and-configu.patch +Patch71: 0072-netdrv-net-mlx5-E-Switch-Rename-egress-config-to-gen.patch +Patch72: 0073-netdrv-net-mlx5-E-Switch-Rename-ingress-acl-config-i.patch +Patch73: 0074-netdrv-net-mlx5-E-switch-Introduce-and-use-vlan-rule.patch +Patch74: 0075-netdrv-net-mlx5-Introduce-and-use-mlx5_esw_is_manage.patch +Patch75: 0076-netdrv-net-mlx5-Move-metdata-fields-under-offloads-s.patch +Patch76: 0077-netdrv-net-mlx5-Move-legacy-drop-counter-and-rule-un.patch +Patch77: 0078-netdrv-net-mlx5-Tide-up-state_lock-and-vport-enabled.patch +Patch78: 0079-netdrv-net-mlx5-E-switch-Prepare-code-to-handle-vpor.patch +Patch79: 0080-netdrv-net-mlx5-E-switch-Legacy-introduce-and-use-pe.patch +Patch80: 0081-netdrv-net-mlx5-Move-ACL-drop-counters-life-cycle-cl.patch +Patch81: 0082-netdrv-net-mlx5-E-switch-Offloads-introduce-and-use-.patch +Patch82: 0083-netdrv-net-mlx5-E-switch-Offloads-shift-ACL-programm.patch +Patch83: 0084-netdrv-net-mlx5-Restrict-metadata-disablement-to-off.patch +Patch84: 0085-netdrv-net-mlx5-Refactor-ingress-acl-configuration.patch +Patch85: 0086-netdrv-net-mlx5-FPGA-support-network-cards-with-stan.patch +Patch86: 0087-netdrv-net-mlx5-Remove-unneeded-variable-in-mlx5_unl.patch +Patch87: 0088-netdrv-net-mlx5e-Verify-that-rule-has-at-least-one-f.patch +Patch88: 0089-netdrv-net-mlx5-Do-not-hold-group-lock-while-allocat.patch +Patch89: 0090-netdrv-net-mlx5-Support-lockless-FTE-read-lookups.patch +Patch90: 0091-netdrv-net-mlx5e-TX-Dump-WQs-wqe-descriptors-on-CQE-.patch +Patch91: 0092-netdrv-net-mlx5-WQ-Move-short-getters-into-header-fi.patch +Patch92: 0093-netdrv-net-mlx5e-Bit-sized-fields-rewrite-support.patch +Patch93: 0094-netdrv-net-mlx5e-Add-ToS-DSCP-header-rewrite-support.patch +Patch94: 0095-netdrv-net-mlx5-rate-limit-alloc_ent-error-messages.patch +Patch95: 0096-netdrv-net-mlx5-LAG-Use-port-enumerators.patch +Patch96: 0097-netdrv-net-mlx5-fix-kvfree-of-uninitialized-pointer-.patch +Patch97: 0098-netdrv-net-mlx5-fix-spelling-mistake-metdata-metadat.patch +Patch98: 0099-netdrv-net-mlx5-Dump-of-fw_fatal-use-updated-devlink.patch +Patch99: 0100-netdrv-net-mlx5-Simplify-fdb-chain-and-prio-eswitch-.patch +Patch100: 0101-netdrv-net-mlx5-Rename-FDB_-tc-related-defines-to-FD.patch +Patch101: 0102-netdrv-net-mlx5-Define-fdb-tc-levels-per-prio.patch +Patch102: 0103-netdrv-net-mlx5-Accumulate-levels-for-chains-prio-na.patch +Patch103: 0104-netdrv-net-mlx5-Refactor-creating-fast-path-prio-cha.patch +Patch104: 0105-netdrv-net-mlx5-Add-new-chain-for-netfilter-flow-tab.patch +Patch105: 0106-netdrv-net-mlx5-Remove-redundant-NULL-initialization.patch +Patch106: 0107-netdrv-net-mlx5-Don-t-write-read-only-fields-in-MODI.patch +Patch107: 0108-netdrv-net-mlx5-DR-Refactor-VXLAN-GPE-flex-parser-tu.patch +Patch108: 0109-netdrv-net-mlx5-DR-Add-HW-bits-and-definitions-for-G.patch +Patch109: 
0110-netdrv-net-mlx5-DR-Add-support-for-Geneve-packets-SW.patch +Patch110: 0111-netdrv-net-mlx5e-TC-Stub-out-ipv6-tun-create-header-.patch +Patch111: 0112-netdrv-net-mlx5e-Remove-redundant-pointer-check.patch +Patch112: 0113-netdrv-net-use-rhashtable_lookup-instead-of-rhashtab.patch +Patch113: 0114-netdrv-net-mlx5e-Fix-build-error-without-IPV6.patch +Patch114: 0115-netdrv-net-mlx5e-E-switch-Fix-Ingress-ACL-groups-in-.patch +Patch115: 0116-netdrv-treewide-Use-sizeof_field-macro.patch +Patch116: 0117-netdrv-net-mlx5e-Avoid-duplicating-rule-destinations.patch +Patch117: 0118-netdrv-net-mlx5e-Always-print-health-reporter-messag.patch +Patch118: 0119-netdrv-net-mlx5-Move-devlink-registration-before-int.patch +Patch119: 0120-netdrv-Revert-net-mlx5-Support-lockless-FTE-read-loo.patch +Patch120: 0121-netdrv-net-mlx5e-Fix-hairpin-RSS-table-size.patch +Patch121: 0122-netdrv-net-mlx5-Fix-lowest-FDB-pool-size.patch +Patch122: 0123-netdrv-net-mlx5-Update-the-list-of-the-PCI-supported.patch +Patch123: 0124-netdrv-net-mlx5-E-Switch-Prevent-ingress-rate-config.patch +Patch124: 0125-netdrv-net-mlx5e-kTLS-Fix-corner-case-checks-in-TX-r.patch +Patch125: 0126-netdrv-net-mlx5e-kTLS-Remove-redundant-posts-in-TX-r.patch +Patch126: 0127-netdrv-net-mlx5e-kTLS-Do-not-send-decrypted-marked-S.patch +Patch127: 0128-netdrv-net-mlx5-limit-the-function-in-local-scope.patch +Patch128: 0129-netdrv-mlx5-work-around-high-stack-usage-with-gcc.patch +Patch129: 0130-netdrv-net-mlx5e-Support-accept-action-on-nic-table.patch +Patch130: 0131-netdrv-net-mlx5-Increase-the-max-number-of-channels-.patch +Patch131: 0132-netdrv-net-mlx5-Reduce-No-CQ-found-log-level-from-wa.patch +Patch132: 0133-netdrv-net-mlx5-Use-async-EQ-setup-cleanup-helpers-f.patch +Patch133: 0134-include-net-mlx5-Add-Virtio-Emulation-related-device.patch +Patch134: 0135-netdrv-net-mlx5-Expose-vDPA-emulation-device-capabil.patch +Patch135: 0136-include-net-mlx5-Add-RoCE-accelerator-counters.patch +Patch136: 0137-include-net-mlx5-Expose-relaxed-ordering-bits.patch +Patch137: 0138-include-net-mlx5-Add-copy-header-action-struct-layou.patch +Patch138: 0139-include-net-mlx5-Add-mlx5_ifc-definitions-for-connec.patch +Patch139: 0140-include-net-mlx5e-Expose-FEC-feilds-and-related-capa.patch +Patch140: 0141-netdrv-net-mlx5-Refactor-mlx5_create_auto_grouped_fl.patch +Patch141: 0142-netdrv-net-mlx5-fs_core-Introduce-unmanaged-flow-tab.patch +Patch142: 0143-netdrv-net-mlx5-Add-ignore-level-support-fwd-to-tabl.patch +Patch143: 0144-netdrv-net-mlx5-Allow-creating-autogroups-with-reser.patch +Patch144: 0145-netdrv-net-mlx5e-Fix-printk-format-warning.patch +Patch145: 0146-netdrv-net-mlx5e-Add-mlx5e_flower_parse_meta-support.patch +Patch146: 0147-netdrv-net-mlx5-DR-Modify-set-action-limitation-exte.patch +Patch147: 0148-netdrv-net-mlx5-DR-Modify-header-copy-support.patch +Patch148: 0149-netdrv-net-mlx5-DR-Allow-connecting-flow-table-to-a-.patch +Patch149: 0150-netdrv-net-mlx5-IPsec-Fix-esp-modify-function-attrib.patch +Patch150: 0151-netdrv-net-mlx5-IPsec-fix-memory-leak-at-mlx5_fpga_i.patch +Patch151: 0152-netdrv-net-mlx5e-TX-Error-completion-is-for-last-WQE.patch +Patch152: 0153-netdrv-net-mlx5-Deprecate-usage-of-generic-TLS-HW-ca.patch +Patch153: 0154-netdrv-net-mlx5-Fix-sleep-while-atomic-in-mlx5_eswit.patch +Patch154: 0155-netdrv-net-mlx5e-Reset-RQ-doorbell-counter-before-mo.patch +Patch155: 0156-netdrv-net-mlx5e-Fix-crash-in-recovery-flow-without-.patch +Patch156: 0157-netdrv-net-mlx5-DR-Fix-postsend-actions-write-length.patch +Patch157: 
0158-netdrv-net-mlx5e-kTLS-Fix-TCP-seq-off-by-1-issue-in-.patch +Patch158: 0159-netdrv-net-mlx5e-kTLS-Fix-wrong-value-in-record-trac.patch +Patch159: 0160-netdrv-net-mlx5e-Fix-endianness-handling-in-pedit-ma.patch +Patch160: 0161-netdrv-net-mlx5-Clear-LAG-notifier-pointer-after-unr.patch +Patch161: 0162-netdrv-net-mlx5_core-Set-IB-capability-mask1-to-fix-.patch +Patch162: 0163-netdrv-net-mlx5e-Enhance-ICOSQ-WQE-info-fields.patch +Patch163: 0164-netdrv-net-mlx5e-Fix-missing-reset-of-SW-metadata-in.patch +Patch164: 0165-netdrv-net-mlx5e-Fix-ICOSQ-recovery-flow-with-Stridi.patch +Patch165: 0166-netdrv-net-mlx5e-Do-not-recover-from-a-non-fatal-syn.patch +Patch166: 0167-netdrv-net-mlx5e-Define-one-flow-for-TXQ-selection-w.patch +Patch167: 0168-netdrv-net-mlx5e-Add-missing-LRO-cap-check.patch +Patch168: 0169-netdrv-net-mlx5e-Encapsulate-updating-netdev-queues-.patch +Patch169: 0170-netdrv-net-mlx5e-Rename-hw_modify-to-preactivate.patch +Patch170: 0171-netdrv-net-mlx5e-Use-preactivate-hook-to-set-the-ind.patch +Patch171: 0172-netdrv-net-mlx5e-Fix-configuration-of-XPS-cpumasks-a.patch +Patch172: 0173-netdrv-net-mlx5e-Remove-unneeded-netif_set_real_num_.patch +Patch173: 0174-netdrv-net-mlx5e-Allow-mlx5e_switch_priv_channels-to.patch +Patch174: 0175-netdrv-net-mlx5e-Add-context-to-the-preactivate-hook.patch +Patch175: 0176-netdrv-net-mlx5e-Change-inline-mode-correctly-when-c.patch +Patch176: 0177-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-p.patch +Patch177: 0178-netdrv-net-mlx5e-RX-Use-indirect-calls-wrapper-for-h.patch +Patch178: 0179-netdrv-net-mlx5-sparse-warning-incorrect-type-in-ass.patch +Patch179: 0180-netdrv-net-mlx5-sparse-warning-Using-plain-integer-a.patch +Patch180: 0181-include-net-mlx5-fix-spelling-mistake-reserverd-rese.patch +Patch181: 0182-netdrv-net-mlx5e-Use-netdev_warn-for-errors-for-adde.patch +Patch182: 0183-include-net-mlx5-Expose-link-speed-directly.patch +Patch183: 0184-netdrv-net-mlx5-Expose-port-speed-when-possible.patch +Patch184: 0185-netdrv-net-mlx5-Tidy-up-and-fix-reverse-christmas-or.patch +Patch185: 0186-netdrv-net-mlx5-E-Switch-Hold-mutex-when-querying-dr.patch +Patch186: 0187-netdrv-net-mlx5-Fix-group-version-management.patch +Patch187: 0188-netdrv-net-mlx5e-Don-t-allow-forwarding-between-upli.patch +Patch188: 0189-netdrv-net-mlx5-Eswitch-avoid-redundant-mask.patch +Patch189: 0190-netdrv-net-mlx5-DR-Change-matcher-priority-parameter.patch +Patch190: 0191-netdrv-net-mlx5-DR-Improve-log-messages.patch +Patch191: 0192-netdrv-net-mlx5-DR-Remove-unneeded-functions-deceler.patch +Patch192: 0193-netdrv-net-mlx5e-Use-netdev_warn-instead-of-pr_err-f.patch +Patch193: 0194-netdrv-net-mlx5e-Remove-unused-argument-from-parse_t.patch +Patch194: 0195-netdrv-flow_offload-check-for-basic-action-hw-stats-.patch +Patch195: 0196-netdrv-net-mlx5-Fix-frequent-ioread-PCI-access-durin.patch +Patch196: 0197-netdrv-net-mlx5e-Add-missing-release-firmware-call.patch +Patch197: 0198-netdrv-net-mlx5e-Fix-nest_level-for-vlan-pop-action.patch +Patch198: 0199-netdrv-net-mlx5e-Fix-pfnum-in-devlink-port-attribute.patch +Patch199: 0200-netdrv-net-mlx5-Fix-failing-fw-tracer-allocation-on-.patch +Patch200: 0201-netdrv-net-mlx5e-Don-t-trigger-IRQ-multiple-times-on.patch +Patch201: 0202-netdrv-net-mlx5e-Get-the-latest-values-from-counters.patch +Patch202: 0203-netdrv-net-mlx5-DR-On-creation-set-CQ-s-arm_db-membe.patch +Patch203: 0204-netdrv-net-mlx5-Fix-forced-completion-access-non-ini.patch +Patch204: 0205-netdrv-net-mlx5-Fix-command-entry-leak-in-Internal-E.patch +Patch205: 
0206-netdrv-net-mlx5e-Fix-q-counters-on-uplink-represento.patch +Patch206: 0207-netdrv-net-mlx5e-en_accel-Add-missing-net-geneve.h-i.patch +Patch207: 0208-netdrv-net-mlx5e-Set-of-completion-request-bit-shoul.patch +Patch208: 0209-netdrv-mlx5-Update-list-of-unsupported-devices.patch +Patch209: 0210-netdrv-mlx5-Remove-the-unsupported-mark-from-Connect.patch +Patch210: 0211-netdrv-net-mlx5-TC-Offload-flow-table-rules.patch +Patch211: 0212-netdrv-net-mlx5-ft-Use-getter-function-to-get-ft-cha.patch +Patch212: 0213-netdrv-net-mlx5-ft-Check-prio-and-chain-sanity-for-f.patch +Patch213: 0214-netdrv-net-mlx5-E-Switch-Refactor-chains-and-priorit.patch +Patch214: 0215-netdrv-net-mlx5-E-Switch-Increase-number-of-chains-a.patch +Patch215: 0216-netdrv-net-mlx5-make-the-symbol-ESW_POOLS-static.patch +Patch216: 0217-netdrv-net-mlx5e-Eswitch-Use-per-vport-tables-for-mi.patch +Patch217: 0218-netdrv-net-mlx5-E-Switch-Allow-goto-earlier-chain-if.patch +Patch218: 0219-netdrv-net-mlx5e-Use-NL_SET_ERR_MSG_MOD-extack-for-e.patch +Patch219: 0220-netdrv-net-mlx5e-Reduce-number-of-arguments-in-slow-.patch +Patch220: 0221-netdrv-net-mlx5e-Remove-redundant-comment-about-goto.patch +Patch221: 0222-netdrv-net-mlx5-Verify-goto-chain-offload-support.patch +Patch222: 0223-netdrv-net-mlx5e-Fix-an-IS_ERR-vs-NULL-check.patch +Patch223: 0224-netdrv-net-mlx5-Change-the-name-of-steering-mode-par.patch +Patch224: 0225-netdrv-net-mlx5e-Add-devlink-fdb_large_groups-parame.patch +Patch225: 0226-netdrv-net-mlx5-Introduce-mapping-infra-for-mapping-.patch +Patch226: 0227-infiniband-net-mlx5-E-Switch-Move-source-port-on-reg.patch +Patch227: 0228-netdrv-net-mlx5-E-Switch-Get-reg_c0-value-on-CQE.patch +Patch228: 0229-netdrv-net-mlx5-E-Switch-Mark-miss-packets-with-new-.patch +Patch229: 0230-netdrv-net-mlx5e-Rx-Split-rep-rx-mpwqe-handler-from-.patch +Patch230: 0231-netdrv-net-mlx5-E-Switch-Restore-chain-id-on-miss.patch +Patch231: 0232-netdrv-net-mlx5e-Allow-re-allocating-mod-header-acti.patch +Patch232: 0233-netdrv-net-mlx5e-Move-tc-tunnel-parsing-logic-with-t.patch +Patch233: 0234-netdrv-net-mlx5e-Disallow-inserting-vxlan-vlan-egres.patch +Patch234: 0235-netdrv-net-mlx5e-Support-inner-header-rewrite-with-g.patch +Patch235: 0236-netdrv-net-mlx5-E-Switch-Get-reg_c1-value-on-miss.patch +Patch236: 0237-netdrv-net-mlx5e-Restore-tunnel-metadata-on-miss.patch +Patch237: 0238-netdrv-net-mlx5-E-Switch-Enable-reg-c1-loopback-when.patch +Patch238: 0239-netdrv-net-mlx5e-en_rep-Create-uplink-rep-root-table.patch +Patch239: 0240-netdrv-net-mlx5-E-Switch-Introduce-global-tables.patch +Patch240: 0241-netdrv-net-mlx5-E-Switch-Add-support-for-offloading-.patch +Patch241: 0242-netdrv-net-mlx5-E-Switch-Support-getting-chain-mappi.patch +Patch242: 0243-netdrv-net-mlx5e-CT-Introduce-connection-tracking.patch +Patch243: 0244-netdrv-net-mlx5e-CT-Offload-established-flows.patch +Patch244: 0245-netdrv-net-mlx5e-CT-Handle-misses-after-executing-CT.patch +Patch245: 0246-netdrv-net-mlx5e-CT-Support-clear-action.patch +Patch246: 0247-netdrv-net-mlx5e-CT-Fix-stack-usage-compiler-warning.patch +Patch247: 0248-netdrv-net-mlx5e-CT-Use-rhashtable-s-ct-entries-inst.patch +Patch248: 0249-netdrv-net-mlx5-CT-Change-idr-to-xarray-to-protect-p.patch +Patch249: 0250-netdrv-net-mlx5-E-switch-Fix-mutex-init-order.patch +Patch250: 0251-netdrv-net-mlx5-E-Switch-free-flow_group_in-after-cr.patch +Patch251: 0252-netdrv-net-mlx5-E-Switch-Enable-restore-table-only-i.patch +Patch252: 0253-netdrv-net-mlx5-Add-missing-inline-to-stub-esw_add_r.patch +Patch253: 
0254-netdrv-net-mlx5-E-Switch-Fix-using-fwd-and-modify-wh.patch +Patch254: 0255-netdrv-net-mlx5e-Fix-rejecting-all-egress-rules-not-.patch +Patch255: 0256-netdrv-net-mlx5-E-switch-Fix-printing-wrong-error-va.patch +Patch256: 0257-netdrv-net-mlx5-E-Switch-Use-correct-type-for-chain-.patch +Patch257: 0258-netdrv-net-mlx5e-CT-Avoid-false-warning-about-rule-m.patch +Patch258: 0259-netdrv-net-mlx5e-Fix-actions_match_supported-return.patch +Patch259: 0260-netdrv-net-mlx5e-CT-Fix-insert-rules-when-TC_CT-conf.patch +Patch260: 0261-netdrv-net-mlx5e-CT-remove-set-but-not-used-variable.patch +Patch261: 0262-netdrv-net-mlx5e-Fix-missing-pedit-action-after-ct-c.patch +Patch262: 0263-netdrv-net-mlx5e-CT-Fix-offload-with-CT-action-after.patch +Patch263: 0264-netdrv-net-mlx5-E-switch-Annotate-termtbl_mutex-mute.patch +Patch264: 0265-netdrv-net-mlx5-E-switch-Annotate-esw-state_lock-mut.patch +Patch265: 0266-netdrv-net-mlx5-Avoid-deriving-mlx5_core_dev-second-.patch +Patch266: 0267-netdrv-net-mlx5-Simplify-mlx5_register_device-to-ret.patch +Patch267: 0268-netdrv-net-mlx5-Simplify-mlx5_unload_one-and-its-cal.patch +Patch268: 0269-netdrv-net-mlx5-Split-eswitch-mode-check-to-differen.patch +Patch269: 0270-netdrv-net-mlx5-E-switch-Extend-eswitch-enable-to-ha.patch +Patch270: 0271-netdrv-net-mlx5-E-switch-Protect-eswitch-mode-change.patch +Patch271: 0272-netdrv-net-mlx5e-Rename-representor-get-devlink-port.patch +Patch272: 0273-netdrv-net-mlx5e-Add-support-for-devlink-port-in-non.patch +Patch273: 0274-netdrv-net-mlx5e-Use-devlink-virtual-flavour-for-VF-.patch +Patch274: 0275-netdrv-net-mlx5e-Fix-devlink-port-register-sequence.patch +Patch275: 0276-netdrv-net-mlx5e-Fix-devlink-port-netdev-unregistrat.patch +Patch276: 0277-netdrv-net-mlx5-Fix-crash-upon-suspend-resume.patch +Patch277: 0278-netdrv-net-mlx5-Add-command-entry-handling-completio.patch +Patch278: 0279-netdrv-net-mlx5-Fix-a-race-when-moving-command-inter.patch +Patch279: 0280-netdrv-net-mlx5-Avoid-processing-commands-before-cmd.patch +Patch280: 0281-netdrv-net-mlx5e-Fix-allowed-tc-redirect-merged-eswi.patch +Patch281: 0282-netdrv-net-mlx5e-kTLS-Destroy-key-object-after-destr.patch +Patch282: 0283-netdrv-net-mlx5e-Fix-inner-tirs-handling.patch +Patch283: 0284-netdrv-net-mlx5-Fix-memory-leak-in-mlx5_events_init.patch +Patch284: 0285-netdrv-net-mlx5-Fix-cleaning-unmanaged-flow-tables.patch +Patch285: 0286-netdrv-net-mlx5-Don-t-maintain-a-case-of-del_sw_func.patch +Patch286: 0287-netdrv-net-mlx5-Annotate-mutex-destroy-for-root-ns.patch +Patch287: 0288-netdrv-net-mlx5e-Update-netdev-txq-on-completions-du.patch +Patch288: 0289-netdrv-net-mlx5e-CT-Correctly-get-flow-rule.patch +Patch289: 0290-netdrv-net-mlx5-Fix-error-flow-in-case-of-function_s.patch +Patch290: 0291-netdrv-net-mlx5e-IPoIB-Enable-loopback-packets-for-I.patch +Patch291: 0292-netdrv-net-mlx5e-IPoIB-Drop-multicast-packets-that-t.patch +Patch292: 0293-netdrv-net-mlx5-DR-Fix-incorrect-type-in-argument.patch +Patch293: 0294-netdrv-net-mlx5-DR-Fix-cast-to-restricted-__be32.patch +Patch294: 0295-netdrv-net-mlx5-DR-Fix-incorrect-type-in-return-expr.patch +Patch295: 0296-netdrv-net-mlx5-Accel-fpga-tls-fix-cast-to-__be64-an.patch +Patch296: 0297-netdrv-net-mlx5e-Allow-partial-data-mask-for-tunnel-.patch +Patch297: 0298-netdrv-net-mlx5e-en_tc-Fix-incorrect-type-in-initial.patch +Patch298: 0299-netdrv-net-mlx5e-en_tc-Fix-cast-to-restricted-__be32.patch +Patch299: 0300-netdrv-net-sched-expose-HW-stats-types-per-action-us.patch +Patch300: 0301-netdrv-net-mlx5e-Fix-stats-update-for-matchall-class.patch 
+Patch301: 0302-netdrv-net-mlx5e-Properly-set-default-values-when-di.patch +Patch302: 0303-netdrv-net-mlx5e-Fix-MLX5_TC_CT-dependencies.patch +Patch303: 0304-netdrv-net-mlx5e-replace-EINVAL-in-mlx5e_flower_pars.patch +Patch304: 0305-netdrv-net-mlx5e-Remove-warning-devices-are-not-on-s.patch +Patch305: 0306-include-net-mlx5-HW-bit-for-goto-chain-offload-suppo.patch +Patch306: 0307-include-netfilter-add-include-guard-to-xt_connlabel..patch +Patch307: 0308-include-netfilter-fix-include-guards.patch +Patch308: 0310-include-net-mlx5-IPSec-Fix-incorrect-type-for-spi.patch +Patch309: 0313-netdrv-net-mlx5e-Disable-devlink-port-support-for-no.patch +Patch310: 9001-Bump-driver-version.patch +Patch311: 9002-Add-mlx_backport_compat-h.patch +Patch312: 9003-Add-xsk_umem_adjust_offset.patch +Patch313: 9005-reporter_rx-strip-extack-parameter.patch +Patch314: 9006-Provide-DEVLINK_PORT_FLAVOUR_VIRTUAL-stub-value.patch +Patch315: 9007-Provide-TC_SETUP_FT-definition.patch +Patch316: 9008-Add-flow_action_basic_hw_stats_types_check +Patch317: 9009-add-NUM_FLOW_ACTIONS.patch + +%define findpat %( echo "%""P" ) +%define __find_requires /usr/lib/rpm/redhat/find-requires.ksyms +%define __find_provides /usr/lib/rpm/redhat/find-provides.ksyms %{kmod_name} %{?epoch:%{epoch}:}%{version}-%{release} +%define sbindir %( if [ -d "/sbin" -a \! -h "/sbin" ]; then echo "/sbin"; else echo %{_sbindir}; fi ) +%define dup_state_dir %{_localstatedir}/lib/rpm-state/kmod-dups +%define kver_state_dir %{dup_state_dir}/kver +%define kver_state_file %{kver_state_dir}/%{kmod_kernel_version}.%(arch) +%define dup_module_list %{dup_state_dir}/rpm-kmod-%{kmod_name}-modules + +Name: kmod-redhat-mlx5_core +Version: %{kmod_driver_version} +Release: %{kmod_rpm_release}%{?dist} +%if "%{kmod_driver_epoch}" != "" +Epoch: %{kmod_driver_epoch} +%endif +Summary: mlx5_core kernel module for Driver Update Program +Group: System/Kernel +License: GPLv2 +URL: https://www.kernel.org/ +BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) +BuildRequires: %kernel_devel_pkg = %kmod_kernel_version +%if "%{kmod_dist_build_deps}" != "" +BuildRequires: %{kmod_dist_build_deps} +%endif +ExclusiveArch: x86_64 +%global kernel_source() /usr/src/kernels/%{kmod_kernel_version}.$(arch) + +%global _use_internal_dependency_generator 0 +%if "%{?kmod_kernel_version_min}" != "" +Provides: %kernel_modules_pkg >= %{kmod_kernel_version_min}.%{_target_cpu} +%else +Provides: %kernel_modules_pkg = %{kmod_kernel_version_dep}.%{_target_cpu} +%endif +Provides: kmod-%{kmod_name} = %{?epoch:%{epoch}:}%{version}-%{release} +Requires(post): %{sbindir}/weak-modules +Requires(postun): %{sbindir}/weak-modules +Requires: kernel >= 4.18.0-193.el8 + +Requires: kernel < 4.18.0-194.el8 +%if 0 +Requires: firmware(%{kmod_name}) = ENTER_FIRMWARE_VERSION +%endif +%if "%{kmod_build_dependencies}" != "" +BuildRequires: %{kmod_build_dependencies} +%endif +%if "%{kmod_dependencies}" != "" +Requires: %{kmod_dependencies} +%endif +# if there are multiple kmods for the same driver from different vendors, +# they should conflict with each other. 
+Conflicts: kmod-%{kmod_name} + +%description +mlx5_core kernel module for Driver Update Program + +%if 0 + +%package -n kmod-redhat-mlx5_core-firmware +Version: ENTER_FIRMWARE_VERSION +Summary: mlx5_core firmware for Driver Update Program +Provides: firmware(%{kmod_name}) = ENTER_FIRMWARE_VERSION +%if "%{kmod_kernel_version_min}" != "" +Provides: %kernel_modules_pkg >= %{kmod_kernel_version_min}.%{_target_cpu} +%else +Provides: %kernel_modules_pkg = %{kmod_kernel_version_dep}.%{_target_cpu} +%endif +%description -n kmod-redhat-mlx5_core-firmware +mlx5_core firmware for Driver Update Program + + +%files -n kmod-redhat-mlx5_core-firmware +%defattr(644,root,root,755) +%{FIRMWARE_FILES} + +%endif + +# Development package +%if 0%{kmod_devel_package} +%package -n kmod-redhat-mlx5_core-devel +Version: %{kmod_driver_version} +Requires: kernel >= 4.18.0-193.el8 + +Requires: kernel < 4.18.0-194.el8 +Summary: mlx5_core development files for Driver Update Program + +%description -n kmod-redhat-mlx5_core-devel +mlx5_core development files for Driver Update Program + + +%files -n kmod-redhat-mlx5_core-devel +%defattr(644,root,root,755) +/lib/modules/%{kmod_rpm_name}-%{kmod_driver_version}/ +%endif + +%post +modules=( $(find /lib/modules/%{kmod_kernel_version}.%(arch)/%{kmod_install_path} | grep '\.ko$') ) +printf '%s\n' "${modules[@]}" | %{sbindir}/weak-modules --add-modules --no-initramfs + +mkdir -p "%{kver_state_dir}" +touch "%{kver_state_file}" + +exit 0 + +%posttrans +# We have to re-implement part of weak-modules here because it doesn't allow +# calling initramfs regeneration separately +if [ -f "%{kver_state_file}" ]; then + kver_base="%{kmod_kernel_version_dep}" + kvers=$(ls -d "/lib/modules/${kver_base%%.*}"*) + + for k_dir in $kvers; do + k="${k_dir#/lib/modules/}" + + tmp_initramfs="/boot/initramfs-$k.tmp" + dst_initramfs="/boot/initramfs-$k.img" + + # The same check as in weak-modules: we assume that the kernel present + # if the symvers file exists. + if [ -e "/boot/symvers-$k.gz" ]; then + /usr/bin/dracut -f "$tmp_initramfs" "$k" || exit 1 + cmp -s "$tmp_initramfs" "$dst_initramfs" + if [ "$?" 
= 1 ]; then + mv "$tmp_initramfs" "$dst_initramfs" + else + rm -f "$tmp_initramfs" + fi + fi + done + + rm -f "%{kver_state_file}" + rmdir "%{kver_state_dir}" 2> /dev/null +fi + +rmdir "%{dup_state_dir}" 2> /dev/null + +exit 0 + +%preun +if rpm -q --filetriggers kmod 2> /dev/null| grep -q "Trigger for weak-modules call on kmod removal"; then + mkdir -p "%{kver_state_dir}" + touch "%{kver_state_file}" +fi + +mkdir -p "%{dup_state_dir}" +rpm -ql kmod-redhat-mlx5_core-%{kmod_driver_version}-%{kmod_rpm_release}%{?dist}.$(arch) | \ + grep '\.ko$' > "%{dup_module_list}" + +%postun +if rpm -q --filetriggers kmod 2> /dev/null| grep -q "Trigger for weak-modules call on kmod removal"; then + initramfs_opt="--no-initramfs" +else + initramfs_opt="" +fi + +modules=( $(cat "%{dup_module_list}") ) +rm -f "%{dup_module_list}" +printf '%s\n' "${modules[@]}" | %{sbindir}/weak-modules --remove-modules $initramfs_opt + +rmdir "%{dup_state_dir}" 2> /dev/null + +exit 0 + +%files +%defattr(644,root,root,755) +/lib/modules/%{kmod_kernel_version}.%(arch) +/etc/depmod.d/%{kmod_name}.conf +%doc /usr/share/doc/%{kmod_rpm_name}/greylist.txt + + + +%prep +%setup -n %{kmod_name}-%{kmod_vendor}-%{kmod_driver_version} + +%patch0 -p1 +%patch1 -p1 +%patch2 -p1 +%patch3 -p1 +%patch4 -p1 +%patch5 -p1 +%patch6 -p1 +%patch7 -p1 +%patch8 -p1 +%patch9 -p1 +%patch10 -p1 +%patch11 -p1 +%patch12 -p1 +%patch13 -p1 +%patch14 -p1 +%patch15 -p1 +%patch16 -p1 +%patch17 -p1 +%patch18 -p1 +%patch19 -p1 +%patch20 -p1 +%patch21 -p1 +%patch22 -p1 +%patch23 -p1 +%patch24 -p1 +%patch25 -p1 +%patch26 -p1 +%patch27 -p1 +%patch28 -p1 +%patch29 -p1 +%patch30 -p1 +%patch31 -p1 +%patch32 -p1 +%patch33 -p1 +%patch34 -p1 +%patch35 -p1 +%patch36 -p1 +%patch37 -p1 +%patch38 -p1 +%patch39 -p1 +%patch40 -p1 +%patch41 -p1 +%patch42 -p1 +%patch43 -p1 +%patch44 -p1 +%patch45 -p1 +%patch46 -p1 +%patch47 -p1 +%patch48 -p1 +%patch49 -p1 +%patch50 -p1 +%patch51 -p1 +%patch52 -p1 +%patch53 -p1 +%patch54 -p1 +%patch55 -p1 +%patch56 -p1 +%patch57 -p1 +%patch58 -p1 +%patch59 -p1 +%patch60 -p1 +%patch61 -p1 +%patch62 -p1 +%patch63 -p1 +%patch64 -p1 +%patch65 -p1 +%patch66 -p1 +%patch67 -p1 +%patch68 -p1 +%patch69 -p1 +%patch70 -p1 +%patch71 -p1 +%patch72 -p1 +%patch73 -p1 +%patch74 -p1 +%patch75 -p1 +%patch76 -p1 +%patch77 -p1 +%patch78 -p1 +%patch79 -p1 +%patch80 -p1 +%patch81 -p1 +%patch82 -p1 +%patch83 -p1 +%patch84 -p1 +%patch85 -p1 +%patch86 -p1 +%patch87 -p1 +%patch88 -p1 +%patch89 -p1 +%patch90 -p1 +%patch91 -p1 +%patch92 -p1 +%patch93 -p1 +%patch94 -p1 +%patch95 -p1 +%patch96 -p1 +%patch97 -p1 +%patch98 -p1 +%patch99 -p1 +%patch100 -p1 +%patch101 -p1 +%patch102 -p1 +%patch103 -p1 +%patch104 -p1 +%patch105 -p1 +%patch106 -p1 +%patch107 -p1 +%patch108 -p1 +%patch109 -p1 +%patch110 -p1 +%patch111 -p1 +%patch112 -p1 +%patch113 -p1 +%patch114 -p1 +%patch115 -p1 +%patch116 -p1 +%patch117 -p1 +%patch118 -p1 +%patch119 -p1 +%patch120 -p1 +%patch121 -p1 +%patch122 -p1 +%patch123 -p1 +%patch124 -p1 +%patch125 -p1 +%patch126 -p1 +%patch127 -p1 +%patch128 -p1 +%patch129 -p1 +%patch130 -p1 +%patch131 -p1 +%patch132 -p1 +%patch133 -p1 +%patch134 -p1 +%patch135 -p1 +%patch136 -p1 +%patch137 -p1 +%patch138 -p1 +%patch139 -p1 +%patch140 -p1 +%patch141 -p1 +%patch142 -p1 +%patch143 -p1 +%patch144 -p1 +%patch145 -p1 +%patch146 -p1 +%patch147 -p1 +%patch148 -p1 +%patch149 -p1 +%patch150 -p1 +%patch151 -p1 +%patch152 -p1 +%patch153 -p1 +%patch154 -p1 +%patch155 -p1 +%patch156 -p1 +%patch157 -p1 +%patch158 -p1 +%patch159 -p1 +%patch160 -p1 +%patch161 -p1 +%patch162 -p1 +%patch163 
-p1 +%patch164 -p1 +%patch165 -p1 +%patch166 -p1 +%patch167 -p1 +%patch168 -p1 +%patch169 -p1 +%patch170 -p1 +%patch171 -p1 +%patch172 -p1 +%patch173 -p1 +%patch174 -p1 +%patch175 -p1 +%patch176 -p1 +%patch177 -p1 +%patch178 -p1 +%patch179 -p1 +%patch180 -p1 +%patch181 -p1 +%patch182 -p1 +%patch183 -p1 +%patch184 -p1 +%patch185 -p1 +%patch186 -p1 +%patch187 -p1 +%patch188 -p1 +%patch189 -p1 +%patch190 -p1 +%patch191 -p1 +%patch192 -p1 +%patch193 -p1 +%patch194 -p1 +%patch195 -p1 +%patch196 -p1 +%patch197 -p1 +%patch198 -p1 +%patch199 -p1 +%patch200 -p1 +%patch201 -p1 +%patch202 -p1 +%patch203 -p1 +%patch204 -p1 +%patch205 -p1 +%patch206 -p1 +%patch207 -p1 +%patch208 -p1 +%patch209 -p1 +%patch210 -p1 +%patch211 -p1 +%patch212 -p1 +%patch213 -p1 +%patch214 -p1 +%patch215 -p1 +%patch216 -p1 +%patch217 -p1 +%patch218 -p1 +%patch219 -p1 +%patch220 -p1 +%patch221 -p1 +%patch222 -p1 +%patch223 -p1 +%patch224 -p1 +%patch225 -p1 +%patch226 -p1 +%patch227 -p1 +%patch228 -p1 +%patch229 -p1 +%patch230 -p1 +%patch231 -p1 +%patch232 -p1 +%patch233 -p1 +%patch234 -p1 +%patch235 -p1 +%patch236 -p1 +%patch237 -p1 +%patch238 -p1 +%patch239 -p1 +%patch240 -p1 +%patch241 -p1 +%patch242 -p1 +%patch243 -p1 +%patch244 -p1 +%patch245 -p1 +%patch246 -p1 +%patch247 -p1 +%patch248 -p1 +%patch249 -p1 +%patch250 -p1 +%patch251 -p1 +%patch252 -p1 +%patch253 -p1 +%patch254 -p1 +%patch255 -p1 +%patch256 -p1 +%patch257 -p1 +%patch258 -p1 +%patch259 -p1 +%patch260 -p1 +%patch261 -p1 +%patch262 -p1 +%patch263 -p1 +%patch264 -p1 +%patch265 -p1 +%patch266 -p1 +%patch267 -p1 +%patch268 -p1 +%patch269 -p1 +%patch270 -p1 +%patch271 -p1 +%patch272 -p1 +%patch273 -p1 +%patch274 -p1 +%patch275 -p1 +%patch276 -p1 +%patch277 -p1 +%patch278 -p1 +%patch279 -p1 +%patch280 -p1 +%patch281 -p1 +%patch282 -p1 +%patch283 -p1 +%patch284 -p1 +%patch285 -p1 +%patch286 -p1 +%patch287 -p1 +%patch288 -p1 +%patch289 -p1 +%patch290 -p1 +%patch291 -p1 +%patch292 -p1 +%patch293 -p1 +%patch294 -p1 +%patch295 -p1 +%patch296 -p1 +%patch297 -p1 +%patch298 -p1 +%patch299 -p1 +%patch300 -p1 +%patch301 -p1 +%patch302 -p1 +%patch303 -p1 +%patch304 -p1 +%patch305 -p1 +%patch306 -p1 +%patch307 -p1 +%patch308 -p1 +%patch309 -p1 +%patch310 -p1 +%patch311 -p1 +%patch312 -p1 +%patch313 -p1 +%patch314 -p1 +%patch315 -p1 +%patch316 -p1 +%patch317 -p1 +set -- * +mkdir source +mv "$@" source/ +mkdir obj + +%build +rm -rf obj +cp -r source obj + +PWD_PATH="$PWD" +%if "%{workaround_no_pwd_rel_path}" != "1" +PWD_PATH=$(realpath --relative-to="%{kernel_source}" . 
2>/dev/null || echo "$PWD") +%endif +%{make_build} -C %{kernel_source} V=1 M="$PWD_PATH/obj/%{kmod_kbuild_dir}" \ + NOSTDINC_FLAGS="-I$PWD_PATH/obj/include -I$PWD_PATH/obj/include/uapi %{nil}" \ + EXTRA_CFLAGS="%{nil}" \ + %{nil} +# mark modules executable so that strip-to-file can strip them +find obj/%{kmod_kbuild_dir} -name "*.ko" -type f -exec chmod u+x '{}' + + +whitelist="/lib/modules/kabi-current/kabi_whitelist_%{_target_cpu}" +for modules in $( find obj/%{kmod_kbuild_dir} -name "*.ko" -type f -printf "%{findpat}\n" | sed 's|\.ko$||' | sort -u ) ; do + # update depmod.conf + module_weak_path=$(echo "$modules" | sed 's/[\/]*[^\/]*$//') + if [ -z "$module_weak_path" ]; then + module_weak_path=%{name} + else + module_weak_path=%{name}/$module_weak_path + fi + echo "override $(echo $modules | sed 's/.*\///')" \ + "$(echo "%{kmod_kernel_version_dep}" | + sed 's/\.[^\.]*$//; + s/\([.+?^$\/\\|()\[]\|\]\)/\\\0/g').*" \ + "weak-updates/$module_weak_path" >> source/depmod.conf + + # update greylist + nm -u obj/%{kmod_kbuild_dir}/$modules.ko | sed 's/.*U //' | sed 's/^\.//' | sort -u | while read -r symbol; do + grep -q "^\s*$symbol\$" $whitelist || echo "$symbol" >> source/greylist + done +done +sort -u source/greylist | uniq > source/greylist.txt + +%install +export INSTALL_MOD_PATH=$RPM_BUILD_ROOT +export INSTALL_MOD_DIR=%{kmod_install_path} +PWD_PATH="$PWD" +%if "%{workaround_no_pwd_rel_path}" != "1" +PWD_PATH=$(realpath --relative-to="%{kernel_source}" . 2>/dev/null || echo "$PWD") +%endif +make -C %{kernel_source} modules_install \ + M=$PWD_PATH/obj/%{kmod_kbuild_dir} +# Cleanup unnecessary kernel-generated module dependency files. +find $INSTALL_MOD_PATH/lib/modules -iname 'modules.*' -exec rm {} \; + +install -m 644 -D source/depmod.conf $RPM_BUILD_ROOT/etc/depmod.d/%{kmod_name}.conf +install -m 644 -D source/greylist.txt $RPM_BUILD_ROOT/usr/share/doc/%{kmod_rpm_name}/greylist.txt +%if 0 +%{FIRMWARE_FILES_INSTALL} +%endif +%if 0%{kmod_devel_package} +install -m 644 -D $PWD/obj/%{kmod_kbuild_dir}/Module.symvers $RPM_BUILD_ROOT/lib/modules/%{kmod_rpm_name}-%{kmod_driver_version}/build/Module.symvers + +if [ -n "%{kmod_devel_src_paths}" ]; then + for i in %{kmod_devel_src_paths}; do + mkdir -p "$RPM_BUILD_ROOT/lib/modules/%{kmod_rpm_name}-%{kmod_driver_version}/build/$(dirname "$i")" + cp -rv "$PWD/source/$i" \ + "$RPM_BUILD_ROOT/lib/modules/%{kmod_rpm_name}-%{kmod_driver_version}/build/$i" + done +fi +%endif + + + +%clean +rm -rf $RPM_BUILD_ROOT + +%changelog +* Wed Oct 21 2020 Eugene Syromiatnikov 5.0_0_dup8.2-2 +- Bump release due to "Package build kmod-redhat-mlx5_core-5.0_0_dup8.2-1.el8_2 + kept gated because not onboarded to gating". + +* Wed Oct 21 2020 Eugene Syromiatnikov 5.0_0_dup8.2-1 +- 86de78c2e6f431762836a4ea5891f891bc0fdcb4 +- mlx5_core kernel module for Driver Update Program +- Resolves: #bz1889733
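The %build and %post sections above wire the rebuilt mlx5_core.ko into the system through a generated depmod override plus the weak-modules script. Below is a minimal verification sketch, assuming the resulting kmod-redhat-mlx5_core RPM is already installed on a RHEL 8.2 host running a 4.18.0-193.el8-based kernel; the commands are illustrative only and are not part of the spec or the commit itself.

# Module files shipped by the package (installed under extra/kmod-redhat-mlx5_core).
rpm -ql kmod-redhat-mlx5_core | grep '\.ko$'

# The depmod override generated in %build and installed as /etc/depmod.d/mlx5_core.conf.
cat /etc/depmod.d/mlx5_core.conf

# weak-modules symlinks (if present) that %post creates for other kABI-compatible
# 4.18.0-193.* kernels; the exact build kernel keeps the module under extra/ instead.
ls -l /lib/modules/$(uname -r)/weak-updates/kmod-redhat-mlx5_core/ 2>/dev/null

# Path of the mlx5_core module the running kernel would actually load.
modinfo -n mlx5_core

The override line produced in %build (roughly "override mlx5_core 4.18.0-193.* weak-updates/kmod-redhat-mlx5_core") is what allows later 4.18.0-193.* z-stream kernels to pick up the same binary module via weak-updates without a rebuild, subject to the kABI greylist check also performed in %build.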