From: Daniel Machon <daniel.machon@microchip.com>
To: Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Horatiu Vultur <horatiu.vultur@microchip.com>,
Steen Hegelund <steen.hegelund@microchip.com>,
<UNGLinuxDriver@microchip.com>,
"Alexei Starovoitov" <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
"Jesper Dangaard Brouer" <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
Stanislav Fomichev <sdf@fomichev.me>,
Herve Codina <herve.codina@bootlin.com>,
Arnd Bergmann <arnd@arndb.de>,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Mohsin Bashir <mohsin.bashr@gmail.com>
Cc: <netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<bpf@vger.kernel.org>, <linux-arm-kernel@lists.infradead.org>
Subject: [PATCH net-next v3 10/13] net: lan966x: add PCIe FDMA MTU change support
Date: Mon, 4 May 2026 16:23:23 +0200 [thread overview]
Message-ID: <20260504-lan966x-pci-fdma-v3-10-a56f5740d870@microchip.com> (raw)
In-Reply-To: <20260504-lan966x-pci-fdma-v3-0-a56f5740d870@microchip.com>
Add MTU change support for the PCIe FDMA path. When the MTU changes,
the contiguous ATU-mapped RX and TX buffers are reallocated with the
new size. On allocation failure, the existing buffers are reused
after being reset.
Cap the PCIe DCB ring at 256 (FDMA_PCI_DCB_MAX) to keep the entire
contiguous allocation under the MAX_PAGE_ORDER limit at jumbo MTU;
with 512 DCBs the allocation would exceed that limit.
Tested-by: Herve Codina <herve.codina@bootlin.com>
Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
---
.../ethernet/microchip/lan966x/lan966x_fdma_pci.c | 157 ++++++++++++++++++++-
1 file changed, 154 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma_pci.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma_pci.c
index 2c5488046077..491ddc337760 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma_pci.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma_pci.c
@@ -3,6 +3,11 @@
#include "fdma_api.h"
#include "lan966x_main.h"
+/* Ring must fit in one MAX_PAGE_ORDER DMA block; 512 DCBs overflows
+ * at jumbo MTU.
+ */
+#define FDMA_PCI_DCB_MAX 256
+
static int lan966x_fdma_pci_dataptr_cb(struct fdma *fdma, int dcb, int db,
u64 *dataptr)
{
@@ -321,7 +326,7 @@ static int lan966x_fdma_pci_init(struct lan966x *lan966x)
lan966x->rx.lan966x = lan966x;
lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
rx_fdma->channel_id = FDMA_XTR_CHANNEL;
- rx_fdma->n_dcbs = FDMA_DCB_MAX;
+ rx_fdma->n_dcbs = FDMA_PCI_DCB_MAX;
rx_fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
rx_fdma->priv = lan966x;
rx_fdma->db_size = FDMA_PCI_DB_SIZE(lan966x->rx.max_mtu);
@@ -331,7 +336,7 @@ static int lan966x_fdma_pci_init(struct lan966x *lan966x)
lan966x->tx.lan966x = lan966x;
tx_fdma->channel_id = FDMA_INJ_CHANNEL;
- tx_fdma->n_dcbs = FDMA_DCB_MAX;
+ tx_fdma->n_dcbs = FDMA_PCI_DCB_MAX;
tx_fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
tx_fdma->priv = lan966x;
tx_fdma->db_size = FDMA_PCI_DB_SIZE(lan966x->rx.max_mtu);
@@ -354,9 +359,155 @@ static int lan966x_fdma_pci_init(struct lan966x *lan966x)
return 0;
}
+/* Reset existing rx and tx buffers. */
+static void lan966x_fdma_pci_reset_mem(struct lan966x *lan966x)
+{
+ struct lan966x_rx *rx = &lan966x->rx;
+ struct lan966x_tx *tx = &lan966x->tx;
+
+ memset(rx->fdma.dcbs, 0, rx->fdma.size);
+ memset(tx->fdma.dcbs, 0, tx->fdma.size);
+
+ fdma_dcbs_init(&rx->fdma,
+ FDMA_DCB_INFO_DATAL(rx->fdma.db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ fdma_dcbs_init(&tx->fdma,
+ FDMA_DCB_INFO_DATAL(tx->fdma.db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ lan966x_fdma_llp_configure(lan966x,
+ tx->fdma.atu_region->base_addr,
+ tx->fdma.channel_id);
+ lan966x_fdma_llp_configure(lan966x,
+ rx->fdma.atu_region->base_addr,
+ rx->fdma.channel_id);
+}
+
+/* Drain in-flight xmit callers and stop all TX queues on every port. */
+static void lan966x_fdma_pci_stop_netdev(struct lan966x *lan966x)
+{
+ for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port = lan966x->ports[i];
+
+ if (port)
+ netif_tx_disable(port->dev);
+ }
+}
+
+/* Wake all TX queues on every port (undoes lan966x_fdma_pci_stop_netdev). */
+static void lan966x_fdma_pci_wakeup_netdev(struct lan966x *lan966x)
+{
+ for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+ struct lan966x_port *port = lan966x->ports[i];
+
+ if (port)
+ netif_tx_wake_all_queues(port->dev);
+ }
+}
+
+static int lan966x_fdma_pci_reload(struct lan966x *lan966x, int new_mtu)
+{
+ struct fdma tx_fdma_old = lan966x->tx.fdma;
+ struct fdma rx_fdma_old = lan966x->rx.fdma;
+ u32 old_mtu = lan966x->rx.max_mtu;
+ int err;
+
+ napi_synchronize(&lan966x->napi);
+ napi_disable(&lan966x->napi);
+ lan966x_fdma_pci_stop_netdev(lan966x);
+ lan966x_fdma_rx_disable(&lan966x->rx);
+ lan966x_fdma_tx_disable(&lan966x->tx);
+
+ lan966x->rx.max_mtu = new_mtu;
+
+ lan966x->tx.fdma.db_size = FDMA_PCI_DB_SIZE(lan966x->rx.max_mtu);
+ lan966x->tx.fdma.size = fdma_get_size_contiguous(&lan966x->tx.fdma);
+ lan966x->rx.fdma.db_size = FDMA_PCI_DB_SIZE(lan966x->rx.max_mtu);
+ lan966x->rx.fdma.size = fdma_get_size_contiguous(&lan966x->rx.fdma);
+
+ err = lan966x_fdma_pci_rx_alloc(&lan966x->rx);
+ if (err)
+ goto restore;
+
+ err = lan966x_fdma_pci_tx_alloc(&lan966x->tx);
+ if (err) {
+ fdma_free_coherent_and_unmap(lan966x->dev, &lan966x->rx.fdma);
+ goto restore;
+ }
+
+ /* Free and unmap old memory. */
+ fdma_free_coherent_and_unmap(lan966x->dev, &rx_fdma_old);
+ fdma_free_coherent_and_unmap(lan966x->dev, &tx_fdma_old);
+
+ /* Keep this order: rx_start, wakeup_netdev, napi_enable. */
+ lan966x_fdma_rx_start(&lan966x->rx);
+ lan966x_fdma_pci_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+restore:
+
+ /* No new buffers are allocated at this point. Use the old buffers,
+ * but reset them before starting the FDMA again.
+ */
+
+ memcpy(&lan966x->tx.fdma, &tx_fdma_old, sizeof(struct fdma));
+ memcpy(&lan966x->rx.fdma, &rx_fdma_old, sizeof(struct fdma));
+
+ lan966x->rx.max_mtu = old_mtu;
+
+ lan966x_fdma_pci_reset_mem(lan966x);
+
+ /* Keep this order: rx_start, wakeup_netdev, napi_enable. */
+ lan966x_fdma_rx_start(&lan966x->rx);
+ lan966x_fdma_pci_wakeup_netdev(lan966x);
+ napi_enable(&lan966x->napi);
+
+ return err;
+}
+
+static int __lan966x_fdma_pci_reload(struct lan966x *lan966x, int max_mtu)
+{
+ int err;
+ u32 val;
+
+ /* Disable the CPU port. */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Flush the CPU queues. */
+ readx_poll_timeout(lan966x_qsys_sw_status,
+ lan966x,
+ val,
+ !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+
+ /* Add a sleep in case there are frames between the queues and the CPU
+ * port
+ */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = lan966x_fdma_pci_reload(lan966x, max_mtu);
+
+ /* Enable back the CPU port. */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ return err;
+}
+
static int lan966x_fdma_pci_resize(struct lan966x *lan966x)
{
- return -EOPNOTSUPP;
+ int max_mtu;
+
+ max_mtu = lan966x_fdma_get_max_frame(lan966x);
+ if (max_mtu == lan966x->rx.max_mtu)
+ return 0;
+
+ return __lan966x_fdma_pci_reload(lan966x, max_mtu);
}
static void lan966x_fdma_pci_deinit(struct lan966x *lan966x)
--
2.34.1
next prev parent reply other threads:[~2026-05-04 14:24 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-04 14:23 [PATCH net-next v3 00/13] net: lan966x: add support for PCIe FDMA Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 01/13] MAINTAINERS: add FDMA library to Sparx5 SoC entry Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 02/13] net: microchip: fdma: rename contiguous dataptr helpers Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 03/13] net: microchip: fdma: add PCIe ATU support Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 04/13] net: lan966x: add FDMA LLP register write helper Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 05/13] net: lan966x: export FDMA helpers for reuse Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 06/13] net: lan966x: add FDMA ops dispatch for PCIe support Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 07/13] net: lan966x: clear FDMA interrupt stickies after switch reset Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 08/13] net: lan966x: add shutdown callback to stop FDMA on reboot Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 09/13] net: lan966x: add PCIe FDMA support Daniel Machon
2026-05-07 8:54 ` Paolo Abeni
2026-05-07 9:21 ` Daniel Machon
2026-05-04 14:23 ` Daniel Machon [this message]
2026-05-04 14:23 ` [PATCH net-next v3 11/13] net: lan966x: add PCIe FDMA XDP support Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 12/13] misc: lan966x-pci: dts: extend cpu reg to cover PCIE DBI space Daniel Machon
2026-05-04 14:23 ` [PATCH net-next v3 13/13] misc: lan966x-pci: dts: add fdma interrupt to overlay Daniel Machon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260504-lan966x-pci-fdma-v3-10-a56f5740d870@microchip.com \
--to=daniel.machon@microchip.com \
--cc=UNGLinuxDriver@microchip.com \
--cc=andrew+netdev@lunn.ch \
--cc=arnd@arndb.de \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=gregkh@linuxfoundation.org \
--cc=hawk@kernel.org \
--cc=herve.codina@bootlin.com \
--cc=horatiu.vultur@microchip.com \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mohsin.bashr@gmail.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=sdf@fomichev.me \
--cc=steen.hegelund@microchip.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox