public inbox for stable@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] net: stmmac: fix integer underflow in chain mode jumbo_frm
@ 2026-03-21  4:10 Tyllis Xu
  2026-03-23 14:18 ` Simon Horman
  0 siblings, 1 reply; 4+ messages in thread
From: Tyllis Xu @ 2026-03-21  4:10 UTC (permalink / raw)
  To: netdev
  Cc: linux-kernel, andrew+netdev, davem, edumazet, kuba, pabeni,
	rmk+kernel, maxime.chevallier, peppe.cavallaro, rayagond, stable,
	danisjiang, ychen, Tyllis Xu

The jumbo_frm() chain-mode implementation unconditionally computes

    len = nopaged_len - bmax;

where nopaged_len = skb_headlen(skb) (linear bytes only) and bmax is
BUF_SIZE_8KiB or BUF_SIZE_2KiB.  However, the caller stmmac_xmit()
decides to invoke jumbo_frm() based on skb->len (total length including
page fragments):

    is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

When a packet has a small linear portion (nopaged_len <= bmax) but a
large total length due to page fragments (skb->len > bmax), the
subtraction wraps as an unsigned integer, producing a huge len value
(~0xFFFFxxxx).  This causes the while (len != 0) loop to execute
hundreds of thousands of iterations, passing skb->data + bmax * i
pointers far beyond the skb buffer to dma_map_single().  On IOMMU-less
SoCs (the typical deployment for stmmac), this maps arbitrary kernel
memory to the DMA engine, constituting a kernel memory disclosure and
potential memory corruption from hardware.

The ring-mode counterpart already guards against this with:

    if (nopaged_len > BUF_SIZE_8KiB) { ... use len ... }
    else { ... map nopaged_len directly ... }

Apply the same pattern to chain mode: guard the chunked-DMA path with
if (nopaged_len > bmax), and add an else branch that maps the entire
linear portion as a single descriptor when it fits within bmax.  The
fragment loop in stmmac_xmit() handles page fragments afterward.

Fixes: 286a83721720 ("stmmac: add CHAINED descriptor mode support (V4)")
Cc: stable@vger.kernel.org
Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
---
 drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 71 ++++++++++++++---------
 1 file changed, 44 insertions(+), 27 deletions(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index bf351bbec57f..c8980482dea2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -31,52 +31,65 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
 	else
 		bmax = BUF_SIZE_2KiB;

-	len = nopaged_len - bmax;
-
-	des2 = dma_map_single(priv->device, skb->data,
-			      bmax, DMA_TO_DEVICE);
-	desc->des2 = cpu_to_le32(des2);
-	if (dma_mapping_error(priv->device, des2))
-		return -1;
-	tx_q->tx_skbuff_dma[entry].buf = des2;
-	tx_q->tx_skbuff_dma[entry].len = bmax;
-	/* do not close the descriptor and do not set own bit */
-	stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
-			0, false, skb->len);
-
-	while (len != 0) {
-		tx_q->tx_skbuff[entry] = NULL;
-		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
-		desc = tx_q->dma_tx + entry;
-
-		if (len > bmax) {
-			des2 = dma_map_single(priv->device,
-					      (skb->data + bmax * i),
-					      bmax, DMA_TO_DEVICE);
-			desc->des2 = cpu_to_le32(des2);
-			if (dma_mapping_error(priv->device, des2))
-				return -1;
-			tx_q->tx_skbuff_dma[entry].buf = des2;
-			tx_q->tx_skbuff_dma[entry].len = bmax;
-			stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
-					STMMAC_CHAIN_MODE, 1, false, skb->len);
-			len -= bmax;
-			i++;
-		} else {
-			des2 = dma_map_single(priv->device,
-					      (skb->data + bmax * i), len,
-					      DMA_TO_DEVICE);
-			desc->des2 = cpu_to_le32(des2);
-			if (dma_mapping_error(priv->device, des2))
-				return -1;
-			tx_q->tx_skbuff_dma[entry].buf = des2;
-			tx_q->tx_skbuff_dma[entry].len = len;
-			/* last descriptor can be set now */
-			stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
-					STMMAC_CHAIN_MODE, 1, true, skb->len);
-			len = 0;
+	if (nopaged_len > bmax) {
+		len = nopaged_len - bmax;
+
+		des2 = dma_map_single(priv->device, skb->data,
+				      bmax, DMA_TO_DEVICE);
+		desc->des2 = cpu_to_le32(des2);
+		if (dma_mapping_error(priv->device, des2))
+			return -1;
+		tx_q->tx_skbuff_dma[entry].buf = des2;
+		tx_q->tx_skbuff_dma[entry].len = bmax;
+		/* do not close the descriptor and do not set own bit */
+		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+				0, false, skb->len);
+
+		while (len != 0) {
+			tx_q->tx_skbuff[entry] = NULL;
+			entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+			desc = tx_q->dma_tx + entry;
+
+			if (len > bmax) {
+				des2 = dma_map_single(priv->device,
+						      (skb->data + bmax * i),
+						      bmax, DMA_TO_DEVICE);
+				desc->des2 = cpu_to_le32(des2);
+				if (dma_mapping_error(priv->device, des2))
+					return -1;
+				tx_q->tx_skbuff_dma[entry].buf = des2;
+				tx_q->tx_skbuff_dma[entry].len = bmax;
+				stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
+						STMMAC_CHAIN_MODE, 1, false, skb->len);
+				len -= bmax;
+				i++;
+			} else {
+				des2 = dma_map_single(priv->device,
+						      (skb->data + bmax * i), len,
+						      DMA_TO_DEVICE);
+				desc->des2 = cpu_to_le32(des2);
+				if (dma_mapping_error(priv->device, des2))
+					return -1;
+				tx_q->tx_skbuff_dma[entry].buf = des2;
+				tx_q->tx_skbuff_dma[entry].len = len;
+				/* last descriptor can be set now */
+				stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
+						STMMAC_CHAIN_MODE, 1, true, skb->len);
+				len = 0;
+			}
 		}
-	}
+	} else {
+		des2 = dma_map_single(priv->device, skb->data,
+				      nopaged_len, DMA_TO_DEVICE);
+		desc->des2 = cpu_to_le32(des2);
+		if (dma_mapping_error(priv->device, des2))
+			return -1;
+		tx_q->tx_skbuff_dma[entry].buf = des2;
+		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
+				STMMAC_CHAIN_MODE, 0, !skb_is_nonlinear(skb),
+				skb->len);
+	}

 	tx_q->cur_tx = entry;

--
2.39.5

^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-03-25 17:09 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-21  4:10 [PATCH] net: stmmac: fix integer underflow in chain mode jumbo_frm Tyllis Xu
2026-03-23 14:18 ` Simon Horman
2026-03-24  6:07   ` Tyllis Xu
2026-03-25 17:09     ` Russell King (Oracle)

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox