netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH net] net: dlink: handle dma_map_single() failure properly
@ 2025-10-02 15:26 Yeounsu Moon
  2025-10-03  9:44 ` Simon Horman
  0 siblings, 1 reply; 7+ messages in thread
From: Yeounsu Moon @ 2025-10-02 15:26 UTC (permalink / raw)
  To: Andrew Lunn, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni
  Cc: netdev, linux-kernel, Yeounsu Moon

Add error handling by checking `dma_mapping_error()` and cleaning up
the `skb` using the appropriate `dev_kfree_skb*()` variant.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
Tested-on: D-Link DGE-550T Rev-A3
---
 drivers/net/ethernet/dlink/dl2k.c | 49 ++++++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1996d2e4e3e2..a821c9921745 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -508,6 +508,7 @@ static int alloc_list(struct net_device *dev)
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* Allocated fixed size of skbuff */
 		struct sk_buff *skb;
+		dma_addr_t addr;
 
 		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
@@ -516,13 +517,19 @@ static int alloc_list(struct net_device *dev)
 			return -ENOMEM;
 		}
 
+		addr = dma_map_single(&np->pdev->dev, skb->data,
+				      np->rx_buf_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pdev->dev, addr)) {
+			dev_kfree_skb(skb);
+			np->rx_skbuff[i] = NULL;
+			free_list(dev);
+			return -ENOMEM;
+		}
 		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
 						((i + 1) % RX_RING_SIZE) *
 						sizeof(struct netdev_desc));
 		/* Rubicon now supports 40 bits of addressing space. */
-		np->rx_ring[i].fraginfo =
-		    cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
-					       np->rx_buf_sz, DMA_FROM_DEVICE));
+		np->rx_ring[i].fraginfo = cpu_to_le64(addr);
 		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
 	}
 
@@ -674,6 +681,7 @@ rio_timer (struct timer_list *t)
 		/* Re-allocate skbuffs to fill the descriptor ring */
 		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
 			struct sk_buff *skb;
+			dma_addr_t addr;
 			entry = np->old_rx % RX_RING_SIZE;
 			/* Dropped packets don't need to re-allocate */
 			if (np->rx_skbuff[entry] == NULL) {
@@ -686,10 +694,16 @@ rio_timer (struct timer_list *t)
 						dev->name, entry);
 					break;
 				}
+				addr = dma_map_single(&np->pdev->dev, skb->data,
+						      np->rx_buf_sz,
+						      DMA_FROM_DEVICE);
+				if (dma_mapping_error(&np->pdev->dev, addr)) {
+					dev_kfree_skb_irq(skb);
+					np->rx_ring[entry].fraginfo = 0;
+					break;
+				}
 				np->rx_skbuff[entry] = skb;
-				np->rx_ring[entry].fraginfo =
-				    cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
-								np->rx_buf_sz, DMA_FROM_DEVICE));
+				np->rx_ring[entry].fraginfo = cpu_to_le64(addr);
 			}
 			np->rx_ring[entry].fraginfo |=
 			    cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -720,6 +734,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->ioaddr;
 	struct netdev_desc *txdesc;
+	dma_addr_t addr;
 	unsigned entry;
 	u64 tfc_vlan_tag = 0;
 
@@ -743,8 +758,14 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
 		    ((u64)np->vlan << 32) |
 		    ((u64)skb->priority << 45);
 	}
-	txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
-						       skb->len, DMA_TO_DEVICE));
+	addr = dma_map_single(&np->pdev->dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (dma_mapping_error(&np->pdev->dev, addr)) {
+		dev_kfree_skb_any(skb);
+		np->tx_skbuff[entry] = NULL;
+		return NETDEV_TX_OK;
+	}
+	txdesc->fraginfo = cpu_to_le64(addr);
 	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
 
 	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
@@ -1007,6 +1028,7 @@ receive_packet (struct net_device *dev)
 	entry = np->old_rx;
 	while (entry != np->cur_rx) {
 		struct sk_buff *skb;
+		dma_addr_t addr;
 		/* Dropped packets don't need to re-allocate */
 		if (np->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
@@ -1018,10 +1040,15 @@ receive_packet (struct net_device *dev)
 					dev->name, entry);
 				break;
 			}
+			addr = dma_map_single(&np->pdev->dev, skb->data,
+					      np->rx_buf_sz, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&np->pdev->dev, addr)) {
+				dev_kfree_skb_irq(skb);
+				np->rx_ring[entry].fraginfo = 0;
+				break;
+			}
 			np->rx_skbuff[entry] = skb;
-			np->rx_ring[entry].fraginfo =
-			    cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
-						       np->rx_buf_sz, DMA_FROM_DEVICE));
+			np->rx_ring[entry].fraginfo = cpu_to_le64(addr);
 		}
 		np->rx_ring[entry].fraginfo |=
 		    cpu_to_le64((u64)np->rx_buf_sz << 48);
-- 
2.51.0


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2025-10-09 15:56 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-10-02 15:26 [PATCH net] net: dlink: handle dma_map_single() failure properly Yeounsu Moon
2025-10-03  9:44 ` Simon Horman
2025-10-03 12:29   ` Yeounsu Moon
2025-10-03 15:55     ` Jakub Kicinski
2025-10-05  5:22   ` Yeounsu Moon
2025-10-08  9:13     ` Simon Horman
2025-10-09 15:56       ` Yeounsu Moon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).