* FAILED: patch "[PATCH] gve: fix incorrect buffer cleanup in" failed to apply to 6.12-stable tree
@ 2026-03-09 10:21 gregkh
2026-03-09 12:47 ` [PATCH 6.12.y] gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL Sasha Levin
0 siblings, 1 reply; 3+ messages in thread
From: gregkh @ 2026-03-09 10:21 UTC (permalink / raw)
To: nktgrg, horms, hramamurthy, jordanrhee, joshwash, kuba; +Cc: stable
The patch below does not apply to the 6.12-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable@vger.kernel.org>.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-6.12.y
git checkout FETCH_HEAD
git cherry-pick -x fb868db5f4bccd7a78219313ab2917429f715cea
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to '<stable@vger.kernel.org>' --in-reply-to '2026030917-ferment-untamed-144d@gregkh' --subject-prefix 'PATCH 6.12.y' HEAD^..
Possible dependencies:
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From fb868db5f4bccd7a78219313ab2917429f715cea Mon Sep 17 00:00:00 2001
From: Ankit Garg <nktgrg@google.com>
Date: Fri, 20 Feb 2026 13:53:24 -0800
Subject: [PATCH] gve: fix incorrect buffer cleanup in
gve_tx_clean_pending_packets for QPL
In DQ-QPL mode, gve_tx_clean_pending_packets() incorrectly uses the RDA
buffer cleanup path. It iterates num_bufs times and attempts to unmap
entries in the dma array.
This leads to two issues:
1. The dma array shares storage with tx_qpl_buf_ids (union).
Interpreting buffer IDs as DMA addresses results in attempting to
unmap incorrect memory locations.
2. num_bufs in QPL mode (counting 2K chunks) can significantly exceed
the size of the dma array, causing out-of-bounds access warnings
(trace below is how we noticed this issue).
UBSAN: array-index-out-of-bounds in
drivers/net/ethernet/google/gve/gve_tx_dqo.c:178:5 index 18 is out of
range for type 'dma_addr_t[18]' (aka 'unsigned long long[18]')
Workqueue: gve gve_service_task [gve]
Call Trace:
<TASK>
dump_stack_lvl+0x33/0xa0
__ubsan_handle_out_of_bounds+0xdc/0x110
gve_tx_stop_ring_dqo+0x182/0x200 [gve]
gve_close+0x1be/0x450 [gve]
gve_reset+0x99/0x120 [gve]
gve_service_task+0x61/0x100 [gve]
process_scheduled_works+0x1e9/0x380
Fix this by properly checking for QPL mode and delegating to
gve_free_tx_qpl_bufs() to reclaim the buffers.
Cc: stable@vger.kernel.org
Fixes: a6fb8d5a8b69 ("gve: Tx path for DQO-QPL")
Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20260220215324.1631350-1-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 28e85730f785..b57e8f13cb51 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -167,6 +167,25 @@ gve_free_pending_packet(struct gve_tx_ring *tx,
}
}
+static void gve_unmap_packet(struct device *dev,
+ struct gve_tx_pending_packet_dqo *pkt)
+{
+ int i;
+
+ if (!pkt->num_bufs)
+ return;
+
+ /* SKB linear portion is guaranteed to be mapped */
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE, 0);
+ }
+ pkt->num_bufs = 0;
+}
+
/* gve_tx_free_desc - Cleans up all pending tx requests and buffers.
*/
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
@@ -176,21 +195,12 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
for (i = 0; i < tx->dqo.num_pending_packets; i++) {
struct gve_tx_pending_packet_dqo *cur_state =
&tx->dqo.pending_packets[i];
- int j;
- for (j = 0; j < cur_state->num_bufs; j++) {
- if (j == 0) {
- dma_unmap_single(tx->dev,
- dma_unmap_addr(cur_state, dma[j]),
- dma_unmap_len(cur_state, len[j]),
- DMA_TO_DEVICE);
- } else {
- dma_unmap_page(tx->dev,
- dma_unmap_addr(cur_state, dma[j]),
- dma_unmap_len(cur_state, len[j]),
- DMA_TO_DEVICE);
- }
- }
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, cur_state);
+ else
+ gve_unmap_packet(tx->dev, cur_state);
+
if (cur_state->skb) {
dev_consume_skb_any(cur_state->skb);
cur_state->skb = NULL;
@@ -1157,22 +1167,6 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
}
-static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pkt)
-{
- int i;
-
- /* SKB linear portion is guaranteed to be mapped */
- dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
- dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
- for (i = 1; i < pkt->num_bufs; i++) {
- netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]),
- DMA_TO_DEVICE, 0);
- }
- pkt->num_bufs = 0;
-}
-
/* Completion types and expected behavior:
* No Miss compl + Packet compl = Packet completed normally.
* Miss compl + Re-inject compl = Packet completed normally.
^ permalink raw reply related [flat|nested] 3+ messages in thread* [PATCH 6.12.y] gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL
2026-03-09 10:21 FAILED: patch "[PATCH] gve: fix incorrect buffer cleanup in" failed to apply to 6.12-stable tree gregkh
@ 2026-03-09 12:47 ` Sasha Levin
2026-03-09 13:26 ` Ankit Garg
0 siblings, 1 reply; 3+ messages in thread
From: Sasha Levin @ 2026-03-09 12:47 UTC (permalink / raw)
To: stable
Cc: Ankit Garg, Jordan Rhee, Harshitha Ramamurthy, Joshua Washington,
Simon Horman, Jakub Kicinski, Sasha Levin
From: Ankit Garg <nktgrg@google.com>
[ Upstream commit fb868db5f4bccd7a78219313ab2917429f715cea ]
In DQ-QPL mode, gve_tx_clean_pending_packets() incorrectly uses the RDA
buffer cleanup path. It iterates num_bufs times and attempts to unmap
entries in the dma array.
This leads to two issues:
1. The dma array shares storage with tx_qpl_buf_ids (union).
Interpreting buffer IDs as DMA addresses results in attempting to
unmap incorrect memory locations.
2. num_bufs in QPL mode (counting 2K chunks) can significantly exceed
the size of the dma array, causing out-of-bounds access warnings
(trace below is how we noticed this issue).
UBSAN: array-index-out-of-bounds in
drivers/net/ethernet/google/gve/gve_tx_dqo.c:178:5 index 18 is out of
range for type 'dma_addr_t[18]' (aka 'unsigned long long[18]')
Workqueue: gve gve_service_task [gve]
Call Trace:
<TASK>
dump_stack_lvl+0x33/0xa0
__ubsan_handle_out_of_bounds+0xdc/0x110
gve_tx_stop_ring_dqo+0x182/0x200 [gve]
gve_close+0x1be/0x450 [gve]
gve_reset+0x99/0x120 [gve]
gve_service_task+0x61/0x100 [gve]
process_scheduled_works+0x1e9/0x380
Fix this by properly checking for QPL mode and delegating to
gve_free_tx_qpl_bufs() to reclaim the buffers.
Cc: stable@vger.kernel.org
Fixes: a6fb8d5a8b69 ("gve: Tx path for DQO-QPL")
Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20260220215324.1631350-1-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
[ netmem_dma_unmap_page_attrs() => dma_unmap_page() ]
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/net/ethernet/google/gve/gve_tx_dqo.c | 54 +++++++++-----------
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 26053cc85d1c5..62a6df009cda9 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -157,6 +157,24 @@ gve_free_pending_packet(struct gve_tx_ring *tx,
}
}
+static void gve_unmap_packet(struct device *dev,
+ struct gve_tx_pending_packet_dqo *pkt)
+{
+ int i;
+
+ if (!pkt->num_bufs)
+ return;
+
+ /* SKB linear portion is guaranteed to be mapped */
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ }
+ pkt->num_bufs = 0;
+}
+
/* gve_tx_free_desc - Cleans up all pending tx requests and buffers.
*/
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
@@ -166,21 +184,12 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
for (i = 0; i < tx->dqo.num_pending_packets; i++) {
struct gve_tx_pending_packet_dqo *cur_state =
&tx->dqo.pending_packets[i];
- int j;
-
- for (j = 0; j < cur_state->num_bufs; j++) {
- if (j == 0) {
- dma_unmap_single(tx->dev,
- dma_unmap_addr(cur_state, dma[j]),
- dma_unmap_len(cur_state, len[j]),
- DMA_TO_DEVICE);
- } else {
- dma_unmap_page(tx->dev,
- dma_unmap_addr(cur_state, dma[j]),
- dma_unmap_len(cur_state, len[j]),
- DMA_TO_DEVICE);
- }
- }
+
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, cur_state);
+ else
+ gve_unmap_packet(tx->dev, cur_state);
+
if (cur_state->skb) {
dev_consume_skb_any(cur_state->skb);
cur_state->skb = NULL;
@@ -1039,21 +1048,6 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
}
-static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pkt)
-{
- int i;
-
- /* SKB linear portion is guaranteed to be mapped */
- dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
- dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
- for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
- }
- pkt->num_bufs = 0;
-}
-
/* Completion types and expected behavior:
* No Miss compl + Packet compl = Packet completed normally.
* Miss compl + Re-inject compl = Packet completed normally.
--
2.51.0
^ permalink raw reply related [flat|nested] 3+ messages in thread* Re: [PATCH 6.12.y] gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL
2026-03-09 12:47 ` [PATCH 6.12.y] gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL Sasha Levin
@ 2026-03-09 13:26 ` Ankit Garg
0 siblings, 0 replies; 3+ messages in thread
From: Ankit Garg @ 2026-03-09 13:26 UTC (permalink / raw)
To: Sasha Levin
Cc: stable, Jordan Rhee, Harshitha Ramamurthy, Joshua Washington,
Simon Horman, Jakub Kicinski
On Mon, Mar 9, 2026 at 5:47 AM Sasha Levin <sashal@kernel.org> wrote:
>
> From: Ankit Garg <nktgrg@google.com>
>
> [ Upstream commit fb868db5f4bccd7a78219313ab2917429f715cea ]
>
> In DQ-QPL mode, gve_tx_clean_pending_packets() incorrectly uses the RDA
> buffer cleanup path. It iterates num_bufs times and attempts to unmap
> entries in the dma array.
>
> This leads to two issues:
> 1. The dma array shares storage with tx_qpl_buf_ids (union).
> Interpreting buffer IDs as DMA addresses results in attempting to
> unmap incorrect memory locations.
> 2. num_bufs in QPL mode (counting 2K chunks) can significantly exceed
> the size of the dma array, causing out-of-bounds access warnings
> (trace below is how we noticed this issue).
>
> UBSAN: array-index-out-of-bounds in
> drivers/net/ethernet/google/gve/gve_tx_dqo.c:178:5 index 18 is out of
> range for type 'dma_addr_t[18]' (aka 'unsigned long long[18]')
> Workqueue: gve gve_service_task [gve]
> Call Trace:
> <TASK>
> dump_stack_lvl+0x33/0xa0
> __ubsan_handle_out_of_bounds+0xdc/0x110
> gve_tx_stop_ring_dqo+0x182/0x200 [gve]
> gve_close+0x1be/0x450 [gve]
> gve_reset+0x99/0x120 [gve]
> gve_service_task+0x61/0x100 [gve]
> process_scheduled_works+0x1e9/0x380
>
> Fix this by properly checking for QPL mode and delegating to
> gve_free_tx_qpl_bufs() to reclaim the buffers.
>
> Cc: stable@vger.kernel.org
> Fixes: a6fb8d5a8b69 ("gve: Tx path for DQO-QPL")
> Signed-off-by: Ankit Garg <nktgrg@google.com>
> Reviewed-by: Jordan Rhee <jordanrhee@google.com>
> Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
> Signed-off-by: Joshua Washington <joshwash@google.com>
> Reviewed-by: Simon Horman <horms@kernel.org>
> Link: https://patch.msgid.link/20260220215324.1631350-1-joshwash@google.com
> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
> [ netmem_dma_unmap_page_attrs() => dma_unmap_page() ]
> Signed-off-by: Sasha Levin <sashal@kernel.org>
Reviewed-by: Ankit Garg <nktgrg@google.com>
Thank you very much!
> ---
> drivers/net/ethernet/google/gve/gve_tx_dqo.c | 54 +++++++++-----------
> 1 file changed, 24 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
> index 26053cc85d1c5..62a6df009cda9 100644
> --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
> +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
> @@ -157,6 +157,24 @@ gve_free_pending_packet(struct gve_tx_ring *tx,
> }
> }
>
> +static void gve_unmap_packet(struct device *dev,
> + struct gve_tx_pending_packet_dqo *pkt)
> +{
> + int i;
> +
> + if (!pkt->num_bufs)
> + return;
> +
> + /* SKB linear portion is guaranteed to be mapped */
> + dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
> + dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
> + for (i = 1; i < pkt->num_bufs; i++) {
> + dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
> + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
> + }
> + pkt->num_bufs = 0;
> +}
> +
> /* gve_tx_free_desc - Cleans up all pending tx requests and buffers.
> */
> static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
> @@ -166,21 +184,12 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
> for (i = 0; i < tx->dqo.num_pending_packets; i++) {
> struct gve_tx_pending_packet_dqo *cur_state =
> &tx->dqo.pending_packets[i];
> - int j;
> -
> - for (j = 0; j < cur_state->num_bufs; j++) {
> - if (j == 0) {
> - dma_unmap_single(tx->dev,
> - dma_unmap_addr(cur_state, dma[j]),
> - dma_unmap_len(cur_state, len[j]),
> - DMA_TO_DEVICE);
> - } else {
> - dma_unmap_page(tx->dev,
> - dma_unmap_addr(cur_state, dma[j]),
> - dma_unmap_len(cur_state, len[j]),
> - DMA_TO_DEVICE);
> - }
> - }
> +
> + if (tx->dqo.qpl)
> + gve_free_tx_qpl_bufs(tx, cur_state);
> + else
> + gve_unmap_packet(tx->dev, cur_state);
> +
> if (cur_state->skb) {
> dev_consume_skb_any(cur_state->skb);
> cur_state->skb = NULL;
> @@ -1039,21 +1048,6 @@ static void remove_from_list(struct gve_tx_ring *tx,
> }
> }
>
> -static void gve_unmap_packet(struct device *dev,
> - struct gve_tx_pending_packet_dqo *pkt)
> -{
> - int i;
> -
> - /* SKB linear portion is guaranteed to be mapped */
> - dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
> - dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
> - for (i = 1; i < pkt->num_bufs; i++) {
> - dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
> - dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
> - }
> - pkt->num_bufs = 0;
> -}
> -
> /* Completion types and expected behavior:
> * No Miss compl + Packet compl = Packet completed normally.
> * Miss compl + Re-inject compl = Packet completed normally.
> --
> 2.51.0
>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2026-03-09 13:26 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-03-09 10:21 FAILED: patch "[PATCH] gve: fix incorrect buffer cleanup in" failed to apply to 6.12-stable tree gregkh
2026-03-09 12:47 ` [PATCH 6.12.y] gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL Sasha Levin
2026-03-09 13:26 ` Ankit Garg
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox