From: Amery Hung <ameryhung@gmail.com>
To: bpf@vger.kernel.org
Cc: netdev@vger.kernel.org, alexei.starovoitov@gmail.com,
	andrii@kernel.org, daniel@iogearbox.net, kuba@kernel.org,
	martin.lau@kernel.org, mohsin.bashr@gmail.com, saeedm@nvidia.com,
	tariqt@nvidia.com, mbloch@nvidia.com,
	maciej.fijalkowski@intel.com, kernel-team@meta.com
Subject: [RFC bpf-next v1 1/7] net/mlx5e: Fix generating skb from nonlinear xdp_buff
Date: Mon, 25 Aug 2025 12:39:12 -0700
Message-ID: <20250825193918.3445531-2-ameryhung@gmail.com>
In-Reply-To: <20250825193918.3445531-1-ameryhung@gmail.com>

xdp programs can change the layout of an xdp_buff through
bpf_xdp_adjust_head() and bpf_xdp_adjust_tail(). Therefore, the driver
cannot assume the size of the linear data area nor of the fragments.
Fix the bug in the mlx5e driver by generating the skb according to the
xdp_buff layout.

Currently, when handling multi-buf xdp, the mlx5e driver assumes the
layout of an xdp_buff to be unchanged after the xdp program runs. That
is, the linear data area stays empty and the fragments remain the
same. This may cause the driver to generate an erroneous skb or to
trigger a kernel warning. When an xdp program adds linear data through
bpf_xdp_adjust_head(), the linear data is ignored, as
mlx5e_build_linear_skb() builds an skb with an empty linear data area
and then pulls data from the fragments to fill it. When an xdp program
shrinks the nonlinear data through bpf_xdp_adjust_tail(), the delta
passed to __pskb_pull_tail() may exceed the actual nonlinear data size
and trigger the BUG_ON() in it.
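
For illustration, a minimal sketch of a frags-aware xdp program
(hypothetical program name; it must be loaded with BPF_F_XDP_HAS_FRAGS,
which the "xdp.frags" section sets) that changes the layout in both
directions, leaving the driver with a non-empty linear area and
shrunken fragments:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp.frags")
  int xdp_reshape(struct xdp_md *ctx)
  {
  	/* Grow the linear data area by 32 bytes at the front. */
  	if (bpf_xdp_adjust_head(ctx, -32))
  		return XDP_DROP;

  	/* Shrink the packet by 64 bytes from the tail; on a multi-buf
  	 * packet this shrinks (or frees) the last fragment.
  	 */
  	if (bpf_xdp_adjust_tail(ctx, -64))
  		return XDP_DROP;

  	return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";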

To fix the issue, first build the skb with a linear data area matching
the xdp_buff. Then, call __pskb_pull_tail() to fill the linear data
area with up to MLX5E_RX_MAX_HEAD bytes. In addition, recalculate
nr_frags and truesize after the xdp program runs.
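
In sketch form, the core of the fix for the striding-RQ path (len,
headlen, and the surrounding variables are as in the hunks below):

  /* The linear length is whatever the xdp program left behind. */
  len = mxbuf->xdp.data_end - mxbuf->xdp.data;

  skb = mlx5e_build_linear_skb(rq, mxbuf->xdp.data_hard_start,
  			     linear_frame_sz,
  			     mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
  			     len,
  			     mxbuf->xdp.data - mxbuf->xdp.data_meta);

  /* Pull at most MLX5E_RX_MAX_HEAD bytes from the fragments into the
   * linear area, bounded by what the fragments actually hold so that
   * __pskb_pull_tail() cannot hit its BUG_ON().
   */
  headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len, sinfo->xdp_frags_size);
  __pskb_pull_tail(skb, headlen);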

Fixes: f52ac7028bec ("net/mlx5e: RX, Add XDP multi-buffer support in Striding RQ")
Signed-off-by: Amery Hung <ameryhung@gmail.com>
---
 .../net/ethernet/mellanox/mlx5/core/en_rx.c   | 59 ++++++++++++++-----
 1 file changed, 43 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b8c609d91d11..c5173f1ccb4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1725,16 +1725,17 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 			     struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
 {
 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+	struct mlx5e_wqe_frag_info *pwi, *head_wi = wi;
 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
-	struct mlx5e_wqe_frag_info *head_wi = wi;
 	u16 rx_headroom = rq->buff.headroom;
 	struct mlx5e_frag_page *frag_page;
 	struct skb_shared_info *sinfo;
-	u32 frag_consumed_bytes;
+	u32 frag_consumed_bytes, i;
 	struct bpf_prog *prog;
 	struct sk_buff *skb;
 	dma_addr_t addr;
 	u32 truesize;
+	u8 nr_frags;
 	void *va;
 
 	frag_page = wi->frag_page;
@@ -1775,14 +1776,26 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			struct mlx5e_wqe_frag_info *pwi;
+			pwi = head_wi;
+			while (pwi < wi && pwi->frag_page->netmem != sinfo->frags[0].netmem)
+				pwi++;
 
-			for (pwi = head_wi; pwi < wi; pwi++)
+			for (i = 0; i < sinfo->nr_frags; i++, pwi++)
 				pwi->frag_page->frags++;
 		}
 		return NULL; /* page/packet was consumed by XDP */
 	}
 
+	nr_frags = sinfo->nr_frags;
+	pwi = head_wi + 1;
+
+	if (prog) {
+		truesize = sinfo->nr_frags * frag_info->frag_stride;
+
+		while (pwi < wi && pwi->frag_page->netmem != sinfo->frags[0].netmem)
+			pwi++;
+	}
+
 	skb = mlx5e_build_linear_skb(
 		rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
 		mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
@@ -1796,12 +1809,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 	if (xdp_buff_has_frags(&mxbuf->xdp)) {
 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
-		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+		xdp_update_skb_shared_info(skb, nr_frags,
 					   sinfo->xdp_frags_size, truesize,
 					   xdp_buff_is_frag_pfmemalloc(
 						&mxbuf->xdp));
 
-		for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
+		for (i = 0; i < nr_frags; i++, pwi++)
 			pwi->frag_page->frags++;
 	}
 
@@ -2073,12 +2086,18 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	}
 
 	if (prog) {
+		u8 nr_frags;
+		u32 len, i;
+
 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-				struct mlx5e_frag_page *pfp;
+				struct mlx5e_frag_page *pagep = head_page;
+
+				while (pagep < frag_page && pagep->netmem != sinfo->frags[0].netmem)
+					pagep++;
 
-				for (pfp = head_page; pfp < frag_page; pfp++)
-					pfp->frags++;
+				for (i = 0; i < sinfo->nr_frags; i++, pagep++)
+					pagep->frags++;
 
 				wi->linear_page.frags++;
 			}
@@ -2087,9 +2106,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			return NULL; /* page/packet was consumed by XDP */
 		}
 
+		len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+		nr_frags = sinfo->nr_frags;
+
 		skb = mlx5e_build_linear_skb(
 			rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
-			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
 		if (unlikely(!skb)) {
 			mlx5e_page_release_fragmented(rq->page_pool,
@@ -2102,20 +2124,25 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
 
 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
-			struct mlx5e_frag_page *pagep;
+			struct mlx5e_frag_page *pagep = head_page;
+
+			truesize = nr_frags * PAGE_SIZE;
 
 			/* sinfo->nr_frags is reset by build_skb, calculate again. */
-			xdp_update_skb_shared_info(skb, frag_page - head_page,
+			xdp_update_skb_shared_info(skb, nr_frags,
 						   sinfo->xdp_frags_size, truesize,
 						   xdp_buff_is_frag_pfmemalloc(
 							&mxbuf->xdp));
 
-			pagep = head_page;
-			do
+			while (pagep < frag_page && pagep->netmem != sinfo->frags[0].netmem)
+				pagep++;
+
+			for (i = 0; i < nr_frags; i++, pagep++)
 				pagep->frags++;
-			while (++pagep < frag_page);
+
+			headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len, sinfo->xdp_frags_size);
+			__pskb_pull_tail(skb, headlen);
 		}
-		__pskb_pull_tail(skb, headlen);
 	} else {
 		dma_addr_t addr;
 
-- 
2.47.3