public inbox for stable@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 2/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock
       [not found] <20260417023119.3830723-1-werner@verivus.com>
@ 2026-04-17  2:31 ` Werner Kasselman
  2026-04-20 20:43   ` Martin KaFai Lau
       [not found] ` <20260420221621.1441707-1-werner@verivus.com>
       [not found] ` <20260420230030.2802408-1-werner@verivus.com>
  2 siblings, 1 reply; 5+ messages in thread
From: Werner Kasselman @ 2026-04-17  2:31 UTC (permalink / raw)
  To: bpf@vger.kernel.org, netdev@vger.kernel.org
  Cc: andrii@kernel.org, ast@kernel.org, brakmo@fb.com,
	daniel@iogearbox.net, davem@davemloft.net, eddyz87@gmail.com,
	edumazet@google.com, haoluo@google.com, horms@kernel.org,
	john.fastabend@gmail.com, jolsa@kernel.org, kpsingh@kernel.org,
	kuba@kernel.org, linux-kernel@vger.kernel.org,
	linux-kselftest@vger.kernel.org, martin.lau@linux.dev,
	pabeni@redhat.com, sdf@fomichev.me, shuah@kernel.org,
	song@kernel.org, yonghong.song@linux.dev, jiayuan.chen@linux.dev,
	Werner Kasselman, stable@vger.kernel.org

sock_ops_convert_ctx_access() reads rtt_min without the
is_locked_tcp_sock guard used for every other tcp_sock field. On
request_sock-backed sock_ops callbacks, sk points at a
tcp_request_sock and the converted load reads past the end of the
allocation.

Reuse SOCK_OPS_LOAD_TCP_SOCK_FIELD() for the rtt_min access and compute
the offset with offsetof(struct minmax_sample, v). This leaves the
addressed byte unchanged from the old sizeof_field(struct minmax_sample, t)
expression, while making rtt_min consistent with every other tcp_sock
field.

This also picks up the same dst_reg == src_reg handling used by the
other guarded field loads. Extend the sock_ops_get_sk selftest with an
rtt_min subtest that checks request_sock-backed !fullsock callbacks read
zero instead of leaking request_sock-adjacent memory.

Found via AST-based call-graph analysis using sqry.

Fixes: 44f0e43037d3 ("bpf: Add support for reading sk_state and more")
Cc: stable@vger.kernel.org
Signed-off-by: Werner Kasselman <werner@verivus.com>
---
 net/core/filter.c                             | 12 +++----
 .../bpf/prog_tests/sock_ops_get_sk.c          |  9 ++++++
 .../selftests/bpf/progs/sock_ops_get_sk.c     | 31 +++++++++++++++++++
 3 files changed, 45 insertions(+), 7 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index e8ad062f63bc..9c43193a5c39 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10827,14 +10827,12 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 			     sizeof(struct minmax));
 		BUILD_BUG_ON(sizeof(struct minmax) <
 			     sizeof(struct minmax_sample));
+		BUILD_BUG_ON(offsetof(struct tcp_sock, rtt_min) +
+			     offsetof(struct minmax_sample, v) > S16_MAX);
 
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-						struct bpf_sock_ops_kern, sk),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern, sk));
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-				      offsetof(struct tcp_sock, rtt_min) +
-				      sizeof_field(struct minmax_sample, t));
+		off = offsetof(struct tcp_sock, rtt_min) +
+		      offsetof(struct minmax_sample, v);
+		SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_W, off);
 		break;
 
 	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c b/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
index 343d92c4df30..1aea4c97d5d3 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
@@ -70,6 +70,15 @@ void test_ns_sock_ops_get_sk(void)
 		ASSERT_EQ(skel->bss->diff_reg_bug_detected, 0, "diff_reg_bug_not_detected");
 	}
 
+	/* Test sock_ops rtt_min access in !fullsock callbacks */
+	if (test__start_subtest("get_rtt_min")) {
+		run_sock_ops_test(cgroup_fd,
+				  bpf_program__fd(skel->progs.sock_ops_get_rtt_min));
+		ASSERT_EQ(skel->bss->rtt_min_null_seen, 1, "rtt_min_null_seen");
+		ASSERT_EQ(skel->bss->rtt_min_bug_detected, 0,
+			  "rtt_min_bug_not_detected");
+	}
+
 	sock_ops_get_sk__destroy(skel);
 close_cgroup:
 	close(cgroup_fd);
diff --git a/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c b/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c
index 3a0689f8ce7c..dee07da8901e 100644
--- a/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c
+++ b/tools/testing/selftests/bpf/progs/sock_ops_get_sk.c
@@ -114,4 +114,35 @@ __naked void sock_ops_get_sk_diff_reg(void)
 		: __clobber_all);
 }
 
+/* sock_ops rtt_min access: different-register, is_locked_tcp_sock == 0 path (TCP_NEW_SYN_RECV). */
+int rtt_min_bug_detected;
+int rtt_min_null_seen;
+
+SEC("sockops")
+__naked void sock_ops_get_rtt_min(void)
+{
+	asm volatile (
+		"r7 = *(u32 *)(r1 + %[is_fullsock_off]);"
+		"r2 = *(u32 *)(r1 + %[rtt_min_off]);"
+		"if r7 != 0 goto 2f;"
+		"if r2 == 0 goto 1f;"
+		"r1 = %[rtt_min_bug_detected] ll;"
+		"r3 = 1;"
+		"*(u32 *)(r1 + 0) = r3;"
+		"goto 2f;"
+	"1:"
+		"r1 = %[rtt_min_null_seen] ll;"
+		"r3 = 1;"
+		"*(u32 *)(r1 + 0) = r3;"
+	"2:"
+		"r0 = 1;"
+		"exit;"
+		:
+		: __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)),
+		  __imm_const(rtt_min_off, offsetof(struct bpf_sock_ops, rtt_min)),
+		  __imm_addr(rtt_min_bug_detected),
+		  __imm_addr(rtt_min_null_seen)
+		: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH 2/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock
  2026-04-17  2:31 ` [PATCH 2/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock Werner Kasselman
@ 2026-04-20 20:43   ` Martin KaFai Lau
  0 siblings, 0 replies; 5+ messages in thread
From: Martin KaFai Lau @ 2026-04-20 20:43 UTC (permalink / raw)
  To: Werner Kasselman
  Cc: bpf@vger.kernel.org, netdev@vger.kernel.org, andrii@kernel.org,
	ast@kernel.org, brakmo@fb.com, daniel@iogearbox.net,
	davem@davemloft.net, eddyz87@gmail.com, edumazet@google.com,
	haoluo@google.com, horms@kernel.org, john.fastabend@gmail.com,
	jolsa@kernel.org, kpsingh@kernel.org, kuba@kernel.org,
	linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org,
	pabeni@redhat.com, sdf@fomichev.me, shuah@kernel.org,
	song@kernel.org, yonghong.song@linux.dev, jiayuan.chen@linux.dev,
	stable@vger.kernel.org

On Fri, Apr 17, 2026 at 02:31:26AM +0000, Werner Kasselman wrote:
> diff --git a/net/core/filter.c b/net/core/filter.c
> index e8ad062f63bc..9c43193a5c39 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -10827,14 +10827,12 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
>  			     sizeof(struct minmax));
>  		BUILD_BUG_ON(sizeof(struct minmax) <
>  			     sizeof(struct minmax_sample));
> +		BUILD_BUG_ON(offsetof(struct tcp_sock, rtt_min) +
> +			     offsetof(struct minmax_sample, v) > S16_MAX);

This doesn't look like a test that was added by a human.
Will sizeof(tcp_sock) ever reach S16_MAX? It is unnecessarily defensive and
inconsistent with other tcp_sock field loads.

> diff --git a/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c b/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
> index 343d92c4df30..1aea4c97d5d3 100644
> --- a/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
> +++ b/tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c

Separate the test in its own patch.

Also tag and add revision to subject, "[PATCH v3 bpf...]".
Take a look at how other patches are posted in the bpf mailing list.

pw-bot: cr

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH bpf v4 1/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock
       [not found] ` <20260420221621.1441707-1-werner@verivus.com>
@ 2026-04-20 22:16   ` Werner Kasselman
  0 siblings, 0 replies; 5+ messages in thread
From: Werner Kasselman @ 2026-04-20 22:16 UTC (permalink / raw)
  To: bpf@vger.kernel.org, netdev@vger.kernel.org
  Cc: Werner Kasselman, stable@vger.kernel.org, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau,
	Eduard Zingerman, Song Liu, Yonghong Song, John Fastabend,
	KP Singh, Stanislav Fomichev, Hao Luo, Jiri Olsa, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, Simon Horman,
	Lawrence Brakmo, open list

sock_ops_convert_ctx_access() reads rtt_min without the
is_locked_tcp_sock guard used for every other tcp_sock field. On
request_sock-backed sock_ops callbacks, sk points at a
tcp_request_sock and the converted load reads past the end of the
allocation.

Extract the guarded tcp_sock field load sequence into
SOCK_OPS_LOAD_TCP_SOCK_FIELD() and use it for the rtt_min access after
computing the sub-field offset with offsetof(struct minmax_sample, v).
Reusing the shared helper keeps rtt_min aligned with the other guarded
tcp_sock field loads and preserves the dst_reg == src_reg failure path
that zeros the destination register when the guard fails.

Found via AST-based call-graph analysis using sqry.

Fixes: 44f0e43037d3 ("bpf: Add support for reading sk_state and more")
Cc: stable@vger.kernel.org
Signed-off-by: Werner Kasselman <werner@verivus.com>
---
 net/core/filter.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index 78b548158fb0..b60f279c004a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10544,12 +10544,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 	struct bpf_insn *insn = insn_buf;
 	int off;
 
-/* Helper macro for adding read access to tcp_sock or sock fields. */
-#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
+/* Helper macro for adding guarded read access to tcp_sock fields. */
+#define SOCK_OPS_LOAD_TCP_SOCK_FIELD(FIELD_SIZE, FIELD_OFFSET)		      \
 	do {								      \
 		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2;     \
-		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		      \
-			     sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
 		if (si->dst_reg == reg || si->src_reg == reg)		      \
 			reg--;						      \
 		if (si->dst_reg == reg || si->src_reg == reg)		      \
@@ -10557,7 +10555,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		if (si->dst_reg == si->src_reg) {			      \
 			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,	      \
 					  offsetof(struct bpf_sock_ops_kern,  \
-					  temp));			      \
+						   temp));		      \
 			fullsock_reg = reg;				      \
 			jmp += 2;					      \
 		}							      \
@@ -10571,23 +10569,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		if (si->dst_reg == si->src_reg)				      \
 			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
 				      offsetof(struct bpf_sock_ops_kern,      \
-				      temp));				      \
+					       temp));			      \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
 						struct bpf_sock_ops_kern, sk),\
 				      si->dst_reg, si->src_reg,		      \
 				      offsetof(struct bpf_sock_ops_kern, sk));\
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		      \
-						       OBJ_FIELD),	      \
+		*insn++ = BPF_LDX_MEM(FIELD_SIZE,			      \
 				      si->dst_reg, si->dst_reg,		      \
-				      offsetof(OBJ, OBJ_FIELD));	      \
+				      FIELD_OFFSET);			      \
 		if (si->dst_reg == si->src_reg)	{			      \
-			*insn++ = BPF_JMP_A(1);				      \
+			*insn++ = BPF_JMP_A(2);				      \
 			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
 				      offsetof(struct bpf_sock_ops_kern,      \
-				      temp));				      \
+					       temp));			      \
+			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);	      \
 		}							      \
 	} while (0)
 
+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
+	do {								      \
+		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		      \
+			     sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
+		SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),\
+					     offsetof(OBJ, OBJ_FIELD));       \
+	} while (0)
+
 #define SOCK_OPS_GET_SK()							      \
 	do {								      \
 		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1;     \
@@ -10829,14 +10835,9 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 			     sizeof(struct minmax));
 		BUILD_BUG_ON(sizeof(struct minmax) <
 			     sizeof(struct minmax_sample));
-
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-						struct bpf_sock_ops_kern, sk),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern, sk));
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-				      offsetof(struct tcp_sock, rtt_min) +
-				      sizeof_field(struct minmax_sample, t));
+		off = offsetof(struct tcp_sock, rtt_min) +
+		      offsetof(struct minmax_sample, v);
+		SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_W, off);
 		break;
 
 	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
-- 
2.43.0

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH bpf v5 1/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock
       [not found] ` <20260420230030.2802408-1-werner@verivus.com>
@ 2026-04-20 23:00   ` Werner Kasselman
  2026-04-22 21:03     ` Martin KaFai Lau
  0 siblings, 1 reply; 5+ messages in thread
From: Werner Kasselman @ 2026-04-20 23:00 UTC (permalink / raw)
  To: bpf@vger.kernel.org, netdev@vger.kernel.org
  Cc: stable@vger.kernel.org, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Eduard Zingerman,
	Kumar Kartikeya Dwivedi, Song Liu, Yonghong Song, Jiri Olsa,
	John Fastabend, Stanislav Fomichev, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Paolo Abeni, Simon Horman, Lawrence Brakmo,
	open list

sock_ops_convert_ctx_access() reads rtt_min without the is_locked_tcp_sock guard used for every other tcp_sock field. On request_sock-backed sock_ops callbacks, sk points at a tcp_request_sock and the converted load reads past the end of the allocation.

Extract the guarded tcp_sock field load sequence into SOCK_OPS_LOAD_TCP_SOCK_FIELD() and use it for the rtt_min access after computing the sub-field offset with offsetof(struct minmax_sample, v). Reusing the shared helper keeps rtt_min aligned with the other guarded tcp_sock field loads and preserves the dst_reg == src_reg failure path that zeros the destination register when the guard fails.

Found via AST-based call-graph analysis using sqry.

Fixes: 44f0e43037d3 ("bpf: Add support for reading sk_state and more")
Cc: stable@vger.kernel.org
Signed-off-by: Werner Kasselman <werner@verivus.com>
---
 net/core/filter.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index fcfcb72663ca..2e7c33d00749 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10535,12 +10535,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 	struct bpf_insn *insn = insn_buf;
 	int off;
 
-/* Helper macro for adding read access to tcp_sock or sock fields. */
-#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
+/* Helper macro for adding guarded read access to tcp_sock fields. */
+#define SOCK_OPS_LOAD_TCP_SOCK_FIELD(FIELD_SIZE, FIELD_OFFSET)		      \
 	do {								      \
 		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2;     \
-		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		      \
-			     sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
 		if (si->dst_reg == reg || si->src_reg == reg)		      \
 			reg--;						      \
 		if (si->dst_reg == reg || si->src_reg == reg)		      \
@@ -10548,7 +10546,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		if (si->dst_reg == si->src_reg) {			      \
 			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,	      \
 					  offsetof(struct bpf_sock_ops_kern,  \
-					  temp));			      \
+						   temp));		      \
 			fullsock_reg = reg;				      \
 			jmp += 2;					      \
 		}							      \
@@ -10562,24 +10560,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		if (si->dst_reg == si->src_reg)				      \
 			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
 				      offsetof(struct bpf_sock_ops_kern,      \
-				      temp));				      \
+					       temp));			      \
 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
 						struct bpf_sock_ops_kern, sk),\
 				      si->dst_reg, si->src_reg,		      \
 				      offsetof(struct bpf_sock_ops_kern, sk));\
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		      \
-						       OBJ_FIELD),	      \
+		*insn++ = BPF_LDX_MEM(FIELD_SIZE,			      \
 				      si->dst_reg, si->dst_reg,		      \
-				      offsetof(OBJ, OBJ_FIELD));	      \
+				      FIELD_OFFSET);			      \
 		if (si->dst_reg == si->src_reg)	{			      \
 			*insn++ = BPF_JMP_A(2);				      \
 			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	      \
 				      offsetof(struct bpf_sock_ops_kern,      \
-				      temp));				      \
+					       temp));			      \
 			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);	      \
 		}							      \
 	} while (0)
 
+#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
+	do {								      \
+		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		      \
+			     sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
+		SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),\
+					     offsetof(OBJ, OBJ_FIELD));       \
+	} while (0)
+
 #define SOCK_OPS_GET_SK()							      \
 	do {								      \
 		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1;     \
@@ -10822,14 +10827,9 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 			     sizeof(struct minmax));
 		BUILD_BUG_ON(sizeof(struct minmax) <
 			     sizeof(struct minmax_sample));
-
-		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-						struct bpf_sock_ops_kern, sk),
-				      si->dst_reg, si->src_reg,
-				      offsetof(struct bpf_sock_ops_kern, sk));
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
-				      offsetof(struct tcp_sock, rtt_min) +
-				      sizeof_field(struct minmax_sample, t));
+		off = offsetof(struct tcp_sock, rtt_min) +
+		      offsetof(struct minmax_sample, v);
+		SOCK_OPS_LOAD_TCP_SOCK_FIELD(BPF_W, off);
 		break;
 
 	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
-- 
2.43.0

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH bpf v5 1/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock
  2026-04-20 23:00   ` [PATCH bpf v5 " Werner Kasselman
@ 2026-04-22 21:03     ` Martin KaFai Lau
  0 siblings, 0 replies; 5+ messages in thread
From: Martin KaFai Lau @ 2026-04-22 21:03 UTC (permalink / raw)
  To: Werner Kasselman
  Cc: bpf@vger.kernel.org, netdev@vger.kernel.org,
	stable@vger.kernel.org, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Eduard Zingerman, Kumar Kartikeya Dwivedi,
	Song Liu, Yonghong Song, Jiri Olsa, John Fastabend,
	Stanislav Fomichev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Simon Horman, Lawrence Brakmo, open list

On Mon, Apr 20, 2026 at 11:00:35PM +0000, Werner Kasselman wrote:
> sock_ops_convert_ctx_access() reads rtt_min without the is_locked_tcp_sock guard used for every other tcp_sock field. On request_sock-backed sock_ops callbacks, sk points at a tcp_request_sock and the converted load reads past the end of the allocation.
> 
> Extract the guarded tcp_sock field load sequence into SOCK_OPS_LOAD_TCP_SOCK_FIELD() and use it for the rtt_min access after computing the sub-field offset with offsetof(struct minmax_sample, v). Reusing the shared helper keeps rtt_min aligned with the other guarded tcp_sock field loads and preserves the dst_reg == src_reg failure path that zeros the destination register when the guard fails.

I think some formatting instruction was not given to the AI this time and
no human bothered to look at the formatting of the commit message
before posting?

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2026-04-22 21:03 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20260417023119.3830723-1-werner@verivus.com>
2026-04-17  2:31 ` [PATCH 2/2] bpf: guard sock_ops rtt_min against non-locked tcp_sock Werner Kasselman
2026-04-20 20:43   ` Martin KaFai Lau
     [not found] ` <20260420221621.1441707-1-werner@verivus.com>
2026-04-20 22:16   ` [PATCH bpf v4 1/2] " Werner Kasselman
     [not found] ` <20260420230030.2802408-1-werner@verivus.com>
2026-04-20 23:00   ` [PATCH bpf v5 " Werner Kasselman
2026-04-22 21:03     ` Martin KaFai Lau

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox