* [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields
@ 2025-07-21 12:57 Paul Chaignon
  2025-07-21 13:02 ` [PATCH bpf-next 2/2] selftests/bpf: Test invalid narrower ctx load Paul Chaignon
  2025-07-22  0:08 ` [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields Eduard Zingerman
  0 siblings, 2 replies; 6+ messages in thread
From: Paul Chaignon @ 2025-07-21 12:57 UTC (permalink / raw)
  To: bpf; +Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko

The following BPF program, simplified from a syzkaller repro, causes a
kernel warning:

    r0 = *(u8 *)(r1 + 169);
    exit;

The pointer field sk sits at offset 168 in __sk_buff, so this load is
treated as a narrower read in bpf_skb_is_valid_access because its
offset doesn't match offsetof(struct __sk_buff, sk). The access is
therefore allowed and later reaches bpf_convert_ctx_access. At that
point, target_size remains zero and the verifier errors out with a
kernel warning and:

    verifier bug: error during ctx access conversion(1)

This patch fixes that by rejecting such accesses, so that a proper
"invalid bpf_context access" error is reported for the load
instruction.

The same issue affects the sk field in several context structures, as
well as skb_data and skb_data_end in bpf_sock_ops, and optval and
optval_end in bpf_sockopt.
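
For reference, the fix relies on GCC/Clang case ranges so that every
offset inside the 8-byte pointer field hits the same case label. Below
is a minimal userspace sketch of the idea, not kernel code:
ctx_range_ptr mimics what bpf_ctx_range_ptr is expected to expand to
in net/core/filter.c, and fake_ctx only reproduces the sk offset
mentioned above.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for __sk_buff: sk lands at offset 168, as in the repro. */
    struct fake_ctx {
            char pad[168];
            unsigned long long sk;
    };

    /* Case range covering every byte of the 8-byte pointer field. */
    #define ctx_range_ptr(TYPE, MEMBER) \
            offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + sizeof(unsigned long long) - 1

    static bool is_valid_access(int off, int size)
    {
            switch (off) {
            case ctx_range_ptr(struct fake_ctx, sk):
                    /* Offsets 168..175 all land here; only a full
                     * 8-byte load at the field start is accepted.
                     */
                    return off == (int)offsetof(struct fake_ctx, sk) && size == 8;
            default:
                    /* The kernel helpers also allow narrow loads of
                     * non-pointer fields here; omitted in this sketch.
                     */
                    return false;
            }
    }

    int main(void)
    {
            printf("%d\n", is_valid_access(168, 8)); /* 1: valid read of skb->sk */
            printf("%d\n", is_valid_access(169, 1)); /* 0: the syzkaller load */
            return 0;
    }

With the old `case offsetof(struct __sk_buff, sk):` label, the access
at offset 169 instead fell through to the default branch and was
accepted as a narrow read of a 4-byte field.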

Note that this syzkaller crash was reported in [1], which was
originally about a different bug, fixed in commit fce7bd8e385a
("bpf/verifier: Handle BPF_LOAD_ACQ instructions in insn_def_regno()").
Because syzbot somehow conflated the two bugs, the new crash and
reproducer were never reported to the mailing list.

Link: https://syzkaller.appspot.com/bug?extid=0ef84a7bdf5301d4cbec [1]
Fixes: f96da09473b52 ("bpf: simplify narrower ctx access")
Fixes: 0df1a55afa832 ("bpf: Warn on internal verifier errors")
Reported-by: syzbot+0ef84a7bdf5301d4cbec@syzkaller.appspotmail.com
Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
---
 kernel/bpf/cgroup.c |  6 +++---
 net/core/filter.c   | 14 +++++++-------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 72c8b50dca0a..3a4ad9f124e1 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -2577,17 +2577,17 @@ static bool cg_sockopt_is_valid_access(int off, int size,
 	}
 
 	switch (off) {
-	case offsetof(struct bpf_sockopt, sk):
+	case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_SOCKET;
 		break;
-	case offsetof(struct bpf_sockopt, optval):
+	case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_PACKET;
 		break;
-	case offsetof(struct bpf_sockopt, optval_end):
+	case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_PACKET_END;
diff --git a/net/core/filter.c b/net/core/filter.c
index 7a72f766aacf..458908c5f1f4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8690,7 +8690,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
 		if (size != sizeof(__u64))
 			return false;
 		break;
-	case offsetof(struct __sk_buff, sk):
+	case bpf_ctx_range_ptr(struct __sk_buff, sk):
 		if (type == BPF_WRITE || size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
@@ -9268,7 +9268,7 @@ static bool sock_addr_is_valid_access(int off, int size,
 				return false;
 		}
 		break;
-	case offsetof(struct bpf_sock_addr, sk):
+	case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
 		if (type != BPF_READ)
 			return false;
 		if (size != sizeof(__u64))
@@ -9318,17 +9318,17 @@ static bool sock_ops_is_valid_access(int off, int size,
 			if (size != sizeof(__u64))
 				return false;
 			break;
-		case offsetof(struct bpf_sock_ops, sk):
+		case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
 			if (size != sizeof(__u64))
 				return false;
 			info->reg_type = PTR_TO_SOCKET_OR_NULL;
 			break;
-		case offsetof(struct bpf_sock_ops, skb_data):
+		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
 			if (size != sizeof(__u64))
 				return false;
 			info->reg_type = PTR_TO_PACKET;
 			break;
-		case offsetof(struct bpf_sock_ops, skb_data_end):
+		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
 			if (size != sizeof(__u64))
 				return false;
 			info->reg_type = PTR_TO_PACKET_END;
@@ -9417,7 +9417,7 @@ static bool sk_msg_is_valid_access(int off, int size,
 		if (size != sizeof(__u64))
 			return false;
 		break;
-	case offsetof(struct sk_msg_md, sk):
+	case bpf_ctx_range_ptr(struct sk_msg_md, sk):
 		if (size != sizeof(__u64))
 			return false;
 		info->reg_type = PTR_TO_SOCKET;
@@ -11623,7 +11623,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
 		return false;
 
 	switch (off) {
-	case offsetof(struct bpf_sk_lookup, sk):
+	case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
 		info->reg_type = PTR_TO_SOCKET_OR_NULL;
 		return size == sizeof(__u64);
 
-- 
2.43.0



* [PATCH bpf-next 2/2] selftests/bpf: Test invalid narrower ctx load
  2025-07-21 12:57 [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields Paul Chaignon
@ 2025-07-21 13:02 ` Paul Chaignon
  2025-07-22  0:11   ` Eduard Zingerman
  2025-07-22  0:08 ` [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields Eduard Zingerman
  1 sibling, 1 reply; 6+ messages in thread
From: Paul Chaignon @ 2025-07-21 13:02 UTC (permalink / raw)
  To: bpf; +Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko

This patch adds two selftests to cover invalid narrower loads on the
context. These used to cause a kernel warning before the previous
patch. To trigger the warning, the load had to be aligned, read an
affected pointer field (e.g., skb->sk), and not start at the beginning
of the pointer field. The new selftests exercise two such loads, of
1-byte and 4-byte sizes.
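
For reference, with sk at offset 168 in __sk_buff (and keeping in mind
that bpf_skb_is_valid_access rejects unaligned offsets), the load
shapes look roughly as follows; the exact offsets are illustrative:

    r0 = *(u64 *)(r1 + 168);  /* valid: full read of skb->sk */
    r0 = *(u8  *)(r1 + 168);  /* narrow, at the field start: rejected cleanly */
    r0 = *(u8  *)(r1 + 169);  /* aligned, inside sk, past the start: used to warn */
    r0 = *(u32 *)(r1 + 172);  /* aligned, inside sk, past the start: used to warn */
    r0 = *(u16 *)(r1 + 169);  /* unaligned (169 % 2 != 0): rejected early */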

Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
---
 .../selftests/bpf/progs/verifier_ctx.c        | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c
index a83809a1dbbf..229f26d1d413 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ctx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c
@@ -218,4 +218,31 @@ __naked void null_check_8_null_bind(void)
 	: __clobber_all);
 }
 
+SEC("tc")
+__description("invalid narrow skb->sk load")
+__failure __msg("invalid bpf_context access")
+__naked void invalid_narrow_skb_sk_load(void)
+{
+	asm volatile ("				\
+	r0 = *(u8 *)(r1 + %[__sk_buff_sk]);	\
+	exit;					\
+"	:
+	: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk) + 1)
+	: __clobber_all);
+}
+
+SEC("sockops")
+__description("invalid narrow skops->sk_data load")
+__failure __msg("invalid bpf_context access")
+__naked void invalid_narrow_skops_sk_data_load(void)
+{
+	asm volatile ("				\
+	r1 = *(u32 *)(r1 + %[sk_data]);		\
+	r0 = 0;					\
+	exit;					\
+"	:
+	: __imm_const(sk_data, offsetof(struct bpf_sock_ops, skb_data) + 4)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
-- 
2.43.0



* Re: [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields
  2025-07-21 12:57 [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields Paul Chaignon
  2025-07-21 13:02 ` [PATCH bpf-next 2/2] selftests/bpf: Test invalid narrower ctx load Paul Chaignon
@ 2025-07-22  0:08 ` Eduard Zingerman
  2025-07-22  5:30   ` John Fastabend
  2025-07-22 14:44   ` Paul Chaignon
  1 sibling, 2 replies; 6+ messages in thread
From: Eduard Zingerman @ 2025-07-22  0:08 UTC (permalink / raw)
  To: Paul Chaignon, bpf; +Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko

On Mon, 2025-07-21 at 14:57 +0200, Paul Chaignon wrote:

[...]

> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 72c8b50dca0a..3a4ad9f124e1 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -2577,17 +2577,17 @@ static bool cg_sockopt_is_valid_access(int off, int size,
>  	}
>  
>  	switch (off) {
> -	case offsetof(struct bpf_sockopt, sk):
> +	case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
>  		if (size != sizeof(__u64))
>  			return false;
>  		info->reg_type = PTR_TO_SOCKET;
>  		break;
> -	case offsetof(struct bpf_sockopt, optval):
> +	case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
>  		if (size != sizeof(__u64))
>  			return false;
>  		info->reg_type = PTR_TO_PACKET;
>  		break;
> -	case offsetof(struct bpf_sockopt, optval_end):
> +	case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
>  		if (size != sizeof(__u64))
>  			return false;
>  		info->reg_type = PTR_TO_PACKET_END;

Nit: I'd also convert `case offsetof(struct bpf_sockopt, retval):`
     just below.  Otherwise a reader would spend some time figuring out
     why `retval` is special (it's not).

> diff --git a/net/core/filter.c b/net/core/filter.c
> index 7a72f766aacf..458908c5f1f4 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -8690,7 +8690,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
>  		if (size != sizeof(__u64))
>  			return false;
>  		break;
> -	case offsetof(struct __sk_buff, sk):
> +	case bpf_ctx_range_ptr(struct __sk_buff, sk):
>  		if (type == BPF_WRITE || size != sizeof(__u64))
>  			return false;
>  		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
> @@ -9268,7 +9268,7 @@ static bool sock_addr_is_valid_access(int off, int size,
>  				return false;
>  		}
>  		break;
> -	case offsetof(struct bpf_sock_addr, sk):
> +	case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
>  		if (type != BPF_READ)
>  			return false;
>  		if (size != sizeof(__u64))
> @@ -9318,17 +9318,17 @@ static bool sock_ops_is_valid_access(int off, int size,
>  			if (size != sizeof(__u64))
>  				return false;
>  			break;
> -		case offsetof(struct bpf_sock_ops, sk):
> +		case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
>  			if (size != sizeof(__u64))
>  				return false;
>  			info->reg_type = PTR_TO_SOCKET_OR_NULL;
>  			break;
> -		case offsetof(struct bpf_sock_ops, skb_data):
> +		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
>  			if (size != sizeof(__u64))
>  				return false;
>  			info->reg_type = PTR_TO_PACKET;
>  			break;
> -		case offsetof(struct bpf_sock_ops, skb_data_end):
> +		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
>  			if (size != sizeof(__u64))
>  				return false;
>  			info->reg_type = PTR_TO_PACKET_END;

I think this function is buggy for `skb_hwtstamp` as well.
The skb_hwtstamp field is u64, but size_default is sizeof(u32).
So an access at `offsetof(struct bpf_sock_ops, skb_hwtstamp) + 4` would
be permitted by the default branch. But this range is not handled by
the accompanying sock_ops_convert_ctx_access().
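
For illustration, and assuming skb_hwtstamp's offset keeps such an
access 4-byte aligned, the problematic shape would be analogous to the
loads in patch 2:

    r0 = *(u32 *)(r1 + offsetof(struct bpf_sock_ops, skb_hwtstamp) + 4);

size == size_default, so the default branch accepts it, yet no
conversion exists for that offset.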


> @@ -9417,7 +9417,7 @@ static bool sk_msg_is_valid_access(int off, int size,
>  		if (size != sizeof(__u64))
>  			return false;
>  		break;
> -	case offsetof(struct sk_msg_md, sk):
> +	case bpf_ctx_range_ptr(struct sk_msg_md, sk):
>  		if (size != sizeof(__u64))
>  			return false;
>  		info->reg_type = PTR_TO_SOCKET;

I don't think this change is necessary, the default branch rejects
access at any not matched offset. Otherwise `data` and `data_end`
should be converted for uniformity.

> @@ -11623,7 +11623,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
>  		return false;
>  
>  	switch (off) {
> -	case offsetof(struct bpf_sk_lookup, sk):
> +	case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
>  		info->reg_type = PTR_TO_SOCKET_OR_NULL;
>  		return size == sizeof(__u64);
>  

Same here, the default branch would reject access at the wrong offset already.


* Re: [PATCH bpf-next 2/2] selftests/bpf: Test invalid narrower ctx load
  2025-07-21 13:02 ` [PATCH bpf-next 2/2] selftests/bpf: Test invalid narrower ctx load Paul Chaignon
@ 2025-07-22  0:11   ` Eduard Zingerman
  0 siblings, 0 replies; 6+ messages in thread
From: Eduard Zingerman @ 2025-07-22  0:11 UTC (permalink / raw)
  To: Paul Chaignon, bpf; +Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko

On Mon, 2025-07-21 at 15:02 +0200, Paul Chaignon wrote:
> This patch adds two selftests to cover invalid narrower loads on the
> context. These used to cause a kernel warning before the previous
> patch. To trigger the warning, the load had to be aligned, read an
> affected pointer field (e.g., skb->sk), and not start at the beginning
> of the pointer field. The new selftests exercise two such loads, of
> 1-byte and 4-byte sizes.
> 
> Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
> ---

Since we hit a bug here, I think it would be nice to have a test case
for each buggy field if that could be done in a terse way.
Wrapping `invalid_narrow_skb_sk_load` in a macro and stamping out a
few instances seems sufficient, e.g. something like the sketch below.
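
Untested sketch of one possible shape, assuming the usual bpf_misc.h
helpers; the names, section strings and field list below are made up
and would need double-checking:

    #define NARROW_LOAD_TEST(name, prog_sec, ctx, field)               \
    SEC(prog_sec)                                                      \
    __description("invalid narrow " #ctx " " #field " load")           \
    __failure __msg("invalid bpf_context access")                      \
    __naked void invalid_narrow_##name(void)                           \
    {                                                                  \
            asm volatile ("r0 = *(u32 *)(r1 + %[off]); r0 = 0; exit;"  \
            :                                                          \
            : __imm_const(off, offsetof(struct ctx, field) + 4)        \
            : __clobber_all);                                          \
    }

    NARROW_LOAD_TEST(skb_sk, "tc", __sk_buff, sk)
    NARROW_LOAD_TEST(sk_msg_sk, "sk_msg", sk_msg_md, sk)
    NARROW_LOAD_TEST(sock_ops_sk, "sockops", bpf_sock_ops, sk)
    NARROW_LOAD_TEST(sockopt_optval, "cgroup/getsockopt", bpf_sockopt, optval)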

[...]


* Re: [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields
  2025-07-22  0:08 ` [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields Eduard Zingerman
@ 2025-07-22  5:30   ` John Fastabend
  2025-07-22 14:44   ` Paul Chaignon
  1 sibling, 0 replies; 6+ messages in thread
From: John Fastabend @ 2025-07-22  5:30 UTC (permalink / raw)
  To: Eduard Zingerman
  Cc: Paul Chaignon, bpf, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko

On 2025-07-21 17:08:05, Eduard Zingerman wrote:
> On Mon, 2025-07-21 at 14:57 +0200, Paul Chaignon wrote:
> 
> [...]
> 
> > diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> > index 72c8b50dca0a..3a4ad9f124e1 100644
> > --- a/kernel/bpf/cgroup.c
> > +++ b/kernel/bpf/cgroup.c
> > @@ -2577,17 +2577,17 @@ static bool cg_sockopt_is_valid_access(int off, int size,
> >  	}
> >  
> >  	switch (off) {
> > -	case offsetof(struct bpf_sockopt, sk):
> > +	case bpf_ctx_range_ptr(struct bpf_sockopt, sk):
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		info->reg_type = PTR_TO_SOCKET;
> >  		break;
> > -	case offsetof(struct bpf_sockopt, optval):
> > +	case bpf_ctx_range_ptr(struct bpf_sockopt, optval):
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		info->reg_type = PTR_TO_PACKET;
> >  		break;
> > -	case offsetof(struct bpf_sockopt, optval_end):
> > +	case bpf_ctx_range_ptr(struct bpf_sockopt, optval_end):
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		info->reg_type = PTR_TO_PACKET_END;
> 
> Nit: I'd also convert `case offsetof(struct bpf_sockopt, retval):`
>      just below.  Otherwise a reader would spend some time figuring out
>      why `retval` is special (it's not).
> 
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index 7a72f766aacf..458908c5f1f4 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -8690,7 +8690,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		break;
> > -	case offsetof(struct __sk_buff, sk):
> > +	case bpf_ctx_range_ptr(struct __sk_buff, sk):
> >  		if (type == BPF_WRITE || size != sizeof(__u64))
> >  			return false;
> >  		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
> > @@ -9268,7 +9268,7 @@ static bool sock_addr_is_valid_access(int off, int size,
> >  				return false;
> >  		}
> >  		break;
> > -	case offsetof(struct bpf_sock_addr, sk):
> > +	case bpf_ctx_range_ptr(struct bpf_sock_addr, sk):
> >  		if (type != BPF_READ)
> >  			return false;
> >  		if (size != sizeof(__u64))
> > @@ -9318,17 +9318,17 @@ static bool sock_ops_is_valid_access(int off, int size,
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			break;
> > -		case offsetof(struct bpf_sock_ops, sk):
> > +		case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			info->reg_type = PTR_TO_SOCKET_OR_NULL;
> >  			break;
> > -		case offsetof(struct bpf_sock_ops, skb_data):
> > +		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			info->reg_type = PTR_TO_PACKET;
> >  			break;
> > -		case offsetof(struct bpf_sock_ops, skb_data_end):
> > +		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			info->reg_type = PTR_TO_PACKET_END;
> 
> I think this function is buggy for `skb_hwtstamp` as well.
> The skb_hwtstamp field is u64, but size_default is sizeof(u32).
> So an access at `offsetof(struct bpf_sock_ops, skb_hwtstamp) + 4` would
> be permitted by the default branch. But this range is not handled by
> the accompanying sock_ops_convert_ctx_access().


+1 looks that way to me.

> 
> 
> > @@ -9417,7 +9417,7 @@ static bool sk_msg_is_valid_access(int off, int size,
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		break;
> > -	case offsetof(struct sk_msg_md, sk):
> > +	case bpf_ctx_range_ptr(struct sk_msg_md, sk):
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		info->reg_type = PTR_TO_SOCKET;
> 
> I don't think this change is necessary, the default branch rejects
> access at any not matched offset. Otherwise `data` and `data_end`
> should be converted for uniformity.

I sort of like it; having all the ptr types referenced with
bpf_ctx_range_ptr seems more consistent to me. So, since it
is a bpf-next change, I would do data and data_end as well.
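
(Presumably that means converting the `data` and `data_end` cases in
sk_msg_is_valid_access as well, i.e. roughly:

    case bpf_ctx_range_ptr(struct sk_msg_md, data):
            ...
    case bpf_ctx_range_ptr(struct sk_msg_md, data_end):
            ...

with the existing case bodies kept as-is.)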


> 
> > @@ -11623,7 +11623,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
> >  		return false;
> >  
> >  	switch (off) {
> > -	case offsetof(struct bpf_sk_lookup, sk):
> > +	case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
> >  		info->reg_type = PTR_TO_SOCKET_OR_NULL;
> >  		return size == sizeof(__u64);
> >  
> 
> Same here, the default branch would reject access at the wrong offset already.
> 

Other than the above, patch LGTM.


* Re: [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields
  2025-07-22  0:08 ` [PATCH bpf-next 1/2] bpf: Reject narrower access to pointer ctx fields Eduard Zingerman
  2025-07-22  5:30   ` John Fastabend
@ 2025-07-22 14:44   ` Paul Chaignon
  1 sibling, 0 replies; 6+ messages in thread
From: Paul Chaignon @ 2025-07-22 14:44 UTC (permalink / raw)
  To: Eduard Zingerman
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	John Fastabend

On Mon, Jul 21, 2025 at 05:08:05PM -0700, Eduard Zingerman wrote:
> On Mon, 2025-07-21 at 14:57 +0200, Paul Chaignon wrote:

Thanks for the review!

[...]

> > @@ -9318,17 +9318,17 @@ static bool sock_ops_is_valid_access(int off, int size,
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			break;
> > -		case offsetof(struct bpf_sock_ops, sk):
> > +		case bpf_ctx_range_ptr(struct bpf_sock_ops, sk):
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			info->reg_type = PTR_TO_SOCKET_OR_NULL;
> >  			break;
> > -		case offsetof(struct bpf_sock_ops, skb_data):
> > +		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data):
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			info->reg_type = PTR_TO_PACKET;
> >  			break;
> > -		case offsetof(struct bpf_sock_ops, skb_data_end):
> > +		case bpf_ctx_range_ptr(struct bpf_sock_ops, skb_data_end):
> >  			if (size != sizeof(__u64))
> >  				return false;
> >  			info->reg_type = PTR_TO_PACKET_END;
> 
> I think this function is buggy for `skb_hwtstamp` as well.
> The skb_hwtstamp field is u64, but size_default is sizeof(u32).
> So an access at `offsetof(struct bpf_sock_ops, skb_hwtstamp) + 4` would
> be permitted by the default branch. But this range is not handled by
> the accompanying sock_ops_convert_ctx_access().

Nice catch, thanks! It's fixed and tested in the v2.

> 
> 
> > @@ -9417,7 +9417,7 @@ static bool sk_msg_is_valid_access(int off, int size,
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		break;
> > -	case offsetof(struct sk_msg_md, sk):
> > +	case bpf_ctx_range_ptr(struct sk_msg_md, sk):
> >  		if (size != sizeof(__u64))
> >  			return false;
> >  		info->reg_type = PTR_TO_SOCKET;
> 
> I don't think this change is necessary, the default branch rejects
> access at any not matched offset. Otherwise `data` and `data_end`
> should be converted for uniformity.

Yes, this is not necessary; I got carried away. Per John's suggestion, I
still kept it in the v2 and converted data and data_end for consistency.
Hopefully, that'll be less confusing to future readers.

> 
> > @@ -11623,7 +11623,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
> >  		return false;
> >  
> >  	switch (off) {
> > -	case offsetof(struct bpf_sk_lookup, sk):
> > +	case bpf_ctx_range_ptr(struct bpf_sk_lookup, sk):
> >  		info->reg_type = PTR_TO_SOCKET_OR_NULL;
> >  		return size == sizeof(__u64);
> >  
> 
> Same here, the default branch would reject access at the wrong offset already.


