BPF List
 help / color / mirror / Atom feed
* [PATCH bpf-next v1 1/2] bpf: refine u32/s32 bounds when ranges cross min/max boundary
@ 2026-03-05 11:09 Eduard Zingerman
  2026-03-05 11:09 ` [PATCH bpf-next v1 2/2] selftests/bpf: test refining " Eduard Zingerman
  2026-03-05 16:49 ` [PATCH bpf-next v1 1/2] bpf: refine " Emil Tsalapatis
  0 siblings, 2 replies; 5+ messages in thread
From: Eduard Zingerman @ 2026-03-05 11:09 UTC (permalink / raw)
  To: bpf, ast
  Cc: andrii, daniel, martin.lau, kernel-team, yonghong.song, emil,
	arighi, shung-hsi.yu, Eduard Zingerman

Same as in __reg64_deduce_bounds(), refine s32/u32 ranges
in __reg32_deduce_bounds() in the following situations:

0                                                   U32_MAX
|  [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]              |
|----------------------------|----------------------------|
|xxxxx s32 range xxxxxxxxx]                       [xxxxxxx|
0                     S32_MAX S32_MIN                    -1

0                                                   U32_MAX
|              [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]  |
|----------------------------|----------------------------|
|xxxxxxxxx]                       [xxxxxxxxxxxx s32 range |
0                     S32_MAX S32_MIN                    -1

This helps in cases like the following program:

   call %[bpf_get_prandom_u32];
   w0 &= 0xffffffff;
   if w0 < 0x3 goto 1f;    // on fall-through u32 range [3..U32_MAX]
   if w0 s> 0x1 goto 1f;   // on fall-through s32 range [S32_MIN..1]
   if w0 s< 0x0 goto 1f;   // range can be narrowed to  [S32_MIN..-1]
   r10 = 0;
1: ...;

The reg_bounds.c selftest is updated to incorporate identical logic,
refinement based on non-overflowing range halves:

  ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
  ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))

Reported-by: Andrea Righi <arighi@nvidia.com>
Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
Closes: https://lore.kernel.org/bpf/aakqucg4vcujVwif@gpd4/T/
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 kernel/bpf/verifier.c                         | 24 ++++++++
 .../selftests/bpf/prog_tests/reg_bounds.c     | 58 +++++++++++++++++--
 2 files changed, 78 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d92cf2821657..2b79a2a4d1b5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2532,6 +2532,30 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
 	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
 		reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
 		reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
+	} else {
+		if (reg->u32_max_value < (u32)reg->s32_min_value) {
+			/* See __reg64_deduce_bounds() for detailed explanation.
+			 * Refine ranges in the following situation:
+			 *
+			 * 0                                                   U32_MAX
+			 * |  [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]              |
+			 * |----------------------------|----------------------------|
+			 * |xxxxx s32 range xxxxxxxxx]                       [xxxxxxx|
+			 * 0                     S32_MAX S32_MIN                    -1
+			 */
+			reg->s32_min_value = (s32)reg->u32_min_value;
+			reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value);
+		} else if ((u32)reg->s32_max_value < reg->u32_min_value) {
+			/*
+			 * 0                                                   U32_MAX
+			 * |              [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]  |
+			 * |----------------------------|----------------------------|
+			 * |xxxxxxxxx]                       [xxxxxxxxxxxx s32 range |
+			 * 0                     S32_MAX S32_MIN                    -1
+			 */
+			reg->s32_max_value = (s32)reg->u32_max_value;
+			reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value);
+		}
 	}
 }
 
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 0322f817d07b..446e5ef62f44 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -422,15 +422,65 @@ static bool is_valid_range(enum num_t t, struct range x)
 	}
 }
 
-static struct range range_improve(enum num_t t, struct range old, struct range new)
+static struct range range_intersection(enum num_t t, struct range old, struct range new)
 {
 	return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
 }
 
+static struct range range_union(enum num_t t, struct range x, struct range y)
+{
+	if (!is_valid_range(t, x))
+		return y;
+	if (!is_valid_range(t, y))
+		return x;
+	return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
+}
+
+/*
+ * This function attempts to improve x range intersecting it with y.
+ * range_cast(... to_t ...) loses precision for ranges that pass to_t
+ * min/max boundaries. To avoid such precision losses this function
+ * splits both x and y into halves corresponding to non-overflowing
+ * sub-ranges: [0, smax] and [smin, -1].
+ * Final result is computed as follows:
+ *
+ *   ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
+ *   ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
+ *
+ * Precision might still be lost if final union is not a continuous range.
+ */
+static struct range range_refine_in_halves(enum num_t x_t, struct range x,
+					   enum num_t y_t, struct range y)
+{
+	struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
+	u64 smax, smin, neg_one;
+
+	if (t_is_32(x_t)) {
+		smax = (u64)(u32)S32_MAX;
+		smin = (u64)(u32)S32_MIN;
+		neg_one = (u64)(u32)(s32)(-1);
+	} else {
+		smax = (u64)S64_MAX;
+		smin = (u64)S64_MIN;
+		neg_one = U64_MAX;
+	}
+	x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
+	x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
+	y_pos = range_intersection(y_t, y, range(y_t, 0, smax));
+	y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
+	r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
+	r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
+	return range_union(x_t, r_pos, r_neg);
+
+}
+
 static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
 {
 	struct range y_cast;
 
+	if (t_is_32(x_t) == t_is_32(y_t))
+		x = range_refine_in_halves(x_t, x, y_t, y);
+
 	y_cast = range_cast(y_t, x_t, y);
 
 	/* If we know that
@@ -444,7 +494,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
 	 */
 	if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX  && y_cast.b <= S32_MAX &&
 	    (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
-		return range_improve(x_t, x, y_cast);
+		return range_intersection(x_t, x, y_cast);
 
 	/* the case when new range knowledge, *y*, is a 32-bit subregister
 	 * range, while previous range knowledge, *x*, is a full register
@@ -462,7 +512,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
 		x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
 		if (!is_valid_range(x_t, x_swap))
 			return x;
-		return range_improve(x_t, x, x_swap);
+		return range_intersection(x_t, x, x_swap);
 	}
 
 	if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
@@ -480,7 +530,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
 	}
 
 	/* otherwise, plain range cast and intersection works */
-	return range_improve(x_t, x, y_cast);
+	return range_intersection(x_t, x, y_cast);
 }
 
 /* =======================
-- 
2.51.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH bpf-next v1 2/2] selftests/bpf: test refining u32/s32 bounds when ranges cross min/max boundary
  2026-03-05 11:09 [PATCH bpf-next v1 1/2] bpf: refine u32/s32 bounds when ranges cross min/max boundary Eduard Zingerman
@ 2026-03-05 11:09 ` Eduard Zingerman
  2026-03-05 16:51   ` Emil Tsalapatis
  2026-03-05 16:49 ` [PATCH bpf-next v1 1/2] bpf: refine " Emil Tsalapatis
  1 sibling, 1 reply; 5+ messages in thread
From: Eduard Zingerman @ 2026-03-05 11:09 UTC (permalink / raw)
  To: bpf, ast
  Cc: andrii, daniel, martin.lau, kernel-team, yonghong.song, emil,
	arighi, shung-hsi.yu, Eduard Zingerman

Two test cases for signed/unsigned 32-bit bounds refinement
when s32 range crosses the sign boundary:
- s32 range [S32_MIN..1] overlapping with u32 range [3..U32_MAX],
  s32 range tail before sign boundary overlaps with u32 range.
- s32 range [-3..5] overlapping with u32 range [0..S32_MIN+3],
  s32 range head after the sign boundary overlaps with u32 range.

This covers both branches added in the __reg32_deduce_bounds().

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 .../selftests/bpf/progs/verifier_bounds.c     | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index 4588e5b55b41..0c2a62b16ec4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -2000,4 +2000,41 @@ __naked void bounds_refinement_multiple_overlaps(void *ctx)
 	: __clobber_all);
 }
 
+SEC("socket")
+__success
+__flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void signed_unsigned_intersection32_case1(void *ctx)
+{
+	asm volatile("									\
+	call %[bpf_get_prandom_u32];							\
+	w0 &= 0xffffffff;								\
+	if w0 < 0x3 goto 1f;		/* on fall-through u32 range [3..U32_MAX]  */	\
+	if w0 s> 0x1 goto 1f;		/* on fall-through s32 range [S32_MIN..1]  */	\
+	if w0 s< 0x0 goto 1f;		/* range can be narrowed to  [S32_MIN..-1] */	\
+	r10 = 0;			/* thus predicting the jump. */			\
+1:	exit;										\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__success
+__flag(BPF_F_TEST_REG_INVARIANTS)
+__naked void signed_unsigned_intersection32_case2(void *ctx)
+{
+	asm volatile("									\
+	call %[bpf_get_prandom_u32];							\
+	w0 &= 0xffffffff;								\
+	if w0 > 0x80000003 goto 1f;	/* on fall-through u32 range [0..S32_MIN+3] */	\
+	if w0 s< -3 goto 1f;		/* on fall-through s32 range [-3..S32_MAX] */	\
+	if w0 s> 5 goto 1f;		/* on fall-through s32 range [-3..5] */		\
+	if w0 <= 5 goto 1f;		/* range can be narrowed to  [0..5] */		\
+	r10 = 0;			/* thus predicting the jump */			\
+1:	exit;										\
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
-- 
2.51.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH bpf-next v1 1/2] bpf: refine u32/s32 bounds when ranges cross min/max boundary
  2026-03-05 11:09 [PATCH bpf-next v1 1/2] bpf: refine u32/s32 bounds when ranges cross min/max boundary Eduard Zingerman
  2026-03-05 11:09 ` [PATCH bpf-next v1 2/2] selftests/bpf: test refining " Eduard Zingerman
@ 2026-03-05 16:49 ` Emil Tsalapatis
  2026-03-05 18:05   ` Eduard Zingerman
  1 sibling, 1 reply; 5+ messages in thread
From: Emil Tsalapatis @ 2026-03-05 16:49 UTC (permalink / raw)
  To: Eduard Zingerman, bpf, ast
  Cc: andrii, daniel, martin.lau, kernel-team, yonghong.song, arighi,
	shung-hsi.yu

On Thu Mar 5, 2026 at 6:09 AM EST, Eduard Zingerman wrote:
> Same as in __reg64_deduce_bounds(), refine s32/u32 ranges
> in __reg32_deduce_bounds() in the following situations:
Nit: Can we add a cover letter since this is a 2-patch series?

Also, can we explicitly state the condition: IIUC it's: "When
the signed and unsigned ranges of a register partially overlap
on an entirely (signed) positive or (signed) negative interval".
This helps with the ASCII and also covers the question "what happens
when the ranges overlap in two places" - nothing, we don't refine
the register's bounds in that case, as noted in __reg64_deduce_bounds.

>
> 0                                                   U32_MAX
> |  [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]              |
> |----------------------------|----------------------------|
> |xxxxx s32 range xxxxxxxxx]                       [xxxxxxx|
> 0                     S32_MAX S32_MIN                    -1
>
> 0                                                   U32_MAX
> |              [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]  |
> |----------------------------|----------------------------|
> |xxxxxxxxx]                       [xxxxxxxxxxxx s32 range |
> 0                     S32_MAX S32_MIN                    -1
>
> This helps for e.g. consider the following program:
>
>    call %[bpf_get_prandom_u32];
>    w0 &= 0xffffffff;
>    if w0 < 0x3 goto 1f;    // on fall-through u32 range [3..U32_MAX]
>    if w0 s> 0x1 goto 1f;   // on fall-through s32 range [S32_MIN..1]
>    if w0 s< 0x0 goto 1f;   // range can be narrowed to  [S32_MIN..-1]
>    r10 = 0;
> 1: ...;
>
> The reg_bounds.c selftest is updated to incorporate identical logic,
> refinement based on non-overflowing range halves:
>
>   ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
>   ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
>

> Reported-by: Andrea Righi <arighi@nvidia.com>
> Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
> Closes: https://lore.kernel.org/bpf/aakqucg4vcujVwif@gpd4/T/
> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> ---
>  kernel/bpf/verifier.c                         | 24 ++++++++
>  .../selftests/bpf/prog_tests/reg_bounds.c     | 58 +++++++++++++++++--
>  2 files changed, 78 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index d92cf2821657..2b79a2a4d1b5 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -2532,6 +2532,30 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
>  	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
>  		reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
>  		reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
> +	} else {
> +		if (reg->u32_max_value < (u32)reg->s32_min_value) {
> +			/* See __reg64_deduce_bounds() for detailed explanation.
> +			 * Refine ranges in the following situation:
> +			 *
> +			 * 0                                                   U32_MAX
> +			 * |  [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]              |
> +			 * |----------------------------|----------------------------|
> +			 * |xxxxx s32 range xxxxxxxxx]                       [xxxxxxx|
> +			 * 0                     S32_MAX S32_MIN                    -1
> +			 */
> +			reg->s32_min_value = (s32)reg->u32_min_value;
> +			reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value);
> +		} else if ((u32)reg->s32_max_value < reg->u32_min_value) {
> +			/*
> +			 * 0                                                   U32_MAX
> +			 * |              [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]  |
> +			 * |----------------------------|----------------------------|
> +			 * |xxxxxxxxx]                       [xxxxxxxxxxxx s32 range |
> +			 * 0                     S32_MAX S32_MIN                    -1
> +			 */
> +			reg->s32_max_value = (s32)reg->u32_max_value;
> +			reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value);
> +		}
>  	}
>  }
>  
> diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
> index 0322f817d07b..446e5ef62f44 100644
> --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
> +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
> @@ -422,15 +422,65 @@ static bool is_valid_range(enum num_t t, struct range x)
>  	}
>  }
>  
> -static struct range range_improve(enum num_t t, struct range old, struct range new)
> +static struct range range_intersection(enum num_t t, struct range old, struct range new)
>  {
>  	return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
>  }
>  
> +static struct range range_union(enum num_t t, struct range x, struct range y)
> +{
> +	if (!is_valid_range(t, x))
> +		return y;
> +	if (!is_valid_range(t, y))
> +		return x;
> +	return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
> +}
> +

AFAICT this is only fully accurate if the ranges intersect/are consecutive.
Is it that it's fine to include extra values into the interval since
we're dealing with bounds checking, and so can relax the bounds as we
please? If so, then this is fine. 

If not, can we either change the name to make it clear we assume the caller has 
ensured this, or add a check in the function itself? 

> +/*
> + * This function attempts to improve x range intersecting it with y.
> + * range_cast(... to_t ...) loses precision for ranges that pass to_t
> + * min/max boundaries. To avoid such precision losses this function
> + * splits both x and y into halves corresponding to non-overflowing
> + * sub-ranges: [0, smax] and [smin, -1].
> + * Final result is computed as follows:
> + *
> + *   ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
> + *   ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
> + *
> + * Precision might still be lost if final union is not a continuous range.
> + */
> +static struct range range_refine_in_halves(enum num_t x_t, struct range x,
> +					   enum num_t y_t, struct range y)
> +{
> +	struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
> +	u64 smax, smin, neg_one;
> +
> +	if (t_is_32(x_t)) {
> +		smax = (u64)(u32)S32_MAX;
> +		smin = (u64)(u32)S32_MIN;
> +		neg_one = (u64)(u32)(s32)(-1);
> +	} else {
> +		smax = (u64)S64_MAX;
> +		smin = (u64)S64_MIN;
> +		neg_one = U64_MAX;
> +	}
> +	x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
> +	x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
> +	y_pos = range_intersection(y_t, y, range(y_t, 0, smax));
> +	y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
> +	r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
> +	r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
> +	return range_union(x_t, r_pos, r_neg);
> +
> +}
> +
>  static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
>  {
>  	struct range y_cast;
>  
> +	if (t_is_32(x_t) == t_is_32(y_t))
> +		x = range_refine_in_halves(x_t, x, y_t, y);
> +
>  	y_cast = range_cast(y_t, x_t, y);
>  
>  	/* If we know that
> @@ -444,7 +494,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
>  	 */
>  	if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX  && y_cast.b <= S32_MAX &&
>  	    (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
> -		return range_improve(x_t, x, y_cast);
> +		return range_intersection(x_t, x, y_cast);
>  
>  	/* the case when new range knowledge, *y*, is a 32-bit subregister
>  	 * range, while previous range knowledge, *x*, is a full register
> @@ -462,7 +512,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
>  		x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
>  		if (!is_valid_range(x_t, x_swap))
>  			return x;
> -		return range_improve(x_t, x, x_swap);
> +		return range_intersection(x_t, x, x_swap);
>  	}
>  
>  	if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
> @@ -480,7 +530,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
>  	}
>  
>  	/* otherwise, plain range cast and intersection works */
> -	return range_improve(x_t, x, y_cast);
> +	return range_intersection(x_t, x, y_cast);
>  }
>  
>  /* =======================


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH bpf-next v1 2/2] selftests/bpf: test refining u32/s32 bounds when ranges cross min/max boundary
  2026-03-05 11:09 ` [PATCH bpf-next v1 2/2] selftests/bpf: test refining " Eduard Zingerman
@ 2026-03-05 16:51   ` Emil Tsalapatis
  0 siblings, 0 replies; 5+ messages in thread
From: Emil Tsalapatis @ 2026-03-05 16:51 UTC (permalink / raw)
  To: Eduard Zingerman, bpf, ast
  Cc: andrii, daniel, martin.lau, kernel-team, yonghong.song, arighi,
	shung-hsi.yu

On Thu Mar 5, 2026 at 6:09 AM EST, Eduard Zingerman wrote:
> Two test cases for signed/unsigned 32-bit bounds refinement
> when s32 range crosses the sign boundary:
> - s32 range [S32_MIN..1] overlapping with u32 range [3..U32_MAX],
>   s32 range tail before sign boundary overlaps with u32 range.
> - s32 range [-3..5] overlapping with u32 range [0..S32_MIN+3],
>   s32 range head after the sign boundary overlaps with u32 range.
>
> This covers both branches added in the __reg32_deduce_bounds().
>
> Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>

Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>

> ---
>  .../selftests/bpf/progs/verifier_bounds.c     | 37 +++++++++++++++++++
>  1 file changed, 37 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
> index 4588e5b55b41..0c2a62b16ec4 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
> @@ -2000,4 +2000,41 @@ __naked void bounds_refinement_multiple_overlaps(void *ctx)
>  	: __clobber_all);
>  }
>  
> +SEC("socket")
> +__success
> +__flag(BPF_F_TEST_REG_INVARIANTS)
> +__naked void signed_unsigned_intersection32_case1(void *ctx)
> +{
> +	asm volatile("									\
> +	call %[bpf_get_prandom_u32];							\
> +	w0 &= 0xffffffff;								\
> +	if w0 < 0x3 goto 1f;		/* on fall-through u32 range [3..U32_MAX]  */	\
> +	if w0 s> 0x1 goto 1f;		/* on fall-through s32 range [S32_MIN..1]  */	\
> +	if w0 s< 0x0 goto 1f;		/* range can be narrowed to  [S32_MIN..-1] */	\
> +	r10 = 0;			/* thus predicting the jump. */			\
> +1:	exit;										\
> +"	:
> +	: __imm(bpf_get_prandom_u32)
> +	: __clobber_all);
> +}
> +
> +SEC("socket")
> +__success
> +__flag(BPF_F_TEST_REG_INVARIANTS)
> +__naked void signed_unsigned_intersection32_case2(void *ctx)
> +{
> +	asm volatile("									\
> +	call %[bpf_get_prandom_u32];							\
> +	w0 &= 0xffffffff;								\
> +	if w0 > 0x80000003 goto 1f;	/* on fall-through u32 range [0..S32_MIN+3] */	\
> +	if w0 s< -3 goto 1f;		/* on fall-through s32 range [-3..S32_MAX] */	\
> +	if w0 s> 5 goto 1f;		/* on fall-through s32 range [-3..5] */		\
> +	if w0 <= 5 goto 1f;		/* range can be narrowed to  [0..5] */		\
> +	r10 = 0;			/* thus predicting the jump */			\
> +1:	exit;										\
> +"	:
> +	: __imm(bpf_get_prandom_u32)
> +	: __clobber_all);
> +}
> +
>  char _license[] SEC("license") = "GPL";


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH bpf-next v1 1/2] bpf: refine u32/s32 bounds when ranges cross min/max boundary
  2026-03-05 16:49 ` [PATCH bpf-next v1 1/2] bpf: refine " Emil Tsalapatis
@ 2026-03-05 18:05   ` Eduard Zingerman
  0 siblings, 0 replies; 5+ messages in thread
From: Eduard Zingerman @ 2026-03-05 18:05 UTC (permalink / raw)
  To: Emil Tsalapatis, bpf, ast
  Cc: andrii, daniel, martin.lau, kernel-team, yonghong.song, arighi,
	shung-hsi.yu

On Thu, 2026-03-05 at 11:49 -0500, Emil Tsalapatis wrote:
> On Thu Mar 5, 2026 at 6:09 AM EST, Eduard Zingerman wrote:
> > Same as in __reg64_deduce_bounds(), refine s32/u32 ranges
> > in __reg32_deduce_bounds() in the following situations:
> Nit: Can we add a cover letter since this is a 2-patch series?
> 
> Also, can we explicitly state the condition: IIUC it's: "When
> the signed and unsigned ranges of a register partially overlap
> on an entirely (signed) positive or (signed) negative interval".
> This helps with the ASCII and also covers the question "what happens
> when the ranges overlap in two places" - nothing, we don't refine
> the register's bounds in that case, as noted in __reg64_deduce_bounds.

Sure, I'll extend the description.

> > 
> > 0                                                   U32_MAX
> > >  [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]              |
> > > ----------------------------|----------------------------|
> > > xxxxx s32 range xxxxxxxxx]                       [xxxxxxx|
> > 0                     S32_MAX S32_MIN                    -1
> > 
> > 0                                                   U32_MAX
> > >              [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]  |
> > > ----------------------------|----------------------------|
> > > xxxxxxxxx]                       [xxxxxxxxxxxx s32 range |
> > 0                     S32_MAX S32_MIN                    -1
> > 
> > This helps for e.g. consider the following program:
> > 
> >    call %[bpf_get_prandom_u32];
> >    w0 &= 0xffffffff;
> >    if w0 < 0x3 goto 1f;    // on fall-through u32 range [3..U32_MAX]
> >    if w0 s> 0x1 goto 1f;   // on fall-through s32 range [S32_MIN..1]
> >    if w0 s< 0x0 goto 1f;   // range can be narrowed to  [S32_MIN..-1]
> >    r10 = 0;
> > 1: ...;
> > 
> > The reg_bounds.c selftest is updated to incorporate identical logic,
> > refinement based on non-overflowing range halves:
> > 
> >   ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
> >   ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
> > 
> 
> > Reported-by: Andrea Righi <arighi@nvidia.com>
> > Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
> > Closes: https://lore.kernel.org/bpf/aakqucg4vcujVwif@gpd4/T/
> > Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
> > ---
> >  kernel/bpf/verifier.c                         | 24 ++++++++
> >  .../selftests/bpf/prog_tests/reg_bounds.c     | 58 +++++++++++++++++--
> >  2 files changed, 78 insertions(+), 4 deletions(-)
> > 
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index d92cf2821657..2b79a2a4d1b5 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -2532,6 +2532,30 @@ static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
> >  	if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
> >  		reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
> >  		reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
> > +	} else {
> > +		if (reg->u32_max_value < (u32)reg->s32_min_value) {
> > +			/* See __reg64_deduce_bounds() for detailed explanation.
> > +			 * Refine ranges in the following situation:
> > +			 *
> > +			 * 0                                                   U32_MAX
> > +			 * |  [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]              |
> > +			 * |----------------------------|----------------------------|
> > +			 * |xxxxx s32 range xxxxxxxxx]                       [xxxxxxx|
> > +			 * 0                     S32_MAX S32_MIN                    -1
> > +			 */
> > +			reg->s32_min_value = (s32)reg->u32_min_value;
> > +			reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value);
> > +		} else if ((u32)reg->s32_max_value < reg->u32_min_value) {
> > +			/*
> > +			 * 0                                                   U32_MAX
> > +			 * |              [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx]  |
> > +			 * |----------------------------|----------------------------|
> > +			 * |xxxxxxxxx]                       [xxxxxxxxxxxx s32 range |
> > +			 * 0                     S32_MAX S32_MIN                    -1
> > +			 */
> > +			reg->s32_max_value = (s32)reg->u32_max_value;
> > +			reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value);
> > +		}
> >  	}
> >  }
> >  
> > diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
> > index 0322f817d07b..446e5ef62f44 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
> > @@ -422,15 +422,65 @@ static bool is_valid_range(enum num_t t, struct range x)
> >  	}
> >  }
> >  
> > -static struct range range_improve(enum num_t t, struct range old, struct range new)
> > +static struct range range_intersection(enum num_t t, struct range old, struct range new)
> >  {
> >  	return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
> >  }
> >  
> > +static struct range range_union(enum num_t t, struct range x, struct range y)
> > +{
> > +	if (!is_valid_range(t, x))
> > +		return y;
> > +	if (!is_valid_range(t, y))
> > +		return x;
> > +	return range(t, min_t(t, x.a, y.a), max_t(t, x.b, y.b));
> > +}
> > +
> 
> AFAICT this is only fully accurate if the ranges intersect/are consecutive.
> Is it that it's fine to include extra values into the interval since
> we're dealing with bounds checking, and so can relax the bounds as we
> please? If so, then this is fine. 

Yes, this over-approximates when on range can't represent the union
and that is fine for the context. I'll add a comment to clarify this,
can't figure a better name.

> If not, can we either change the name to make it clear we assume the caller has 
> ensured this, or add a check in the function itself? 
> 
> > +/*
> > + * This function attempts to improve x range intersecting it with y.
> > + * range_cast(... to_t ...) loses precision for ranges that pass to_t
> > + * min/max boundaries. To avoid such precision losses this function
> > + * splits both x and y into halves corresponding to non-overflowing
> > + * sub-ranges: [0, smax] and [smin, -1].
> > + * Final result is computed as follows:
> > + *
> > + *   ((x ∩ [0, smax]) ∩ (y ∩ [0, smax])) ∪
> > + *   ((x ∩ [smin,-1]) ∩ (y ∩ [smin,-1]))
> > + *
> > + * Precision might still be lost if final union is not a continuous range.
> > + */
> > +static struct range range_refine_in_halves(enum num_t x_t, struct range x,
> > +					   enum num_t y_t, struct range y)
> > +{
> > +	struct range x_pos, x_neg, y_pos, y_neg, r_pos, r_neg;
> > +	u64 smax, smin, neg_one;
> > +
> > +	if (t_is_32(x_t)) {
> > +		smax = (u64)(u32)S32_MAX;
> > +		smin = (u64)(u32)S32_MIN;
> > +		neg_one = (u64)(u32)(s32)(-1);
> > +	} else {
> > +		smax = (u64)S64_MAX;
> > +		smin = (u64)S64_MIN;
> > +		neg_one = U64_MAX;
> > +	}
> > +	x_pos = range_intersection(x_t, x, range(x_t, 0, smax));
> > +	x_neg = range_intersection(x_t, x, range(x_t, smin, neg_one));
> > +	y_pos = range_intersection(y_t, y, range(y_t, 0, smax));
> > +	y_neg = range_intersection(y_t, y, range(y_t, smin, neg_one));
> > +	r_pos = range_intersection(x_t, x_pos, range_cast(y_t, x_t, y_pos));
> > +	r_neg = range_intersection(x_t, x_neg, range_cast(y_t, x_t, y_neg));
> > +	return range_union(x_t, r_pos, r_neg);
> > +
> > +}
> > +
> >  static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
> >  {
> >  	struct range y_cast;
> >  
> > +	if (t_is_32(x_t) == t_is_32(y_t))
> > +		x = range_refine_in_halves(x_t, x, y_t, y);
> > +
> >  	y_cast = range_cast(y_t, x_t, y);
> >  
> >  	/* If we know that
> > @@ -444,7 +494,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
> >  	 */
> >  	if (x_t == S64 && y_t == S32 && y_cast.a <= S32_MAX  && y_cast.b <= S32_MAX &&
> >  	    (s64)x.a >= S32_MIN && (s64)x.b <= S32_MAX)
> > -		return range_improve(x_t, x, y_cast);
> > +		return range_intersection(x_t, x, y_cast);
> >  
> >  	/* the case when new range knowledge, *y*, is a 32-bit subregister
> >  	 * range, while previous range knowledge, *x*, is a full register
> > @@ -462,7 +512,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
> >  		x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
> >  		if (!is_valid_range(x_t, x_swap))
> >  			return x;
> > -		return range_improve(x_t, x, x_swap);
> > +		return range_intersection(x_t, x, x_swap);
> >  	}
> >  
> >  	if (!t_is_32(x_t) && !t_is_32(y_t) && x_t != y_t) {
> > @@ -480,7 +530,7 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
> >  	}
> >  
> >  	/* otherwise, plain range cast and intersection works */
> > -	return range_improve(x_t, x, y_cast);
> > +	return range_intersection(x_t, x, y_cast);
> >  }
> >  
> >  /* =======================

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2026-03-05 18:05 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-05 11:09 [PATCH bpf-next v1 1/2] bpf: refine u32/s32 bounds when ranges cross min/max boundary Eduard Zingerman
2026-03-05 11:09 ` [PATCH bpf-next v1 2/2] selftests/bpf: test refining " Eduard Zingerman
2026-03-05 16:51   ` Emil Tsalapatis
2026-03-05 16:49 ` [PATCH bpf-next v1 1/2] bpf: refine " Emil Tsalapatis
2026-03-05 18:05   ` Eduard Zingerman

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox