[PATCH RFC bpf-next 2/4] bpf: use accessor functions for bpf_reg_state min/max fields
From: Eduard Zingerman @ 2026-04-21 10:28 UTC
To: bpf, ast, andrii
Cc: daniel, martin.lau, kernel-team, yonghong.song, eddyz87,
shung-hsi.yu, paul.chaignon, harishankar.vishwanathan
Replace direct access to bpf_reg_state->{smin,smax,umin,umax,
s32_min,s32_max,u32_min,u32_max}_value with inline getter/setter
functions, in preparation for a future switch to a cnum-based
internal representation.
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 8 +-
include/linux/bpf_verifier.h | 64 ++
kernel/bpf/log.c | 24 +-
kernel/bpf/states.c | 16 +-
kernel/bpf/verifier.c | 1233 ++++++++++-----------
5 files changed, 678 insertions(+), 667 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 70368fe7c510..1caa87da72b5 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -561,10 +561,10 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_reg_state *dreg =
cur_regs(env) + meta->insn.dst_reg;
- meta->umin_src = min(meta->umin_src, sreg->umin_value);
- meta->umax_src = max(meta->umax_src, sreg->umax_value);
- meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
- meta->umax_dst = max(meta->umax_dst, dreg->umax_value);
+ meta->umin_src = min(meta->umin_src, reg_umin(sreg));
+ meta->umax_src = max(meta->umax_src, reg_umax(sreg));
+ meta->umin_dst = min(meta->umin_dst, reg_umin(dreg));
+ meta->umax_dst = max(meta->umax_dst, reg_umax(dreg));
/* NFP supports u16 and u32 multiplication.
*
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b148f816f25b..b44d399adbb2 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -209,6 +209,70 @@ struct bpf_reg_state {
bool precise;
};
+static inline s64 reg_smin(const struct bpf_reg_state *reg)
+{
+ return reg->smin_value;
+}
+
+static inline s64 reg_smax(const struct bpf_reg_state *reg)
+{
+ return reg->smax_value;
+}
+
+static inline u64 reg_umin(const struct bpf_reg_state *reg)
+{
+ return reg->umin_value;
+}
+
+static inline u64 reg_umax(const struct bpf_reg_state *reg)
+{
+ return reg->umax_value;
+}
+
+static inline s32 reg_s32_min(const struct bpf_reg_state *reg)
+{
+ return reg->s32_min_value;
+}
+
+static inline s32 reg_s32_max(const struct bpf_reg_state *reg)
+{
+ return reg->s32_max_value;
+}
+
+static inline u32 reg_u32_min(const struct bpf_reg_state *reg)
+{
+ return reg->u32_min_value;
+}
+
+static inline u32 reg_u32_max(const struct bpf_reg_state *reg)
+{
+ return reg->u32_max_value;
+}
+
+static inline void reg_set_srange32(struct bpf_reg_state *reg, s32 smin, s32 smax)
+{
+ reg->s32_min_value = smin;
+ reg->s32_max_value = smax;
+}
+
+static inline void reg_set_urange32(struct bpf_reg_state *reg, u32 umin, u32 umax)
+{
+ reg->u32_min_value = umin;
+ reg->u32_max_value = umax;
+}
+
+static inline void reg_set_srange64(struct bpf_reg_state *reg, s64 smin, s64 smax)
+{
+ reg->smin_value = smin;
+ reg->smax_value = smax;
+}
+
+static inline void reg_set_urange64(struct bpf_reg_state *reg, u64 umin, u64 umax)
+{
+ reg->umin_value = umin;
+ reg->umax_value = umax;
+}
+
enum bpf_stack_slot_type {
STACK_INVALID, /* nothing was stored in this stack slot */
STACK_SPILL, /* register spilled into stack */
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 011e4ec25acd..64566b86dd27 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -571,20 +571,20 @@ static void print_scalar_ranges(struct bpf_verifier_env *env,
u64 val;
bool omit;
} minmaxs[] = {
- {"smin", reg->smin_value, reg->smin_value == S64_MIN},
- {"smax", reg->smax_value, reg->smax_value == S64_MAX},
- {"umin", reg->umin_value, reg->umin_value == 0},
- {"umax", reg->umax_value, reg->umax_value == U64_MAX},
+ {"smin", reg_smin(reg), reg_smin(reg) == S64_MIN},
+ {"smax", reg_smax(reg), reg_smax(reg) == S64_MAX},
+ {"umin", reg_umin(reg), reg_umin(reg) == 0},
+ {"umax", reg_umax(reg), reg_umax(reg) == U64_MAX},
{"smin32",
- is_snum_decimal((s64)reg->s32_min_value)
- ? (s64)reg->s32_min_value
- : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
+ is_snum_decimal((s64)reg_s32_min(reg))
+ ? (s64)reg_s32_min(reg)
+ : (u32)reg_s32_min(reg), reg_s32_min(reg) == S32_MIN},
{"smax32",
- is_snum_decimal((s64)reg->s32_max_value)
- ? (s64)reg->s32_max_value
- : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
- {"umin32", reg->u32_min_value, reg->u32_min_value == 0},
- {"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
+ is_snum_decimal((s64)reg_s32_max(reg))
+ ? (s64)reg_s32_max(reg)
+ : (u32)reg_s32_max(reg), reg_s32_max(reg) == S32_MAX},
+ {"umin32", reg_u32_min(reg), reg_u32_min(reg) == 0},
+ {"umax32", reg_u32_max(reg), reg_u32_max(reg) == U32_MAX},
}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
bool neg1, neg2;
diff --git a/kernel/bpf/states.c b/kernel/bpf/states.c
index 8478d2c6ed5b..a78ae891b743 100644
--- a/kernel/bpf/states.c
+++ b/kernel/bpf/states.c
@@ -301,14 +301,14 @@ int bpf_update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_s
static bool range_within(const struct bpf_reg_state *old,
const struct bpf_reg_state *cur)
{
- return old->umin_value <= cur->umin_value &&
- old->umax_value >= cur->umax_value &&
- old->smin_value <= cur->smin_value &&
- old->smax_value >= cur->smax_value &&
- old->u32_min_value <= cur->u32_min_value &&
- old->u32_max_value >= cur->u32_max_value &&
- old->s32_min_value <= cur->s32_min_value &&
- old->s32_max_value >= cur->s32_max_value;
+ return reg_umin(old) <= reg_umin(cur) &&
+ reg_umax(old) >= reg_umax(cur) &&
+ reg_smin(old) <= reg_smin(cur) &&
+ reg_smax(old) >= reg_smax(cur) &&
+ reg_u32_min(old) <= reg_u32_min(cur) &&
+ reg_u32_max(old) >= reg_u32_max(cur) &&
+ reg_s32_min(old) <= reg_s32_min(cur) &&
+ reg_s32_max(old) >= reg_s32_max(cur);
}
/* If in the old state two registers had the same id, then they need to have
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 185210b73385..2e896f5d92a2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -290,12 +290,12 @@ static void verbose_invalid_scalar(struct bpf_verifier_env *env,
bool unknown = true;
verbose(env, "%s the register %s has", ctx, reg_name);
- if (reg->smin_value > S64_MIN) {
- verbose(env, " smin=%lld", reg->smin_value);
+ if (reg_smin(reg) > S64_MIN) {
+ verbose(env, " smin=%lld", reg_smin(reg));
unknown = false;
}
- if (reg->smax_value < S64_MAX) {
- verbose(env, " smax=%lld", reg->smax_value);
+ if (reg_smax(reg) < S64_MAX) {
+ verbose(env, " smax=%lld", reg_smax(reg));
unknown = false;
}
if (unknown)
@@ -1750,15 +1750,10 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
reg->var_off = tnum_const(imm);
- reg->smin_value = (s64)imm;
- reg->smax_value = (s64)imm;
- reg->umin_value = imm;
- reg->umax_value = imm;
-
- reg->s32_min_value = (s32)imm;
- reg->s32_max_value = (s32)imm;
- reg->u32_min_value = (u32)imm;
- reg->u32_max_value = (u32)imm;
+ reg_set_srange64(reg, (s64)imm, (s64)imm);
+ reg_set_urange64(reg, imm, imm);
+ reg_set_srange32(reg, (s32)imm, (s32)imm);
+ reg_set_urange32(reg, (u32)imm, (u32)imm);
}
/* Mark the unknown part of a register (variable offset or scalar value) as
@@ -1777,10 +1772,8 @@ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
reg->var_off = tnum_const_subreg(reg->var_off, imm);
- reg->s32_min_value = (s32)imm;
- reg->s32_max_value = (s32)imm;
- reg->u32_min_value = (u32)imm;
- reg->u32_max_value = (u32)imm;
+ reg_set_srange32(reg, (s32)imm, (s32)imm);
+ reg_set_urange32(reg, (u32)imm, (u32)imm);
}
/* Mark the 'variable offset' part of a register as zero. This should be
@@ -1891,34 +1884,25 @@ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
tnum_equals_const(reg->var_off, 0);
}
+static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
+{
+ reg_set_srange32(reg, S32_MIN, S32_MAX);
+ reg_set_urange32(reg, 0, U32_MAX);
+}
+
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
- reg->smin_value = S64_MIN;
- reg->smax_value = S64_MAX;
- reg->umin_value = 0;
- reg->umax_value = U64_MAX;
+ reg_set_srange64(reg, S64_MIN, S64_MAX);
+ reg_set_urange64(reg, 0, U64_MAX);
- reg->s32_min_value = S32_MIN;
- reg->s32_max_value = S32_MAX;
- reg->u32_min_value = 0;
- reg->u32_max_value = U32_MAX;
+ __mark_reg32_unbounded(reg);
}
static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
- reg->smin_value = S64_MIN;
- reg->smax_value = S64_MAX;
- reg->umin_value = 0;
- reg->umax_value = U64_MAX;
-}
-
-static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
-{
- reg->s32_min_value = S32_MIN;
- reg->s32_max_value = S32_MAX;
- reg->u32_min_value = 0;
- reg->u32_max_value = U32_MAX;
+ reg_set_srange64(reg, S64_MIN, S64_MAX);
+ reg_set_urange64(reg, 0, U64_MAX);
}
static void reset_reg64_and_tnum(struct bpf_reg_state *reg)
@@ -1937,15 +1921,14 @@ static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
struct tnum var32_off = tnum_subreg(reg->var_off);
- /* min signed is max(sign bit) | min(other bits) */
- reg->s32_min_value = max_t(s32, reg->s32_min_value,
- var32_off.value | (var32_off.mask & S32_MIN));
- /* max signed is min(sign bit) | max(other bits) */
- reg->s32_max_value = min_t(s32, reg->s32_max_value,
- var32_off.value | (var32_off.mask & S32_MAX));
- reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
- reg->u32_max_value = min(reg->u32_max_value,
- (u32)(var32_off.value | var32_off.mask));
+ reg_set_srange32(reg,
+ /* min signed is max(sign bit) | min(other bits) */
+ max_t(s32, reg_s32_min(reg), var32_off.value | (var32_off.mask & S32_MIN)),
+ /* max signed is min(sign bit) | max(other bits) */
+ min_t(s32, reg_s32_max(reg), var32_off.value | (var32_off.mask & S32_MAX)));
+ reg_set_urange32(reg,
+ max_t(u32, reg_u32_min(reg), (u32)var32_off.value),
+ min(reg_u32_max(reg), (u32)(var32_off.value | var32_off.mask)));
}
static void __update_reg64_bounds(struct bpf_reg_state *reg)
@@ -1954,25 +1937,27 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
bool umin_in_tnum;
/* min signed is max(sign bit) | min(other bits) */
- reg->smin_value = max_t(s64, reg->smin_value,
- reg->var_off.value | (reg->var_off.mask & S64_MIN));
/* max signed is min(sign bit) | max(other bits) */
- reg->smax_value = min_t(s64, reg->smax_value,
- reg->var_off.value | (reg->var_off.mask & S64_MAX));
- reg->umin_value = max(reg->umin_value, reg->var_off.value);
- reg->umax_value = min(reg->umax_value,
- reg->var_off.value | reg->var_off.mask);
+ reg_set_srange64(reg,
+ max_t(s64, reg_smin(reg),
+ reg->var_off.value | (reg->var_off.mask & S64_MIN)),
+ min_t(s64, reg_smax(reg),
+ reg->var_off.value | (reg->var_off.mask & S64_MAX)));
+ reg_set_urange64(reg,
+ max(reg_umin(reg), reg->var_off.value),
+ min(reg_umax(reg),
+ reg->var_off.value | reg->var_off.mask));
/* Check if u64 and tnum overlap in a single value */
- tnum_next = tnum_step(reg->var_off, reg->umin_value);
- umin_in_tnum = (reg->umin_value & ~reg->var_off.mask) == reg->var_off.value;
+ tnum_next = tnum_step(reg->var_off, reg_umin(reg));
+ umin_in_tnum = (reg_umin(reg) & ~reg->var_off.mask) == reg->var_off.value;
tmax = reg->var_off.value | reg->var_off.mask;
- if (umin_in_tnum && tnum_next > reg->umax_value) {
+ if (umin_in_tnum && tnum_next > reg_umax(reg)) {
/* The u64 range and the tnum only overlap in umin.
* u64: ---[xxxxxx]-----
* tnum: --xx----------x-
*/
- ___mark_reg_known(reg, reg->umin_value);
+ ___mark_reg_known(reg, reg_umin(reg));
} else if (!umin_in_tnum && tnum_next == tmax) {
/* The u64 range and the tnum only overlap in the maximum value
* represented by the tnum, called tmax.
@@ -1980,8 +1965,8 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
* tnum: xx-----x--------
*/
___mark_reg_known(reg, tmax);
- } else if (!umin_in_tnum && tnum_next <= reg->umax_value &&
- tnum_step(reg->var_off, tnum_next) > reg->umax_value) {
+ } else if (!umin_in_tnum && tnum_next <= reg_umax(reg) &&
+ tnum_step(reg->var_off, tnum_next) > reg_umax(reg)) {
/* The u64 range and the tnum only overlap in between umin
* (excluded) and umax.
* u64: ---[xxxxxx]-----
@@ -2021,28 +2006,32 @@ static void deduce_bounds_32_from_64(struct bpf_reg_state *reg)
*
* So we use all these insights to derive bounds for subregisters here.
*/
- if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) {
+ if ((reg_umin(reg) >> 32) == (reg_umax(reg) >> 32)) {
/* u64 to u32 casting preserves validity of low 32 bits as
* a range, if upper 32 bits are the same
*/
- reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value);
- reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value);
+ reg_set_urange32(reg,
+ max_t(u32, reg_u32_min(reg), (u32)reg_umin(reg)),
+ min_t(u32, reg_u32_max(reg), (u32)reg_umax(reg)));
- if ((s32)reg->umin_value <= (s32)reg->umax_value) {
- reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
- reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
+ if ((s32)reg_umin(reg) <= (s32)reg_umax(reg)) {
+ reg_set_srange32(reg,
+ max_t(s32, reg_s32_min(reg), (s32)reg_umin(reg)),
+ min_t(s32, reg_s32_max(reg), (s32)reg_umax(reg)));
}
}
- if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) {
+ if ((reg_smin(reg) >> 32) == (reg_smax(reg) >> 32)) {
/* low 32 bits should form a proper u32 range */
- if ((u32)reg->smin_value <= (u32)reg->smax_value) {
- reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value);
- reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value);
+ if ((u32)reg_smin(reg) <= (u32)reg_smax(reg)) {
+ reg_set_urange32(reg,
+ max_t(u32, reg_u32_min(reg), (u32)reg_smin(reg)),
+ min_t(u32, reg_u32_max(reg), (u32)reg_smax(reg)));
}
/* low 32 bits should form a proper s32 range */
- if ((s32)reg->smin_value <= (s32)reg->smax_value) {
- reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
- reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
+ if ((s32)reg_smin(reg) <= (s32)reg_smax(reg)) {
+ reg_set_srange32(reg,
+ max_t(s32, reg_s32_min(reg), (s32)reg_smin(reg)),
+ min_t(s32, reg_s32_max(reg), (s32)reg_smax(reg)));
}
}
/* Special case where upper bits form a small sequence of two
@@ -2058,15 +2047,17 @@ static void deduce_bounds_32_from_64(struct bpf_reg_state *reg)
* [0xfffffff0fffffff0; 0xfffffff100000010], forms a valid s32 range
* [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister.
*/
- if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) &&
- (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) {
- reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
- reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
+ if ((u32)(reg_umin(reg) >> 32) + 1 == (u32)(reg_umax(reg) >> 32) &&
+ (s32)reg_umin(reg) < 0 && (s32)reg_umax(reg) >= 0) {
+ reg_set_srange32(reg,
+ max_t(s32, reg_s32_min(reg), (s32)reg_umin(reg)),
+ min_t(s32, reg_s32_max(reg), (s32)reg_umax(reg)));
}
- if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) &&
- (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) {
- reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
- reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
+ if ((u32)(reg_smin(reg) >> 32) + 1 == (u32)(reg_smax(reg) >> 32) &&
+ (s32)reg_smin(reg) < 0 && (s32)reg_smax(reg) >= 0) {
+ reg_set_srange32(reg,
+ max_t(s32, reg_s32_min(reg), (s32)reg_smin(reg)),
+ min_t(s32, reg_s32_max(reg), (s32)reg_smax(reg)));
}
}
@@ -2075,19 +2066,21 @@ static void deduce_bounds_32_from_32(struct bpf_reg_state *reg)
/* if u32 range forms a valid s32 range (due to matching sign bit),
* try to learn from that
*/
- if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) {
- reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value);
- reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value);
+ if ((s32)reg_u32_min(reg) <= (s32)reg_u32_max(reg)) {
+ reg_set_srange32(reg,
+ max_t(s32, reg_s32_min(reg), reg_u32_min(reg)),
+ min_t(s32, reg_s32_max(reg), reg_u32_max(reg)));
}
/* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
- if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
- reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
- reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
+ if ((u32)reg_s32_min(reg) <= (u32)reg_s32_max(reg)) {
+ reg_set_urange32(reg,
+ max_t(u32, reg_s32_min(reg), reg_u32_min(reg)),
+ min_t(u32, reg_s32_max(reg), reg_u32_max(reg)));
} else {
- if (reg->u32_max_value < (u32)reg->s32_min_value) {
+ if (reg_u32_max(reg) < (u32)reg_s32_min(reg)) {
/* See __reg64_deduce_bounds() for detailed explanation.
* Refine ranges in the following situation:
*
@@ -2097,9 +2090,11 @@ static void deduce_bounds_32_from_32(struct bpf_reg_state *reg)
* |xxxxx s32 range xxxxxxxxx] [xxxxxxx|
* 0 S32_MAX S32_MIN -1
*/
- reg->s32_min_value = (s32)reg->u32_min_value;
- reg->u32_max_value = min_t(u32, reg->u32_max_value, reg->s32_max_value);
- } else if ((u32)reg->s32_max_value < reg->u32_min_value) {
+ reg_set_srange32(reg, (s32)reg_u32_min(reg), reg_s32_max(reg));
+ reg_set_urange32(reg,
+ reg_u32_min(reg),
+ min_t(u32, reg_u32_max(reg), reg_s32_max(reg)));
+ } else if ((u32)reg_s32_max(reg) < reg_u32_min(reg)) {
/*
* 0 U32_MAX
* | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
@@ -2107,8 +2102,10 @@ static void deduce_bounds_32_from_32(struct bpf_reg_state *reg)
* |xxxxxxxxx] [xxxxxxxxxxxx s32 range |
* 0 S32_MAX S32_MIN -1
*/
- reg->s32_max_value = (s32)reg->u32_max_value;
- reg->u32_min_value = max_t(u32, reg->u32_min_value, reg->s32_min_value);
+ reg_set_srange32(reg, reg_s32_min(reg), (s32)reg_u32_max(reg));
+ reg_set_urange32(reg,
+ max_t(u32, reg_u32_min(reg), reg_s32_min(reg)),
+ reg_u32_max(reg));
}
}
}
@@ -2182,17 +2179,19 @@ static void deduce_bounds_64_from_64(struct bpf_reg_state *reg)
* casting umin/umax as smin/smax and checking if they form valid
* range, and vice versa. Those are equivalent checks.
*/
- if ((s64)reg->umin_value <= (s64)reg->umax_value) {
- reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value);
- reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value);
+ if ((s64)reg_umin(reg) <= (s64)reg_umax(reg)) {
+ reg_set_srange64(reg,
+ max_t(s64, reg_smin(reg), reg_umin(reg)),
+ min_t(s64, reg_smax(reg), reg_umax(reg)));
}
/* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
- if ((u64)reg->smin_value <= (u64)reg->smax_value) {
- reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
- reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
+ if ((u64)reg_smin(reg) <= (u64)reg_smax(reg)) {
+ reg_set_urange64(reg,
+ max_t(u64, reg_smin(reg), reg_umin(reg)),
+ min_t(u64, reg_smax(reg), reg_umax(reg)));
} else {
/* If the s64 range crosses the sign boundary, then it's split
* between the beginning and end of the U64 domain. In that
@@ -2229,10 +2228,10 @@ static void deduce_bounds_64_from_64(struct bpf_reg_state *reg)
* The first condition below corresponds to the first diagram
* above.
*/
- if (reg->umax_value < (u64)reg->smin_value) {
- reg->smin_value = (s64)reg->umin_value;
- reg->umax_value = min_t(u64, reg->umax_value, reg->smax_value);
- } else if ((u64)reg->smax_value < reg->umin_value) {
+ if (reg_umax(reg) < (u64)reg_smin(reg)) {
+ reg_set_srange64(reg, (s64)reg_umin(reg), reg_smax(reg));
+ reg_set_urange64(reg, reg_umin(reg), min_t(u64, reg_umax(reg), reg_smax(reg)));
+ } else if ((u64)reg_smax(reg) < reg_umin(reg)) {
/* This second condition considers the case where the u64 range
* overlaps with the negative portion of the s64 range:
*
@@ -2242,8 +2241,8 @@ static void deduce_bounds_64_from_64(struct bpf_reg_state *reg)
* |xxxxxxxxx] [xxxxxxxxxxxx s64 range |
* 0 S64_MAX S64_MIN -1
*/
- reg->smax_value = (s64)reg->umax_value;
- reg->umin_value = max_t(u64, reg->umin_value, reg->smin_value);
+ reg_set_srange64(reg, reg_smin(reg), (s64)reg_umax(reg));
+ reg_set_urange64(reg, max_t(u64, reg_umin(reg), reg_smin(reg)), reg_umax(reg));
}
}
}
@@ -2266,15 +2265,17 @@ static void deduce_bounds_64_from_32(struct bpf_reg_state *reg)
__s64 new_smin, new_smax;
/* u32 -> u64 tightening, it's always well-formed */
- new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value;
- new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value;
- reg->umin_value = max_t(u64, reg->umin_value, new_umin);
- reg->umax_value = min_t(u64, reg->umax_value, new_umax);
+ new_umin = (reg_umin(reg) & ~0xffffffffULL) | reg_u32_min(reg);
+ new_umax = (reg_umax(reg) & ~0xffffffffULL) | reg_u32_max(reg);
+ reg_set_urange64(reg,
+ max_t(u64, reg_umin(reg), new_umin),
+ min_t(u64, reg_umax(reg), new_umax));
/* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */
- new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value;
- new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value;
- reg->smin_value = max_t(s64, reg->smin_value, new_smin);
- reg->smax_value = min_t(s64, reg->smax_value, new_smax);
+ new_smin = (reg_smin(reg) & ~0xffffffffULL) | reg_u32_min(reg);
+ new_smax = (reg_smax(reg) & ~0xffffffffULL) | reg_u32_max(reg);
+ reg_set_srange64(reg,
+ max_t(s64, reg_smin(reg), new_smin),
+ min_t(s64, reg_smax(reg), new_smax));
/* Here we would like to handle a special case after sign extending load,
* when upper bits for a 64-bit range are all 1s or all 0s.
@@ -2305,13 +2306,11 @@ static void deduce_bounds_64_from_32(struct bpf_reg_state *reg)
* - 0x0000_0000_7fff_ffff == (s64)S32_MAX
* These relations are used in the conditions below.
*/
- if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) {
- reg->smin_value = reg->s32_min_value;
- reg->smax_value = reg->s32_max_value;
- reg->umin_value = reg->s32_min_value;
- reg->umax_value = reg->s32_max_value;
+ if (reg_s32_min(reg) >= 0 && reg_smin(reg) >= S32_MIN && reg_smax(reg) <= S32_MAX) {
+ reg_set_srange64(reg, reg_s32_min(reg), reg_s32_max(reg));
+ reg_set_urange64(reg, reg_s32_min(reg), reg_s32_max(reg));
reg->var_off = tnum_intersect(reg->var_off,
- tnum_range(reg->smin_value, reg->smax_value));
+ tnum_range(reg_smin(reg), reg_smax(reg)));
}
}
@@ -2327,11 +2326,11 @@ static void __reg_deduce_bounds(struct bpf_reg_state *reg)
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
struct tnum var64_off = tnum_intersect(reg->var_off,
- tnum_range(reg->umin_value,
- reg->umax_value));
+ tnum_range(reg_umin(reg),
+ reg_umax(reg)));
struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
- tnum_range(reg->u32_min_value,
- reg->u32_max_value));
+ tnum_range(reg_u32_min(reg),
+ reg_u32_max(reg)));
reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}
@@ -2359,9 +2358,9 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
static bool range_bounds_violation(struct bpf_reg_state *reg)
{
- return (reg->umin_value > reg->umax_value || reg->smin_value > reg->smax_value ||
- reg->u32_min_value > reg->u32_max_value ||
- reg->s32_min_value > reg->s32_max_value);
+ return (reg_umin(reg) > reg_umax(reg) || reg_smin(reg) > reg_smax(reg) ||
+ reg_u32_min(reg) > reg_u32_max(reg) ||
+ reg_s32_min(reg) > reg_s32_max(reg));
}
static bool const_tnum_range_mismatch(struct bpf_reg_state *reg)
@@ -2372,8 +2371,8 @@ static bool const_tnum_range_mismatch(struct bpf_reg_state *reg)
if (!tnum_is_const(reg->var_off))
return false;
- return reg->umin_value != uval || reg->umax_value != uval ||
- reg->smin_value != sval || reg->smax_value != sval;
+ return reg_umin(reg) != uval || reg_umax(reg) != uval ||
+ reg_smin(reg) != sval || reg_smax(reg) != sval;
}
static bool const_tnum_range_mismatch_32(struct bpf_reg_state *reg)
@@ -2384,8 +2383,8 @@ static bool const_tnum_range_mismatch_32(struct bpf_reg_state *reg)
if (!tnum_subreg_is_const(reg->var_off))
return false;
- return reg->u32_min_value != uval32 || reg->u32_max_value != uval32 ||
- reg->s32_min_value != sval32 || reg->s32_max_value != sval32;
+ return reg_u32_min(reg) != uval32 || reg_u32_max(reg) != uval32 ||
+ reg_s32_min(reg) != sval32 || reg_s32_max(reg) != sval32;
}
static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
@@ -2412,10 +2411,10 @@ static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
out:
verifier_bug(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] "
"s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)",
- ctx, msg, reg->umin_value, reg->umax_value,
- reg->smin_value, reg->smax_value,
- reg->u32_min_value, reg->u32_max_value,
- reg->s32_min_value, reg->s32_max_value,
+ ctx, msg, reg_umin(reg), reg_umax(reg),
+ reg_smin(reg), reg_smax(reg),
+ reg_u32_min(reg), reg_u32_max(reg),
+ reg_s32_min(reg), reg_s32_max(reg),
reg->var_off.value, reg->var_off.mask);
if (env->test_reg_invariants)
return -EFAULT;
@@ -2430,21 +2429,17 @@ static bool __reg32_bound_s64(s32 a)
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
- reg->umin_value = reg->u32_min_value;
- reg->umax_value = reg->u32_max_value;
+ reg_set_urange64(reg, reg_u32_min(reg), reg_u32_max(reg));
/* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
* be positive otherwise set to worst case bounds and refine later
* from tnum.
*/
- if (__reg32_bound_s64(reg->s32_min_value) &&
- __reg32_bound_s64(reg->s32_max_value)) {
- reg->smin_value = reg->s32_min_value;
- reg->smax_value = reg->s32_max_value;
- } else {
- reg->smin_value = 0;
- reg->smax_value = U32_MAX;
- }
+ if (__reg32_bound_s64(reg_s32_min(reg)) &&
+ __reg32_bound_s64(reg_s32_max(reg)))
+ reg_set_srange64(reg, reg_s32_min(reg), reg_s32_max(reg));
+ else
+ reg_set_srange64(reg, 0, U32_MAX);
}
/* Mark a register as having a completely unknown (scalar) value. */
@@ -2488,11 +2483,12 @@ static int __mark_reg_s32_range(struct bpf_verifier_env *env,
{
struct bpf_reg_state *reg = regs + regno;
- reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min);
- reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max);
-
- reg->smin_value = max_t(s64, reg->smin_value, s32_min);
- reg->smax_value = min_t(s64, reg->smax_value, s32_max);
+ reg_set_srange32(reg,
+ max_t(s32, reg_s32_min(reg), s32_min),
+ min_t(s32, reg_s32_max(reg), s32_max));
+ reg_set_srange64(reg,
+ max_t(s64, reg_smin(reg), s32_min),
+ min_t(s64, reg_smax(reg), s32_max));
reg_bounds_sync(reg);
@@ -3755,7 +3751,7 @@ static bool is_bpf_st_mem(struct bpf_insn *insn)
static int get_reg_width(struct bpf_reg_state *reg)
{
- return fls64(reg->umax_value);
+ return fls64(reg_umax(reg));
}
/* See comment for mark_fastcall_pattern_for_call() */
@@ -3945,8 +3941,8 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
cur = env->cur_state->frame[env->cur_state->curframe];
ptr_reg = &cur->regs[ptr_regno];
- min_off = ptr_reg->smin_value + off;
- max_off = ptr_reg->smax_value + off + size;
+ min_off = reg_smin(ptr_reg) + off;
+ max_off = reg_smax(ptr_reg) + off + size;
if (value_regno >= 0)
value_reg = &cur->regs[value_regno];
if ((value_reg && bpf_register_is_null(value_reg)) ||
@@ -4281,8 +4277,8 @@ static int check_stack_read_var_off(struct bpf_verifier_env *env,
if (err)
return err;
- min_off = reg->smin_value + off;
- max_off = reg->smax_value + off;
+ min_off = reg_smin(reg) + off;
+ max_off = reg_smax(reg) + off;
mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off);
return 0;
@@ -4385,13 +4381,13 @@ static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
verbose(env, "write into map forbidden, value_size=%d off=%lld size=%d\n",
- map->value_size, reg->smin_value + off, size);
+ map->value_size, reg_smin(reg) + off, size);
return -EACCES;
}
if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
verbose(env, "read from map forbidden, value_size=%d off=%lld size=%d\n",
- map->value_size, reg->smin_value + off, size);
+ map->value_size, reg_smin(reg) + off, size);
return -EACCES;
}
@@ -4458,15 +4454,15 @@ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
* index'es we need to make sure that whatever we use
* will have a set floor within our range.
*/
- if (reg->smin_value < 0 &&
- (reg->smin_value == S64_MIN ||
- (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
- reg->smin_value + off < 0)) {
+ if (reg_smin(reg) < 0 &&
+ (reg_smin(reg) == S64_MIN ||
+ (off + reg_smin(reg) != (s64)(s32)(off + reg_smin(reg))) ||
+ reg_smin(reg) + off < 0)) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
}
- err = __check_mem_access(env, regno, reg->smin_value + off, size,
+ err = __check_mem_access(env, regno, reg_smin(reg) + off, size,
mem_size, zero_size_allowed);
if (err) {
verbose(env, "R%d min value is outside of the allowed memory range\n",
@@ -4476,14 +4472,14 @@ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
/* If we haven't set a max value then we need to bail since we can't be
* sure we won't do bad things.
- * If reg->umax_value + off could overflow, treat that as unbounded too.
+ * If reg_umax(reg) + off could overflow, treat that as unbounded too.
*/
- if (reg->umax_value >= BPF_MAX_VAR_OFF) {
+ if (reg_umax(reg) >= BPF_MAX_VAR_OFF) {
verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
regno);
return -EACCES;
}
- err = __check_mem_access(env, regno, reg->umax_value + off, size,
+ err = __check_mem_access(env, regno, reg_umax(reg) + off, size,
mem_size, zero_size_allowed);
if (err) {
verbose(env, "R%d max value is outside of the allowed memory range\n",
@@ -4511,7 +4507,7 @@ static int __check_ptr_off_reg(struct bpf_verifier_env *env,
return -EACCES;
}
- if (reg->smin_value < 0) {
+ if (reg_smin(reg) < 0) {
verbose(env, "negative offset %s ptr R%d off=%lld disallowed\n",
reg_type_str(env, reg->type), regno, reg->var_off.value);
return -EACCES;
@@ -4814,8 +4810,8 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
* this program. To check that [x1, x2) overlaps with [y1, y2),
* it is sufficient to check x1 < y2 && y1 < x2.
*/
- if (reg->smin_value + off < p + field->size &&
- p < reg->umax_value + off + size) {
+ if (reg_smin(reg) + off < p + field->size &&
+ p < reg_umax(reg) + off + size) {
switch (field->type) {
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
@@ -4911,14 +4907,14 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
return err;
/* __check_mem_access has made sure "off + size - 1" is within u16.
- * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
+ * reg_umax(reg) can't be bigger than MAX_PACKET_OFF which is 0xffff,
* otherwise find_good_pkt_pointers would have refused to set range info
* that __check_mem_access would have rejected this pkt access.
- * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
+ * Therefore, "off + reg_umax(reg) + size - 1" won't overflow u32.
*/
env->prog->aux->max_pkt_offset =
max_t(u32, env->prog->aux->max_pkt_offset,
- off + reg->umax_value + size - 1);
+ off + reg_umax(reg) + size - 1);
return 0;
}
@@ -4981,7 +4977,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
err = __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
if (err)
return err;
- off += reg->umax_value;
+ off += reg_umax(reg);
err = __check_ctx_access(env, insn_idx, off, access_size, t, info);
if (err)
@@ -5009,7 +5005,7 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
struct bpf_insn_access_aux info = {};
bool valid;
- if (reg->smin_value < 0) {
+ if (reg_smin(reg) < 0) {
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
@@ -5624,15 +5620,12 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
/* fix arithmetic bounds */
mask = ((u64)1 << (size * 8)) - 1;
- if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
- reg->umin_value &= mask;
- reg->umax_value &= mask;
+ if ((reg_umin(reg) & ~mask) == (reg_umax(reg) & ~mask)) {
+ reg_set_urange64(reg, reg_umin(reg) & mask, reg_umax(reg) & mask);
} else {
- reg->umin_value = 0;
- reg->umax_value = mask;
+ reg_set_urange64(reg, 0, mask);
}
- reg->smin_value = reg->umin_value;
- reg->smax_value = reg->umax_value;
+ reg_set_srange64(reg, reg_umin(reg), reg_umax(reg));
/* If size is smaller than 32bit register the 32bit register
* values are also truncated so we push 64-bit bounds into
@@ -5647,19 +5640,18 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
{
if (size == 1) {
- reg->smin_value = reg->s32_min_value = S8_MIN;
- reg->smax_value = reg->s32_max_value = S8_MAX;
+ reg_set_srange64(reg, S8_MIN, S8_MAX);
+ reg_set_srange32(reg, S8_MIN, S8_MAX);
} else if (size == 2) {
- reg->smin_value = reg->s32_min_value = S16_MIN;
- reg->smax_value = reg->s32_max_value = S16_MAX;
+ reg_set_srange64(reg, S16_MIN, S16_MAX);
+ reg_set_srange32(reg, S16_MIN, S16_MAX);
} else {
/* size == 4 */
- reg->smin_value = reg->s32_min_value = S32_MIN;
- reg->smax_value = reg->s32_max_value = S32_MAX;
+ reg_set_srange64(reg, S32_MIN, S32_MAX);
+ reg_set_srange32(reg, S32_MIN, S32_MAX);
}
- reg->umin_value = reg->u32_min_value = 0;
- reg->umax_value = U64_MAX;
- reg->u32_max_value = U32_MAX;
+ reg_set_urange64(reg, 0, U64_MAX);
+ reg_set_urange32(reg, 0, U32_MAX);
reg->var_off = tnum_unknown;
}
@@ -5680,29 +5672,29 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
reg->var_off = tnum_const((s32)u64_cval);
u64_cval = reg->var_off.value;
- reg->smax_value = reg->smin_value = u64_cval;
- reg->umax_value = reg->umin_value = u64_cval;
- reg->s32_max_value = reg->s32_min_value = u64_cval;
- reg->u32_max_value = reg->u32_min_value = u64_cval;
+ reg_set_srange64(reg, u64_cval, u64_cval);
+ reg_set_urange64(reg, u64_cval, u64_cval);
+ reg_set_srange32(reg, u64_cval, u64_cval);
+ reg_set_urange32(reg, u64_cval, u64_cval);
return;
}
- top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
- top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;
+ top_smax_value = ((u64)reg_smax(reg) >> num_bits) << num_bits;
+ top_smin_value = ((u64)reg_smin(reg) >> num_bits) << num_bits;
if (top_smax_value != top_smin_value)
goto out;
/* find the s64_max and s64_min after sign extension */
if (size == 1) {
- init_s64_max = (s8)reg->smax_value;
- init_s64_min = (s8)reg->smin_value;
+ init_s64_max = (s8)reg_smax(reg);
+ init_s64_min = (s8)reg_smin(reg);
} else if (size == 2) {
- init_s64_max = (s16)reg->smax_value;
- init_s64_min = (s16)reg->smin_value;
+ init_s64_max = (s16)reg_smax(reg);
+ init_s64_min = (s16)reg_smin(reg);
} else {
- init_s64_max = (s32)reg->smax_value;
- init_s64_min = (s32)reg->smin_value;
+ init_s64_max = (s32)reg_smax(reg);
+ init_s64_min = (s32)reg_smin(reg);
}
s64_max = max(init_s64_max, init_s64_min);
@@ -5710,10 +5702,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
/* both of s64_max/s64_min positive or negative */
if ((s64_max >= 0) == (s64_min >= 0)) {
- reg->s32_min_value = reg->smin_value = s64_min;
- reg->s32_max_value = reg->smax_value = s64_max;
- reg->u32_min_value = reg->umin_value = s64_min;
- reg->u32_max_value = reg->umax_value = s64_max;
+ reg_set_srange64(reg, s64_min, s64_max);
+ reg_set_urange64(reg, s64_min, s64_max);
+ reg_set_srange32(reg, s64_min, s64_max);
+ reg_set_urange32(reg, s64_min, s64_max);
reg->var_off = tnum_range(s64_min, s64_max);
return;
}
@@ -5724,16 +5716,12 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
{
- if (size == 1) {
- reg->s32_min_value = S8_MIN;
- reg->s32_max_value = S8_MAX;
- } else {
+ if (size == 1)
+ reg_set_srange32(reg, S8_MIN, S8_MAX);
+ else
/* size == 2 */
- reg->s32_min_value = S16_MIN;
- reg->s32_max_value = S16_MAX;
- }
- reg->u32_min_value = 0;
- reg->u32_max_value = U32_MAX;
+ reg_set_srange32(reg, S16_MIN, S16_MAX);
+ reg_set_urange32(reg, 0, U32_MAX);
reg->var_off = tnum_subreg(tnum_unknown);
}
@@ -5751,34 +5739,32 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
reg->var_off = tnum_const((s16)u32_val);
u32_val = reg->var_off.value;
- reg->s32_min_value = reg->s32_max_value = u32_val;
- reg->u32_min_value = reg->u32_max_value = u32_val;
+ reg_set_srange32(reg, u32_val, u32_val);
+ reg_set_urange32(reg, u32_val, u32_val);
return;
}
- top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits;
- top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits;
+ top_smax_value = ((u32)reg_s32_max(reg) >> num_bits) << num_bits;
+ top_smin_value = ((u32)reg_s32_min(reg) >> num_bits) << num_bits;
if (top_smax_value != top_smin_value)
goto out;
/* find the s32_max and s32_min after sign extension */
if (size == 1) {
- init_s32_max = (s8)reg->s32_max_value;
- init_s32_min = (s8)reg->s32_min_value;
+ init_s32_max = (s8)reg_s32_max(reg);
+ init_s32_min = (s8)reg_s32_min(reg);
} else {
/* size == 2 */
- init_s32_max = (s16)reg->s32_max_value;
- init_s32_min = (s16)reg->s32_min_value;
+ init_s32_max = (s16)reg_s32_max(reg);
+ init_s32_min = (s16)reg_s32_min(reg);
}
s32_max = max(init_s32_max, init_s32_min);
s32_min = min(init_s32_max, init_s32_min);
if ((s32_min >= 0) == (s32_max >= 0)) {
- reg->s32_min_value = s32_min;
- reg->s32_max_value = s32_max;
- reg->u32_min_value = (u32)s32_min;
- reg->u32_max_value = (u32)s32_max;
+ reg_set_srange32(reg, s32_min, s32_max);
+ reg_set_urange32(reg, (u32)s32_min, (u32)s32_max);
reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
return;
}
@@ -6238,14 +6224,14 @@ static int check_stack_access_within_bounds(
min_off = (s64)reg->var_off.value + off;
max_off = min_off + access_size;
} else {
- if (reg->smax_value >= BPF_MAX_VAR_OFF ||
- reg->smin_value <= -BPF_MAX_VAR_OFF) {
+ if (reg_smax(reg) >= BPF_MAX_VAR_OFF ||
+ reg_smin(reg) <= -BPF_MAX_VAR_OFF) {
verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
err_extra, regno);
return -EACCES;
}
- min_off = reg->smin_value + off;
- max_off = reg->smax_value + off + access_size;
+ min_off = reg_smin(reg) + off;
+ max_off = reg_smax(reg) + off + access_size;
}
err = check_stack_slot_within_bounds(env, min_off, state, type);
@@ -6861,8 +6847,8 @@ static int check_stack_range_initialized(
if (meta && meta->raw_mode)
meta = NULL;
- min_off = reg->smin_value + off;
- max_off = reg->smax_value + off;
+ min_off = reg_smin(reg) + off;
+ max_off = reg_smax(reg) + off;
}
if (meta && meta->raw_mode) {
@@ -7018,8 +7004,8 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
zero_size_allowed);
if (err)
return err;
- if (env->prog->aux->max_ctx_offset < reg->umax_value + access_size)
- env->prog->aux->max_ctx_offset = reg->umax_value + access_size;
+ if (env->prog->aux->max_ctx_offset < reg_umax(reg) + access_size)
+ env->prog->aux->max_ctx_offset = reg_umax(reg) + access_size;
return 0;
}
fallthrough;
@@ -7058,7 +7044,7 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
* out. Only upper bounds can be learned because retval is an
* int type and negative retvals are allowed.
*/
- meta->msize_max_value = reg->umax_value;
+ meta->msize_max_value = reg_umax(reg);
/* The register is SCALAR_VALUE; the access check happens using
* its boundaries. For unprivileged variable accesses, disable
@@ -7068,24 +7054,24 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
if (!tnum_is_const(reg->var_off))
meta = NULL;
- if (reg->smin_value < 0) {
+ if (reg_smin(reg) < 0) {
verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
regno);
return -EACCES;
}
- if (reg->umin_value == 0 && !zero_size_allowed) {
+ if (reg_umin(reg) == 0 && !zero_size_allowed) {
verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
- regno, reg->umin_value, reg->umax_value);
+ regno, reg_umin(reg), reg_umax(reg));
return -EACCES;
}
- if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
+ if (reg_umax(reg) >= BPF_MAX_VAR_SIZ) {
verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
regno);
return -EACCES;
}
- err = check_helper_mem_access(env, regno - 1, reg->umax_value,
+ err = check_helper_mem_access(env, regno - 1, reg_umax(reg),
access_type, zero_size_allowed, meta);
if (!err)
err = mark_chain_precision(env, regno);
@@ -9810,9 +9796,9 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
{
if (range.return_32bit)
- return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval;
+ return range.minval <= reg_s32_min(reg) && reg_s32_max(reg) <= range.maxval;
else
- return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
+ return range.minval <= reg_smin(reg) && reg_smax(reg) <= range.maxval;
}
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
@@ -9921,21 +9907,15 @@ static int do_refine_retval_range(struct bpf_verifier_env *env,
case BPF_FUNC_probe_read_str:
case BPF_FUNC_probe_read_kernel_str:
case BPF_FUNC_probe_read_user_str:
- ret_reg->smax_value = meta->msize_max_value;
- ret_reg->s32_max_value = meta->msize_max_value;
- ret_reg->smin_value = -MAX_ERRNO;
- ret_reg->s32_min_value = -MAX_ERRNO;
+ reg_set_srange64(ret_reg, -MAX_ERRNO, meta->msize_max_value);
+ reg_set_srange32(ret_reg, -MAX_ERRNO, meta->msize_max_value);
reg_bounds_sync(ret_reg);
break;
case BPF_FUNC_get_smp_processor_id:
- ret_reg->umax_value = nr_cpu_ids - 1;
- ret_reg->u32_max_value = nr_cpu_ids - 1;
- ret_reg->smax_value = nr_cpu_ids - 1;
- ret_reg->s32_max_value = nr_cpu_ids - 1;
- ret_reg->umin_value = 0;
- ret_reg->u32_min_value = 0;
- ret_reg->smin_value = 0;
- ret_reg->s32_min_value = 0;
+ reg_set_urange64(ret_reg, 0, nr_cpu_ids - 1);
+ reg_set_urange32(ret_reg, 0, nr_cpu_ids - 1);
+ reg_set_srange64(ret_reg, 0, nr_cpu_ids - 1);
+ reg_set_srange32(ret_reg, 0, nr_cpu_ids - 1);
reg_bounds_sync(ret_reg);
break;
}
@@ -10400,7 +10380,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
err = mark_chain_precision(env, BPF_REG_1);
if (err)
return err;
- if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) {
+ if (cur_func(env)->callback_depth < reg_umax(&regs[BPF_REG_1])) {
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
set_loop_callback_state);
} else {
@@ -13330,7 +13310,7 @@ static bool check_reg_sane_offset_scalar(struct bpf_verifier_env *env,
{
bool known = tnum_is_const(reg->var_off);
s64 val = reg->var_off.value;
- s64 smin = reg->smin_value;
+ s64 smin = reg_smin(reg);
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
verbose(env, "math between %s pointer and %lld is not allowed\n",
@@ -13359,7 +13339,7 @@ static bool check_reg_sane_offset_ptr(struct bpf_verifier_env *env,
{
bool known = tnum_is_const(reg->var_off);
s64 val = reg->var_off.value;
- s64 smin = reg->smin_value;
+ s64 smin = reg_smin(reg);
if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
verbose(env, "%s pointer offset %lld is not allowed\n",
@@ -13401,7 +13381,7 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
break;
case PTR_TO_MAP_VALUE:
max = ptr_reg->map_ptr->value_size;
- ptr_limit = mask_to_left ? ptr_reg->smin_value : ptr_reg->umax_value;
+ ptr_limit = mask_to_left ? reg_smin(ptr_reg) : reg_umax(ptr_reg);
break;
default:
return REASON_TYPE;
@@ -13490,7 +13470,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
struct bpf_verifier_state *vstate = env->cur_state;
bool off_is_imm = tnum_is_const(off_reg->var_off);
- bool off_is_neg = off_reg->smin_value < 0;
+ bool off_is_neg = reg_smin(off_reg) < 0;
bool ptr_is_dst_reg = ptr_reg == dst_reg;
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
@@ -13509,7 +13489,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
if (!commit_window) {
if (!tnum_is_const(off_reg->var_off) &&
- (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+ (reg_smin(off_reg) < 0) != (reg_smax(off_reg) < 0))
return REASON_BOUNDS;
info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
@@ -13703,10 +13683,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
- s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
- smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
- u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
- umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+ s64 smin_val = reg_smin(off_reg), smax_val = reg_smax(off_reg),
+ smin_ptr = reg_smin(ptr_reg), smax_ptr = reg_smax(ptr_reg);
+ u64 umin_val = reg_umin(off_reg), umax_val = reg_umax(off_reg),
+ umin_ptr = reg_umin(ptr_reg), umax_ptr = reg_umax(ptr_reg);
struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
@@ -13808,15 +13788,22 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
* added into the variable offset, and we copy the fixed offset
* from ptr_reg.
*/
- if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
- check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
+ {
+ s64 smin_res, smax_res;
+ u64 umin_res, umax_res;
+
+ if (check_add_overflow(smin_ptr, smin_val, &smin_res) ||
+ check_add_overflow(smax_ptr, smax_val, &smax_res)) {
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
+ } else {
+ reg_set_srange64(dst_reg, smin_res, smax_res);
+ }
+ if (check_add_overflow(umin_ptr, umin_val, &umin_res) ||
+ check_add_overflow(umax_ptr, umax_val, &umax_res)) {
+ reg_set_urange64(dst_reg, 0, U64_MAX);
+ } else {
+ reg_set_urange64(dst_reg, umin_res, umax_res);
}
- if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
- check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
}
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->raw = ptr_reg->raw;
@@ -13852,20 +13839,23 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
/* A new variable offset is created. If the subtrahend is known
* nonnegative, then any reg->range we had before is still good.
*/
- if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
- check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
+ {
+ s64 smin_res, smax_res;
+
+ if (check_sub_overflow(smin_ptr, smax_val, &smin_res) ||
+ check_sub_overflow(smax_ptr, smin_val, &smax_res)) {
/* Overflow possible, we know nothing */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
+ } else {
+ reg_set_srange64(dst_reg, smin_res, smax_res);
+ }
}
if (umin_ptr < umax_val) {
/* Overflow possible, we know nothing */
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
+ reg_set_urange64(dst_reg, 0, U64_MAX);
} else {
/* Cannot overflow (as long as bounds are consistent) */
- dst_reg->umin_value = umin_ptr - umax_val;
- dst_reg->umax_value = umax_ptr - umin_val;
+ reg_set_urange64(dst_reg, umin_ptr - umax_val, umax_ptr - umin_val);
}
dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
dst_reg->raw = ptr_reg->raw;
@@ -13923,18 +13913,18 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 *dst_smin = &dst_reg->s32_min_value;
- s32 *dst_smax = &dst_reg->s32_max_value;
- u32 *dst_umin = &dst_reg->u32_min_value;
- u32 *dst_umax = &dst_reg->u32_max_value;
- u32 umin_val = src_reg->u32_min_value;
- u32 umax_val = src_reg->u32_max_value;
+ s32 smin = reg_s32_min(dst_reg);
+ s32 smax = reg_s32_max(dst_reg);
+ u32 umin = reg_u32_min(dst_reg);
+ u32 umax = reg_u32_max(dst_reg);
+ u32 umin_val = reg_u32_min(src_reg);
+ u32 umax_val = reg_u32_max(src_reg);
bool min_overflow, max_overflow;
- if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
- check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
- *dst_smin = S32_MIN;
- *dst_smax = S32_MAX;
+ if (check_add_overflow(smin, reg_s32_min(src_reg), &smin) ||
+ check_add_overflow(smax, reg_s32_max(src_reg), &smax)) {
+ smin = S32_MIN;
+ smax = S32_MAX;
}
/* If either all additions overflow or no additions overflow, then
@@ -13942,30 +13932,33 @@ static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
* dst_umax + src_umax. Otherwise (some additions overflow), set
* the output bounds to unbounded.
*/
- min_overflow = check_add_overflow(*dst_umin, umin_val, dst_umin);
- max_overflow = check_add_overflow(*dst_umax, umax_val, dst_umax);
+ min_overflow = check_add_overflow(umin, umin_val, &umin);
+ max_overflow = check_add_overflow(umax, umax_val, &umax);
if (!min_overflow && max_overflow) {
- *dst_umin = 0;
- *dst_umax = U32_MAX;
+ umin = 0;
+ umax = U32_MAX;
}
+
+ reg_set_srange32(dst_reg, smin, smax);
+ reg_set_urange32(dst_reg, umin, umax);
}
static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 *dst_smin = &dst_reg->smin_value;
- s64 *dst_smax = &dst_reg->smax_value;
- u64 *dst_umin = &dst_reg->umin_value;
- u64 *dst_umax = &dst_reg->umax_value;
- u64 umin_val = src_reg->umin_value;
- u64 umax_val = src_reg->umax_value;
+ s64 smin = reg_smin(dst_reg);
+ s64 smax = reg_smax(dst_reg);
+ u64 umin = reg_umin(dst_reg);
+ u64 umax = reg_umax(dst_reg);
+ u64 umin_val = reg_umin(src_reg);
+ u64 umax_val = reg_umax(src_reg);
bool min_overflow, max_overflow;
- if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
- check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
- *dst_smin = S64_MIN;
- *dst_smax = S64_MAX;
+ if (check_add_overflow(smin, reg_smin(src_reg), &smin) ||
+ check_add_overflow(smax, reg_smax(src_reg), &smax)) {
+ smin = S64_MIN;
+ smax = S64_MAX;
}
/* If either all additions overflow or no additions overflow, then
@@ -13973,31 +13966,34 @@ static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
* dst_umax + src_umax. Otherwise (some additions overflow), set
* the output bounds to unbounded.
*/
- min_overflow = check_add_overflow(*dst_umin, umin_val, dst_umin);
- max_overflow = check_add_overflow(*dst_umax, umax_val, dst_umax);
+ min_overflow = check_add_overflow(umin, umin_val, &umin);
+ max_overflow = check_add_overflow(umax, umax_val, &umax);
if (!min_overflow && max_overflow) {
- *dst_umin = 0;
- *dst_umax = U64_MAX;
+ umin = 0;
+ umax = U64_MAX;
}
+
+ reg_set_srange64(dst_reg, smin, smax);
+ reg_set_urange64(dst_reg, umin, umax);
}
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 *dst_smin = &dst_reg->s32_min_value;
- s32 *dst_smax = &dst_reg->s32_max_value;
- u32 *dst_umin = &dst_reg->u32_min_value;
- u32 *dst_umax = &dst_reg->u32_max_value;
- u32 umin_val = src_reg->u32_min_value;
- u32 umax_val = src_reg->u32_max_value;
+ s32 smin = reg_s32_min(dst_reg);
+ s32 smax = reg_s32_max(dst_reg);
+ u32 umin = reg_u32_min(dst_reg);
+ u32 umax = reg_u32_max(dst_reg);
+ u32 umin_val = reg_u32_min(src_reg);
+ u32 umax_val = reg_u32_max(src_reg);
bool min_underflow, max_underflow;
- if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
- check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
+ if (check_sub_overflow(smin, reg_s32_max(src_reg), &smin) ||
+ check_sub_overflow(smax, reg_s32_min(src_reg), &smax)) {
/* Overflow possible, we know nothing */
- *dst_smin = S32_MIN;
- *dst_smax = S32_MAX;
+ smin = S32_MIN;
+ smax = S32_MAX;
}
/* If either all subtractions underflow or no subtractions
@@ -14005,31 +14001,34 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
* dst_umax = dst_umax - src_umin. Otherwise (some subtractions
* underflow), set the output bounds to unbounded.
*/
- min_underflow = check_sub_overflow(*dst_umin, umax_val, dst_umin);
- max_underflow = check_sub_overflow(*dst_umax, umin_val, dst_umax);
+ min_underflow = check_sub_overflow(umin, umax_val, &umin);
+ max_underflow = check_sub_overflow(umax, umin_val, &umax);
if (min_underflow && !max_underflow) {
- *dst_umin = 0;
- *dst_umax = U32_MAX;
+ umin = 0;
+ umax = U32_MAX;
}
+
+ reg_set_srange32(dst_reg, smin, smax);
+ reg_set_urange32(dst_reg, umin, umax);
}
static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 *dst_smin = &dst_reg->smin_value;
- s64 *dst_smax = &dst_reg->smax_value;
- u64 *dst_umin = &dst_reg->umin_value;
- u64 *dst_umax = &dst_reg->umax_value;
- u64 umin_val = src_reg->umin_value;
- u64 umax_val = src_reg->umax_value;
+ s64 smin = reg_smin(dst_reg);
+ s64 smax = reg_smax(dst_reg);
+ u64 umin = reg_umin(dst_reg);
+ u64 umax = reg_umax(dst_reg);
+ u64 umin_val = reg_umin(src_reg);
+ u64 umax_val = reg_umax(src_reg);
bool min_underflow, max_underflow;
- if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
- check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
+ if (check_sub_overflow(smin, reg_smax(src_reg), &smin) ||
+ check_sub_overflow(smax, reg_smin(src_reg), &smax)) {
/* Overflow possible, we know nothing */
- *dst_smin = S64_MIN;
- *dst_smax = S64_MAX;
+ smin = S64_MIN;
+ smax = S64_MAX;
}
/* If either all subtractions underflow or no subtractions
@@ -14037,113 +14036,116 @@ static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
* dst_umax = dst_umax - src_umin. Otherwise (some subtractions
* underflow), set the output bounds to unbounded.
*/
- min_underflow = check_sub_overflow(*dst_umin, umax_val, dst_umin);
- max_underflow = check_sub_overflow(*dst_umax, umin_val, dst_umax);
+ min_underflow = check_sub_overflow(umin, umax_val, &umin);
+ max_underflow = check_sub_overflow(umax, umin_val, &umax);
if (min_underflow && !max_underflow) {
- *dst_umin = 0;
- *dst_umax = U64_MAX;
+ umin = 0;
+ umax = U64_MAX;
}
+
+ reg_set_srange64(dst_reg, smin, smax);
+ reg_set_urange64(dst_reg, umin, umax);
}
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 *dst_smin = &dst_reg->s32_min_value;
- s32 *dst_smax = &dst_reg->s32_max_value;
- u32 *dst_umin = &dst_reg->u32_min_value;
- u32 *dst_umax = &dst_reg->u32_max_value;
+ s32 smin = reg_s32_min(dst_reg);
+ s32 smax = reg_s32_max(dst_reg);
+ u32 umin = reg_u32_min(dst_reg);
+ u32 umax = reg_u32_max(dst_reg);
s32 tmp_prod[4];
- if (check_mul_overflow(*dst_umax, src_reg->u32_max_value, dst_umax) ||
- check_mul_overflow(*dst_umin, src_reg->u32_min_value, dst_umin)) {
+ if (check_mul_overflow(umax, reg_u32_max(src_reg), &umax) ||
+ check_mul_overflow(umin, reg_u32_min(src_reg), &umin)) {
/* Overflow possible, we know nothing */
- *dst_umin = 0;
- *dst_umax = U32_MAX;
+ umin = 0;
+ umax = U32_MAX;
}
- if (check_mul_overflow(*dst_smin, src_reg->s32_min_value, &tmp_prod[0]) ||
- check_mul_overflow(*dst_smin, src_reg->s32_max_value, &tmp_prod[1]) ||
- check_mul_overflow(*dst_smax, src_reg->s32_min_value, &tmp_prod[2]) ||
- check_mul_overflow(*dst_smax, src_reg->s32_max_value, &tmp_prod[3])) {
+ if (check_mul_overflow(smin, reg_s32_min(src_reg), &tmp_prod[0]) ||
+ check_mul_overflow(smin, reg_s32_max(src_reg), &tmp_prod[1]) ||
+ check_mul_overflow(smax, reg_s32_min(src_reg), &tmp_prod[2]) ||
+ check_mul_overflow(smax, reg_s32_max(src_reg), &tmp_prod[3])) {
/* Overflow possible, we know nothing */
- *dst_smin = S32_MIN;
- *dst_smax = S32_MAX;
+ smin = S32_MIN;
+ smax = S32_MAX;
} else {
- *dst_smin = min_array(tmp_prod, 4);
- *dst_smax = max_array(tmp_prod, 4);
+ smin = min_array(tmp_prod, 4);
+ smax = max_array(tmp_prod, 4);
}
+
+ reg_set_srange32(dst_reg, smin, smax);
+ reg_set_urange32(dst_reg, umin, umax);
}
static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 *dst_smin = &dst_reg->smin_value;
- s64 *dst_smax = &dst_reg->smax_value;
- u64 *dst_umin = &dst_reg->umin_value;
- u64 *dst_umax = &dst_reg->umax_value;
+ s64 smin = reg_smin(dst_reg);
+ s64 smax = reg_smax(dst_reg);
+ u64 umin = reg_umin(dst_reg);
+ u64 umax = reg_umax(dst_reg);
s64 tmp_prod[4];
- if (check_mul_overflow(*dst_umax, src_reg->umax_value, dst_umax) ||
- check_mul_overflow(*dst_umin, src_reg->umin_value, dst_umin)) {
+ if (check_mul_overflow(umax, reg_umax(src_reg), &umax) ||
+ check_mul_overflow(umin, reg_umin(src_reg), &umin)) {
/* Overflow possible, we know nothing */
- *dst_umin = 0;
- *dst_umax = U64_MAX;
+ umin = 0;
+ umax = U64_MAX;
}
- if (check_mul_overflow(*dst_smin, src_reg->smin_value, &tmp_prod[0]) ||
- check_mul_overflow(*dst_smin, src_reg->smax_value, &tmp_prod[1]) ||
- check_mul_overflow(*dst_smax, src_reg->smin_value, &tmp_prod[2]) ||
- check_mul_overflow(*dst_smax, src_reg->smax_value, &tmp_prod[3])) {
+ if (check_mul_overflow(smin, reg_smin(src_reg), &tmp_prod[0]) ||
+ check_mul_overflow(smin, reg_smax(src_reg), &tmp_prod[1]) ||
+ check_mul_overflow(smax, reg_smin(src_reg), &tmp_prod[2]) ||
+ check_mul_overflow(smax, reg_smax(src_reg), &tmp_prod[3])) {
/* Overflow possible, we know nothing */
- *dst_smin = S64_MIN;
- *dst_smax = S64_MAX;
+ smin = S64_MIN;
+ smax = S64_MAX;
} else {
- *dst_smin = min_array(tmp_prod, 4);
- *dst_smax = max_array(tmp_prod, 4);
+ smin = min_array(tmp_prod, 4);
+ smax = max_array(tmp_prod, 4);
}
+
+ reg_set_srange64(dst_reg, smin, smax);
+ reg_set_urange64(dst_reg, umin, umax);
}
static void scalar32_min_max_udiv(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u32 *dst_umin = &dst_reg->u32_min_value;
- u32 *dst_umax = &dst_reg->u32_max_value;
- u32 src_val = src_reg->u32_min_value; /* non-zero, const divisor */
+ u32 src_val = reg_u32_min(src_reg); /* non-zero, const divisor */
- *dst_umin = *dst_umin / src_val;
- *dst_umax = *dst_umax / src_val;
+ reg_set_urange32(dst_reg, reg_u32_min(dst_reg) / src_val,
+ reg_u32_max(dst_reg) / src_val);
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
reset_reg64_and_tnum(dst_reg);
}
static void scalar_min_max_udiv(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u64 *dst_umin = &dst_reg->umin_value;
- u64 *dst_umax = &dst_reg->umax_value;
- u64 src_val = src_reg->umin_value; /* non-zero, const divisor */
+ u64 src_val = reg_umin(src_reg); /* non-zero, const divisor */
- *dst_umin = div64_u64(*dst_umin, src_val);
- *dst_umax = div64_u64(*dst_umax, src_val);
+ reg_set_urange64(dst_reg, div64_u64(reg_umin(dst_reg), src_val),
+ div64_u64(reg_umax(dst_reg), src_val));
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
reset_reg32_and_tnum(dst_reg);
}
static void scalar32_min_max_sdiv(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 *dst_smin = &dst_reg->s32_min_value;
- s32 *dst_smax = &dst_reg->s32_max_value;
- s32 src_val = src_reg->s32_min_value; /* non-zero, const divisor */
+ s32 smin = reg_s32_min(dst_reg);
+ s32 smax = reg_s32_max(dst_reg);
+ s32 src_val = reg_s32_min(src_reg); /* non-zero, const divisor */
s32 res1, res2;
/* BPF div specification: S32_MIN / -1 = S32_MIN */
- if (*dst_smin == S32_MIN && src_val == -1) {
+ if (smin == S32_MIN && src_val == -1) {
/*
* If the dividend range contains more than just S32_MIN,
* we cannot precisely track the result, so it becomes unbounded.
@@ -14152,35 +14154,35 @@ static void scalar32_min_max_sdiv(struct bpf_reg_state *dst_reg,
* = {S32_MIN} U [S32_MAX-9, S32_MAX] = [S32_MIN, S32_MAX]
* Otherwise (if dividend is exactly S32_MIN), result remains S32_MIN.
*/
- if (*dst_smax != S32_MIN) {
- *dst_smin = S32_MIN;
- *dst_smax = S32_MAX;
+ if (smax != S32_MIN) {
+ smin = S32_MIN;
+ smax = S32_MAX;
}
goto reset;
}
- res1 = *dst_smin / src_val;
- res2 = *dst_smax / src_val;
- *dst_smin = min(res1, res2);
- *dst_smax = max(res1, res2);
+ res1 = smin / src_val;
+ res2 = smax / src_val;
+ smin = min(res1, res2);
+ smax = max(res1, res2);
reset:
+ reg_set_srange32(dst_reg, smin, smax);
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->u32_min_value = 0;
- dst_reg->u32_max_value = U32_MAX;
+ reg_set_urange32(dst_reg, 0, U32_MAX);
reset_reg64_and_tnum(dst_reg);
}
static void scalar_min_max_sdiv(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 *dst_smin = &dst_reg->smin_value;
- s64 *dst_smax = &dst_reg->smax_value;
- s64 src_val = src_reg->smin_value; /* non-zero, const divisor */
+ s64 smin = reg_smin(dst_reg);
+ s64 smax = reg_smax(dst_reg);
+ s64 src_val = reg_smin(src_reg); /* non-zero, const divisor */
s64 res1, res2;
/* BPF div specification: S64_MIN / -1 = S64_MIN */
- if (*dst_smin == S64_MIN && src_val == -1) {
+ if (smin == S64_MIN && src_val == -1) {
/*
* If the dividend range contains more than just S64_MIN,
* we cannot precisely track the result, so it becomes unbounded.
@@ -14189,79 +14191,69 @@ static void scalar_min_max_sdiv(struct bpf_reg_state *dst_reg,
* = {S64_MIN} U [S64_MAX-9, S64_MAX] = [S64_MIN, S64_MAX]
* Otherwise (if dividend is exactly S64_MIN), result remains S64_MIN.
*/
- if (*dst_smax != S64_MIN) {
- *dst_smin = S64_MIN;
- *dst_smax = S64_MAX;
+ if (smax != S64_MIN) {
+ smin = S64_MIN;
+ smax = S64_MAX;
}
goto reset;
}
- res1 = div64_s64(*dst_smin, src_val);
- res2 = div64_s64(*dst_smax, src_val);
- *dst_smin = min(res1, res2);
- *dst_smax = max(res1, res2);
+ res1 = div64_s64(smin, src_val);
+ res2 = div64_s64(smax, src_val);
+ smin = min(res1, res2);
+ smax = max(res1, res2);
reset:
+ reg_set_srange64(dst_reg, smin, smax);
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
+ reg_set_urange64(dst_reg, 0, U64_MAX);
reset_reg32_and_tnum(dst_reg);
}
static void scalar32_min_max_umod(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u32 *dst_umin = &dst_reg->u32_min_value;
- u32 *dst_umax = &dst_reg->u32_max_value;
- u32 src_val = src_reg->u32_min_value; /* non-zero, const divisor */
+ u32 src_val = reg_u32_min(src_reg); /* non-zero, const divisor */
u32 res_max = src_val - 1;
/*
* If dst_umax <= res_max, the result remains unchanged.
* e.g., [2, 5] % 10 = [2, 5].
*/
- if (*dst_umax <= res_max)
+ if (reg_u32_max(dst_reg) <= res_max)
return;
- *dst_umin = 0;
- *dst_umax = min(*dst_umax, res_max);
+ reg_set_urange32(dst_reg, 0, min(reg_u32_max(dst_reg), res_max));
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
reset_reg64_and_tnum(dst_reg);
}
static void scalar_min_max_umod(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u64 *dst_umin = &dst_reg->umin_value;
- u64 *dst_umax = &dst_reg->umax_value;
- u64 src_val = src_reg->umin_value; /* non-zero, const divisor */
+ u64 src_val = reg_umin(src_reg); /* non-zero, const divisor */
u64 res_max = src_val - 1;
/*
* If dst_umax <= res_max, the result remains unchanged.
* e.g., [2, 5] % 10 = [2, 5].
*/
- if (*dst_umax <= res_max)
+ if (reg_umax(dst_reg) <= res_max)
return;
- *dst_umin = 0;
- *dst_umax = min(*dst_umax, res_max);
+ reg_set_urange64(dst_reg, 0, min(reg_umax(dst_reg), res_max));
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
reset_reg32_and_tnum(dst_reg);
}
static void scalar32_min_max_smod(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 *dst_smin = &dst_reg->s32_min_value;
- s32 *dst_smax = &dst_reg->s32_max_value;
- s32 src_val = src_reg->s32_min_value; /* non-zero, const divisor */
+ s32 src_val = reg_s32_min(src_reg); /* non-zero, const divisor */
/*
* Safe absolute value calculation:
@@ -14281,33 +14273,27 @@ static void scalar32_min_max_smod(struct bpf_reg_state *dst_reg,
* If the dividend is already within the result range,
* the result remains unchanged. e.g., [-2, 5] % 10 = [-2, 5].
*/
- if (*dst_smin >= -res_max_abs && *dst_smax <= res_max_abs)
+ if (reg_s32_min(dst_reg) >= -res_max_abs && reg_s32_max(dst_reg) <= res_max_abs)
return;
/* General case: result has the same sign as the dividend. */
- if (*dst_smin >= 0) {
- *dst_smin = 0;
- *dst_smax = min(*dst_smax, res_max_abs);
- } else if (*dst_smax <= 0) {
- *dst_smax = 0;
- *dst_smin = max(*dst_smin, -res_max_abs);
+ if (reg_s32_min(dst_reg) >= 0) {
+ reg_set_srange32(dst_reg, 0, min(reg_s32_max(dst_reg), res_max_abs));
+ } else if (reg_s32_max(dst_reg) <= 0) {
+ reg_set_srange32(dst_reg, max(reg_s32_min(dst_reg), -res_max_abs), 0);
} else {
- *dst_smin = -res_max_abs;
- *dst_smax = res_max_abs;
+ reg_set_srange32(dst_reg, -res_max_abs, res_max_abs);
}
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->u32_min_value = 0;
- dst_reg->u32_max_value = U32_MAX;
+ reg_set_urange32(dst_reg, 0, U32_MAX);
reset_reg64_and_tnum(dst_reg);
}
static void scalar_min_max_smod(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 *dst_smin = &dst_reg->smin_value;
- s64 *dst_smax = &dst_reg->smax_value;
- s64 src_val = src_reg->smin_value; /* non-zero, const divisor */
+ s64 src_val = reg_smin(src_reg); /* non-zero, const divisor */
/*
* Safe absolute value calculation:
@@ -14327,24 +14313,20 @@ static void scalar_min_max_smod(struct bpf_reg_state *dst_reg,
* If the dividend is already within the result range,
* the result remains unchanged. e.g., [-2, 5] % 10 = [-2, 5].
*/
- if (*dst_smin >= -res_max_abs && *dst_smax <= res_max_abs)
+ if (reg_smin(dst_reg) >= -res_max_abs && reg_smax(dst_reg) <= res_max_abs)
return;
/* General case: result has the same sign as the dividend. */
- if (*dst_smin >= 0) {
- *dst_smin = 0;
- *dst_smax = min(*dst_smax, res_max_abs);
- } else if (*dst_smax <= 0) {
- *dst_smax = 0;
- *dst_smin = max(*dst_smin, -res_max_abs);
+ if (reg_smin(dst_reg) >= 0) {
+ reg_set_srange64(dst_reg, 0, min(reg_smax(dst_reg), res_max_abs));
+ } else if (reg_smax(dst_reg) <= 0) {
+ reg_set_srange64(dst_reg, max(reg_smin(dst_reg), -res_max_abs), 0);
} else {
- *dst_smin = -res_max_abs;
- *dst_smax = res_max_abs;
+ reg_set_srange64(dst_reg, -res_max_abs, res_max_abs);
}
/* Reset other ranges/tnum to unbounded/unknown. */
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
+ reg_set_urange64(dst_reg, 0, U64_MAX);
reset_reg32_and_tnum(dst_reg);
}
@@ -14354,7 +14336,7 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
bool src_known = tnum_subreg_is_const(src_reg->var_off);
bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
struct tnum var32_off = tnum_subreg(dst_reg->var_off);
- u32 umax_val = src_reg->u32_max_value;
+ u32 umax_val = reg_u32_max(src_reg);
if (src_known && dst_known) {
__mark_reg32_known(dst_reg, var32_off.value);
@@ -14364,19 +14346,15 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
- dst_reg->u32_min_value = var32_off.value;
- dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
+ reg_set_urange32(dst_reg, var32_off.value, min(reg_u32_max(dst_reg), umax_val));
/* Safe to set s32 bounds by casting u32 result into s32 when u32
* doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
*/
- if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
- dst_reg->s32_min_value = dst_reg->u32_min_value;
- dst_reg->s32_max_value = dst_reg->u32_max_value;
- } else {
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
- }
+ if ((s32)reg_u32_min(dst_reg) <= (s32)reg_u32_max(dst_reg))
+ reg_set_srange32(dst_reg, reg_u32_min(dst_reg), reg_u32_max(dst_reg));
+ else
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
}
static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -14384,7 +14362,7 @@ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
{
bool src_known = tnum_is_const(src_reg->var_off);
bool dst_known = tnum_is_const(dst_reg->var_off);
- u64 umax_val = src_reg->umax_value;
+ u64 umax_val = reg_umax(src_reg);
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value);
@@ -14394,19 +14372,15 @@ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
- dst_reg->umin_value = dst_reg->var_off.value;
- dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
+ reg_set_urange64(dst_reg, dst_reg->var_off.value, min(reg_umax(dst_reg), umax_val));
/* Safe to set s64 bounds by casting u64 result into s64 when u64
* doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
*/
- if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
- } else {
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- }
+ if ((s64)reg_umin(dst_reg) <= (s64)reg_umax(dst_reg))
+ reg_set_srange64(dst_reg, reg_umin(dst_reg), reg_umax(dst_reg));
+ else
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
}
@@ -14417,7 +14391,7 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
bool src_known = tnum_subreg_is_const(src_reg->var_off);
bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
struct tnum var32_off = tnum_subreg(dst_reg->var_off);
- u32 umin_val = src_reg->u32_min_value;
+ u32 umin_val = reg_u32_min(src_reg);
if (src_known && dst_known) {
__mark_reg32_known(dst_reg, var32_off.value);
@@ -14427,19 +14401,16 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
- dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
- dst_reg->u32_max_value = var32_off.value | var32_off.mask;
+ reg_set_urange32(dst_reg, max(reg_u32_min(dst_reg), umin_val),
+ var32_off.value | var32_off.mask);
/* Safe to set s32 bounds by casting u32 result into s32 when u32
* doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
*/
- if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
- dst_reg->s32_min_value = dst_reg->u32_min_value;
- dst_reg->s32_max_value = dst_reg->u32_max_value;
- } else {
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
- }
+ if ((s32)reg_u32_min(dst_reg) <= (s32)reg_u32_max(dst_reg))
+ reg_set_srange32(dst_reg, reg_u32_min(dst_reg), reg_u32_max(dst_reg));
+ else
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
}
static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
@@ -14447,7 +14418,7 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
{
bool src_known = tnum_is_const(src_reg->var_off);
bool dst_known = tnum_is_const(dst_reg->var_off);
- u64 umin_val = src_reg->umin_value;
+ u64 umin_val = reg_umin(src_reg);
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value);
@@ -14457,19 +14428,16 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
- dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
- dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
+ reg_set_urange64(dst_reg, max(reg_umin(dst_reg), umin_val),
+ dst_reg->var_off.value | dst_reg->var_off.mask);
/* Safe to set s64 bounds by casting u64 result into s64 when u64
* doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
*/
- if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
- } else {
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- }
+ if ((s64)reg_umin(dst_reg) <= (s64)reg_umax(dst_reg))
+ reg_set_srange64(dst_reg, reg_umin(dst_reg), reg_umax(dst_reg));
+ else
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
}
@@ -14487,19 +14455,15 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
}
/* We get both minimum and maximum from the var32_off. */
- dst_reg->u32_min_value = var32_off.value;
- dst_reg->u32_max_value = var32_off.value | var32_off.mask;
+ reg_set_urange32(dst_reg, var32_off.value, var32_off.value | var32_off.mask);
/* Safe to set s32 bounds by casting u32 result into s32 when u32
* doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
*/
- if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) {
- dst_reg->s32_min_value = dst_reg->u32_min_value;
- dst_reg->s32_max_value = dst_reg->u32_max_value;
- } else {
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
- }
+ if ((s32)reg_u32_min(dst_reg) <= (s32)reg_u32_max(dst_reg))
+ reg_set_srange32(dst_reg, reg_u32_min(dst_reg), reg_u32_max(dst_reg));
+ else
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
}
static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
@@ -14515,19 +14479,16 @@ static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
}
/* We get both minimum and maximum from the var_off. */
- dst_reg->umin_value = dst_reg->var_off.value;
- dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
+ reg_set_urange64(dst_reg, dst_reg->var_off.value,
+ dst_reg->var_off.value | dst_reg->var_off.mask);
/* Safe to set s64 bounds by casting u64 result into s64 when u64
* doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
*/
- if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) {
- dst_reg->smin_value = dst_reg->umin_value;
- dst_reg->smax_value = dst_reg->umax_value;
- } else {
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
- }
+ if ((s64)reg_umin(dst_reg) <= (s64)reg_umax(dst_reg))
+ reg_set_srange64(dst_reg, reg_umin(dst_reg), reg_umax(dst_reg));
+ else
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
__update_reg_bounds(dst_reg);
}
@@ -14538,23 +14499,20 @@ static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
/* If we might shift our top bit out, then we know nothing */
- if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
- dst_reg->u32_min_value = 0;
- dst_reg->u32_max_value = U32_MAX;
- } else {
- dst_reg->u32_min_value <<= umin_val;
- dst_reg->u32_max_value <<= umax_val;
- }
+ if (umax_val > 31 || reg_u32_max(dst_reg) > 1ULL << (31 - umax_val))
+ reg_set_urange32(dst_reg, 0, U32_MAX);
+ else
+ reg_set_urange32(dst_reg, reg_u32_min(dst_reg) << umin_val,
+ reg_u32_max(dst_reg) << umax_val);
}
static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u32 umax_val = src_reg->u32_max_value;
- u32 umin_val = src_reg->u32_min_value;
+ u32 umax_val = reg_u32_max(src_reg);
+ u32 umin_val = reg_u32_min(src_reg);
/* u32 alu operation will zext upper bits */
struct tnum subreg = tnum_subreg(dst_reg->var_off);
@@ -14576,29 +14534,25 @@ static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
* because s32 bounds don't flip sign when shifting to the left by
* 32bits.
*/
- if (umin_val == 32 && umax_val == 32) {
- dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
- dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
- } else {
- dst_reg->smax_value = S64_MAX;
- dst_reg->smin_value = S64_MIN;
- }
+ if (umin_val == 32 && umax_val == 32)
+ reg_set_srange64(dst_reg, (s64)reg_s32_min(dst_reg) << 32,
+ (s64)reg_s32_max(dst_reg) << 32);
+ else
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
/* If we might shift our top bit out, then we know nothing */
- if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
- } else {
- dst_reg->umin_value <<= umin_val;
- dst_reg->umax_value <<= umax_val;
- }
+ if (reg_umax(dst_reg) > 1ULL << (63 - umax_val))
+ reg_set_urange64(dst_reg, 0, U64_MAX);
+ else
+ reg_set_urange64(dst_reg, reg_umin(dst_reg) << umin_val,
+ reg_umax(dst_reg) << umax_val);
}
static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u64 umax_val = src_reg->umax_value;
- u64 umin_val = src_reg->umin_value;
+ u64 umax_val = reg_umax(src_reg);
+ u64 umin_val = reg_umin(src_reg);
/* scalar64 calc uses 32bit unshifted bounds so must be called first */
__scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
@@ -14613,8 +14567,8 @@ static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
struct tnum subreg = tnum_subreg(dst_reg->var_off);
- u32 umax_val = src_reg->u32_max_value;
- u32 umin_val = src_reg->u32_min_value;
+ u32 umax_val = reg_u32_max(src_reg);
+ u32 umin_val = reg_u32_min(src_reg);
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
@@ -14630,12 +14584,11 @@ static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
- dst_reg->s32_min_value = S32_MIN;
- dst_reg->s32_max_value = S32_MAX;
+ reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
dst_reg->var_off = tnum_rshift(subreg, umin_val);
- dst_reg->u32_min_value >>= umax_val;
- dst_reg->u32_max_value >>= umin_val;
+ reg_set_urange32(dst_reg, reg_u32_min(dst_reg) >> umax_val,
+ reg_u32_max(dst_reg) >> umin_val);
__mark_reg64_unbounded(dst_reg);
__update_reg32_bounds(dst_reg);
@@ -14644,8 +14597,8 @@ static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u64 umax_val = src_reg->umax_value;
- u64 umin_val = src_reg->umin_value;
+ u64 umax_val = reg_umax(src_reg);
+ u64 umin_val = reg_umin(src_reg);
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
@@ -14661,11 +14614,10 @@ static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
- dst_reg->smin_value = S64_MIN;
- dst_reg->smax_value = S64_MAX;
+ reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
- dst_reg->umin_value >>= umax_val;
- dst_reg->umax_value >>= umin_val;
+ reg_set_urange64(dst_reg, reg_umin(dst_reg) >> umax_val,
+ reg_umax(dst_reg) >> umin_val);
/* Its not easy to operate on alu32 bounds here because it depends
* on bits being shifted in. Take easy way out and mark unbounded
@@ -14678,21 +14630,21 @@ static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u64 umin_val = src_reg->u32_min_value;
+ u64 umin_val = reg_u32_min(src_reg);
/* Upon reaching here, src_known is true and
* umax_val is equal to umin_val.
*/
- dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
- dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
+ reg_set_srange32(dst_reg,
+ (u32)(((s32)reg_s32_min(dst_reg)) >> umin_val),
+ (u32)(((s32)reg_s32_max(dst_reg)) >> umin_val));
dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
/* blow away the dst_reg umin_value/umax_value and rely on
* dst_reg var_off to refine the result.
*/
- dst_reg->u32_min_value = 0;
- dst_reg->u32_max_value = U32_MAX;
+ reg_set_urange32(dst_reg, 0, U32_MAX);
__mark_reg64_unbounded(dst_reg);
__update_reg32_bounds(dst_reg);
@@ -14701,21 +14653,20 @@ static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- u64 umin_val = src_reg->umin_value;
+ u64 umin_val = reg_umin(src_reg);
/* Upon reaching here, src_known is true and umax_val is equal
* to umin_val.
*/
- dst_reg->smin_value >>= umin_val;
- dst_reg->smax_value >>= umin_val;
+ reg_set_srange64(dst_reg, reg_smin(dst_reg) >> umin_val,
+ reg_smax(dst_reg) >> umin_val);
dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
/* blow away the dst_reg umin_value/umax_value and rely on
* dst_reg var_off to refine the result.
*/
- dst_reg->umin_value = 0;
- dst_reg->umax_value = U64_MAX;
+ reg_set_urange64(dst_reg, 0, U64_MAX);
/* Its not easy to operate on alu32 bounds here because it depends
* on bits being shifted in from upper 32-bits. Take easy way out
@@ -14782,13 +14733,13 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
if (insn_bitness == 32) {
if (tnum_subreg_is_const(src_reg->var_off)
- && src_reg->s32_min_value == src_reg->s32_max_value
- && src_reg->u32_min_value == src_reg->u32_max_value)
+ && reg_s32_min(src_reg) == reg_s32_max(src_reg)
+ && reg_u32_min(src_reg) == reg_u32_max(src_reg))
src_is_const = true;
} else {
if (tnum_is_const(src_reg->var_off)
- && src_reg->smin_value == src_reg->smax_value
- && src_reg->umin_value == src_reg->umax_value)
+ && reg_smin(src_reg) == reg_smax(src_reg)
+ && reg_umin(src_reg) == reg_umax(src_reg))
src_is_const = true;
}
@@ -14818,7 +14769,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
case BPF_LSH:
case BPF_RSH:
case BPF_ARSH:
- return (src_is_const && src_reg->umax_value < insn_bitness);
+ return (src_is_const && reg_umax(src_reg) < insn_bitness);
default:
return false;
}
@@ -14831,9 +14782,9 @@ static int maybe_fork_scalars(struct bpf_verifier_env *env, struct bpf_insn *ins
struct bpf_reg_state *regs;
bool alu32;
- if (dst_reg->smin_value == -1 && dst_reg->smax_value == 0)
+ if (reg_smin(dst_reg) == -1 && reg_smax(dst_reg) == 0)
alu32 = false;
- else if (dst_reg->s32_min_value == -1 && dst_reg->s32_max_value == 0)
+ else if (reg_s32_min(dst_reg) == -1 && reg_s32_max(dst_reg) == 0)
alu32 = true;
else
return 0;
@@ -14917,7 +14868,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
break;
case BPF_DIV:
/* BPF div specification: x / 0 = 0 */
- if ((alu32 && src_reg.u32_min_value == 0) || (!alu32 && src_reg.umin_value == 0)) {
+ if ((alu32 && reg_u32_min(&src_reg) == 0) || (!alu32 && reg_umin(&src_reg) == 0)) {
___mark_reg_known(dst_reg, 0);
break;
}
@@ -14934,7 +14885,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
break;
case BPF_MOD:
/* BPF mod specification: x % 0 = x */
- if ((alu32 && src_reg.u32_min_value == 0) || (!alu32 && src_reg.umin_value == 0))
+ if ((alu32 && reg_u32_min(&src_reg) == 0) || (!alu32 && reg_umin(&src_reg) == 0))
break;
if (alu32)
if (off == 1)
@@ -15122,7 +15073,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
* umax_value before the ALU operation. After adjust_scalar_min_max_vals(),
* alu32 ops will have zero-extended the result, making umax_value <= U32_MAX.
*/
- u64 dst_umax = dst_reg->umax_value;
+ u64 dst_umax = reg_umax(dst_reg);
err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
if (err)
@@ -15264,7 +15215,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
} else if (src_reg->type == SCALAR_VALUE) {
bool no_sext;
- no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
+ no_sext = reg_umax(src_reg) < (1ULL << (insn->off - 1));
if (no_sext)
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
@@ -15299,7 +15250,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
dst_reg->subreg_def = env->insn_idx + 1;
} else {
/* case: W1 = (s8, s16)W2 */
- bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
+ bool no_sext = reg_umax(src_reg) < (1ULL << (insn->off - 1));
if (no_sext)
assign_scalar_id_before_mov(env, src_reg);
@@ -15381,17 +15332,17 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
struct bpf_reg_state *reg;
int new_range;
- if (dst_reg->umax_value == 0 && range_right_open)
+ if (reg_umax(dst_reg) == 0 && range_right_open)
/* This doesn't give us any range */
return;
- if (dst_reg->umax_value > MAX_PACKET_OFF)
+ if (reg_umax(dst_reg) > MAX_PACKET_OFF)
/* Risk of overflow. For instance, ptr + (1<<63) may be less
* than pkt_end, but that's because it's also less than pkt.
*/
return;
- new_range = dst_reg->umax_value;
+ new_range = reg_umax(dst_reg);
if (range_right_open)
new_range++;
@@ -15440,7 +15391,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
/* If our ids match, then we must have the same max_value. And we
* don't care about the other reg's fixed offset, since if it's too big
* the range won't allow anything.
- * dst_reg->umax_value is known < MAX_PACKET_OFF, therefore it fits in a u16.
+ * reg_umax(dst_reg) is known < MAX_PACKET_OFF, therefore it fits in a u16.
*/
bpf_for_each_reg_in_vstate(vstate, state, reg, ({
if (reg->type == type && reg->id == dst_reg->id)
@@ -15496,14 +15447,14 @@ static int is_scalar_branch_taken(struct bpf_verifier_env *env, struct bpf_reg_s
{
struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off;
struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off;
- u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value;
- u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value;
- s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value;
- s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value;
- u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value;
- u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value;
- s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value;
- s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value;
+ u64 umin1 = is_jmp32 ? (u64)reg_u32_min(reg1) : reg_umin(reg1);
+ u64 umax1 = is_jmp32 ? (u64)reg_u32_max(reg1) : reg_umax(reg1);
+ s64 smin1 = is_jmp32 ? (s64)reg_s32_min(reg1) : reg_smin(reg1);
+ s64 smax1 = is_jmp32 ? (s64)reg_s32_max(reg1) : reg_smax(reg1);
+ u64 umin2 = is_jmp32 ? (u64)reg_u32_min(reg2) : reg_umin(reg2);
+ u64 umax2 = is_jmp32 ? (u64)reg_u32_max(reg2) : reg_umax(reg2);
+ s64 smin2 = is_jmp32 ? (s64)reg_s32_min(reg2) : reg_smin(reg2);
+ s64 smax2 = is_jmp32 ? (s64)reg_s32_max(reg2) : reg_smax(reg2);
if (reg1 == reg2) {
switch (opcode) {
@@ -15548,11 +15499,11 @@ static int is_scalar_branch_taken(struct bpf_verifier_env *env, struct bpf_reg_s
* utilize 32-bit subrange knowledge to eliminate
* branches that can't be taken a priori
*/
- if (reg1->u32_min_value > reg2->u32_max_value ||
- reg1->u32_max_value < reg2->u32_min_value)
+ if (reg_u32_min(reg1) > reg_u32_max(reg2) ||
+ reg_u32_max(reg1) < reg_u32_min(reg2))
return 0;
- if (reg1->s32_min_value > reg2->s32_max_value ||
- reg1->s32_max_value < reg2->s32_min_value)
+ if (reg_s32_min(reg1) > reg_s32_max(reg2) ||
+ reg_s32_max(reg1) < reg_s32_min(reg2))
return 0;
}
break;
@@ -15574,11 +15525,11 @@ static int is_scalar_branch_taken(struct bpf_verifier_env *env, struct bpf_reg_s
* utilize 32-bit subrange knowledge to eliminate
* branches that can't be taken a priori
*/
- if (reg1->u32_min_value > reg2->u32_max_value ||
- reg1->u32_max_value < reg2->u32_min_value)
+ if (reg_u32_min(reg1) > reg_u32_max(reg2) ||
+ reg_u32_max(reg1) < reg_u32_min(reg2))
return 1;
- if (reg1->s32_min_value > reg2->s32_max_value ||
- reg1->s32_max_value < reg2->s32_min_value)
+ if (reg_s32_min(reg1) > reg_s32_max(reg2) ||
+ reg_s32_max(reg1) < reg_s32_min(reg2))
return 1;
}
break;
@@ -15805,27 +15756,23 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
switch (opcode) {
case BPF_JEQ:
if (is_jmp32) {
- reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
- reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
- reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
- reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
- reg2->u32_min_value = reg1->u32_min_value;
- reg2->u32_max_value = reg1->u32_max_value;
- reg2->s32_min_value = reg1->s32_min_value;
- reg2->s32_max_value = reg1->s32_max_value;
+ reg_set_urange32(reg1, max(reg_u32_min(reg1), reg_u32_min(reg2)),
+ min(reg_u32_max(reg1), reg_u32_max(reg2)));
+ reg_set_srange32(reg1, max(reg_s32_min(reg1), reg_s32_min(reg2)),
+ min(reg_s32_max(reg1), reg_s32_max(reg2)));
+ reg_set_urange32(reg2, reg_u32_min(reg1), reg_u32_max(reg1));
+ reg_set_srange32(reg2, reg_s32_min(reg1), reg_s32_max(reg1));
t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off));
reg1->var_off = tnum_with_subreg(reg1->var_off, t);
reg2->var_off = tnum_with_subreg(reg2->var_off, t);
} else {
- reg1->umin_value = max(reg1->umin_value, reg2->umin_value);
- reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
- reg1->smin_value = max(reg1->smin_value, reg2->smin_value);
- reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
- reg2->umin_value = reg1->umin_value;
- reg2->umax_value = reg1->umax_value;
- reg2->smin_value = reg1->smin_value;
- reg2->smax_value = reg1->smax_value;
+ reg_set_urange64(reg1, max(reg_umin(reg1), reg_umin(reg2)),
+ min(reg_umax(reg1), reg_umax(reg2)));
+ reg_set_srange64(reg1, max(reg_smin(reg1), reg_smin(reg2)),
+ min(reg_smax(reg1), reg_smax(reg2)));
+ reg_set_urange64(reg2, reg_umin(reg1), reg_umax(reg1));
+ reg_set_srange64(reg2, reg_smin(reg1), reg_smax(reg1));
reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off);
reg2->var_off = reg1->var_off;
@@ -15842,8 +15789,8 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
*/
val = reg_const_value(reg2, is_jmp32);
if (is_jmp32) {
- /* u32_min_value is not equal to 0xffffffff at this point,
- * because otherwise u32_max_value is 0xffffffff as well,
+ /* u32_min is not equal to 0xffffffff at this point,
+ * because otherwise u32_max is 0xffffffff as well,
* in such a case both reg1 and reg2 would be constants,
* jump would be predicted and regs_refine_cond_op()
* wouldn't be called.
@@ -15851,23 +15798,23 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
* Same reasoning works for all {u,s}{min,max}{32,64} cases
* below.
*/
- if (reg1->u32_min_value == (u32)val)
- reg1->u32_min_value++;
- if (reg1->u32_max_value == (u32)val)
- reg1->u32_max_value--;
- if (reg1->s32_min_value == (s32)val)
- reg1->s32_min_value++;
- if (reg1->s32_max_value == (s32)val)
- reg1->s32_max_value--;
+ if (reg_u32_min(reg1) == (u32)val)
+ reg_set_urange32(reg1, reg_u32_min(reg1) + 1, reg_u32_max(reg1));
+ if (reg_u32_max(reg1) == (u32)val)
+ reg_set_urange32(reg1, reg_u32_min(reg1), reg_u32_max(reg1) - 1);
+ if (reg_s32_min(reg1) == (s32)val)
+ reg_set_srange32(reg1, reg_s32_min(reg1) + 1, reg_s32_max(reg1));
+ if (reg_s32_max(reg1) == (s32)val)
+ reg_set_srange32(reg1, reg_s32_min(reg1), reg_s32_max(reg1) - 1);
} else {
- if (reg1->umin_value == (u64)val)
- reg1->umin_value++;
- if (reg1->umax_value == (u64)val)
- reg1->umax_value--;
- if (reg1->smin_value == (s64)val)
- reg1->smin_value++;
- if (reg1->smax_value == (s64)val)
- reg1->smax_value--;
+ if (reg_umin(reg1) == (u64)val)
+ reg_set_urange64(reg1, reg_umin(reg1) + 1, reg_umax(reg1));
+ if (reg_umax(reg1) == (u64)val)
+ reg_set_urange64(reg1, reg_umin(reg1), reg_umax(reg1) - 1);
+ if (reg_smin(reg1) == (s64)val)
+ reg_set_srange64(reg1, reg_smin(reg1) + 1, reg_smax(reg1));
+ if (reg_smax(reg1) == (s64)val)
+ reg_set_srange64(reg1, reg_smin(reg1), reg_smax(reg1) - 1);
}
break;
case BPF_JSET:
@@ -15914,38 +15861,38 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
break;
case BPF_JLE:
if (is_jmp32) {
- reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
- reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
+ reg_set_urange32(reg1, reg_u32_min(reg1), min(reg_u32_max(reg1), reg_u32_max(reg2)));
+ reg_set_urange32(reg2, max(reg_u32_min(reg1), reg_u32_min(reg2)), reg_u32_max(reg2));
} else {
- reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
- reg2->umin_value = max(reg1->umin_value, reg2->umin_value);
+ reg_set_urange64(reg1, reg_umin(reg1), min(reg_umax(reg1), reg_umax(reg2)));
+ reg_set_urange64(reg2, max(reg_umin(reg1), reg_umin(reg2)), reg_umax(reg2));
}
break;
case BPF_JLT:
if (is_jmp32) {
- reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1);
- reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value);
+ reg_set_urange32(reg1, reg_u32_min(reg1), min(reg_u32_max(reg1), reg_u32_max(reg2) - 1));
+ reg_set_urange32(reg2, max(reg_u32_min(reg1) + 1, reg_u32_min(reg2)), reg_u32_max(reg2));
} else {
- reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1);
- reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value);
+ reg_set_urange64(reg1, reg_umin(reg1), min(reg_umax(reg1), reg_umax(reg2) - 1));
+ reg_set_urange64(reg2, max(reg_umin(reg1) + 1, reg_umin(reg2)), reg_umax(reg2));
}
break;
case BPF_JSLE:
if (is_jmp32) {
- reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
- reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
+ reg_set_srange32(reg1, reg_s32_min(reg1), min(reg_s32_max(reg1), reg_s32_max(reg2)));
+ reg_set_srange32(reg2, max(reg_s32_min(reg1), reg_s32_min(reg2)), reg_s32_max(reg2));
} else {
- reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
- reg2->smin_value = max(reg1->smin_value, reg2->smin_value);
+ reg_set_srange64(reg1, reg_smin(reg1), min(reg_smax(reg1), reg_smax(reg2)));
+ reg_set_srange64(reg2, max(reg_smin(reg1), reg_smin(reg2)), reg_smax(reg2));
}
break;
case BPF_JSLT:
if (is_jmp32) {
- reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1);
- reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value);
+ reg_set_srange32(reg1, reg_s32_min(reg1), min(reg_s32_max(reg1), reg_s32_max(reg2) - 1));
+ reg_set_srange32(reg2, max(reg_s32_min(reg1) + 1, reg_s32_min(reg2)), reg_s32_max(reg2));
} else {
- reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1);
- reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value);
+ reg_set_srange64(reg1, reg_smin(reg1), min(reg_smax(reg1), reg_smax(reg2) - 1));
+ reg_set_srange64(reg2, max(reg_smin(reg1) + 1, reg_smin(reg2)), reg_smax(reg2));
}
break;
default:
@@ -17446,16 +17393,16 @@ static int indirect_jump_min_max_index(struct bpf_verifier_env *env,
u32 *pmin_index, u32 *pmax_index)
{
struct bpf_reg_state *reg = reg_state(env, regno);
- u64 min_index = reg->umin_value;
- u64 max_index = reg->umax_value;
+ u64 min_index = reg_umin(reg);
+ u64 max_index = reg_umax(reg);
const u32 size = 8;
if (min_index > (u64) U32_MAX * size) {
- verbose(env, "the sum of R%u umin_value %llu is too big\n", regno, reg->umin_value);
+ verbose(env, "the sum of R%u umin_value %llu is too big\n", regno, reg_umin(reg));
return -ERANGE;
}
if (max_index > (u64) U32_MAX * size) {
- verbose(env, "the sum of R%u umax_value %llu is too big\n", regno, reg->umax_value);
+ verbose(env, "the sum of R%u umax_value %llu is too big\n", regno, reg_umax(reg));
return -ERANGE;
}
--
2.53.0
* [PATCH RFC bpf-next 3/4] bpf: replace min/max fields with struct cnum{32,64}
2026-04-21 10:28 [PATCH RFC bpf-next 0/4] bpf: replace min/max fields with struct cnum{32,64} Eduard Zingerman
2026-04-21 10:28 ` [PATCH RFC bpf-next 1/4] bpf: representation and basic operations on circular numbers Eduard Zingerman
2026-04-21 10:28 ` [PATCH RFC bpf-next 2/4] bpf: use accessor functions for bpf_reg_state min/max fields Eduard Zingerman
@ 2026-04-21 10:28 ` Eduard Zingerman
2026-04-21 11:16 ` bot+bpf-ci
` (2 more replies)
2026-04-21 10:28 ` [PATCH RFC bpf-next 4/4] selftests/bpf: new cases handled by 32->64 range refinements Eduard Zingerman
` (3 subsequent siblings)
6 siblings, 3 replies; 24+ messages in thread
From: Eduard Zingerman @ 2026-04-21 10:28 UTC (permalink / raw)
To: bpf, ast, andrii
Cc: daniel, martin.lau, kernel-team, yonghong.song, eddyz87,
shung-hsi.yu, paul.chaignon, harishankar.vishwanathan
Replace eight independent s64, u64, s32, u32 min/max fields in
bpf_reg_state with two circular number fields:
- cnum64 for unified signed/unsigned 64-bit range tracking;
- cnum32 for unified signed/unsigned 32-bit range tracking.
Each cnum represents a range as a single arc on the circular number
line (base + size), from which signed and unsigned bounds are derived
on demand via accessor functions introduced in the preceding commit.
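As a reference point, here is a minimal standalone sketch of one
possible arc encoding and of how all four bound kinds fall out of a
single (base, size) pair. The field names, constructors and the exact
wrapping convention are assumptions for illustration only; the
authoritative definitions are introduced in patch 1/4 ("bpf:
representation and basic operations on circular numbers"):

	/* Sketch only; relies on kernel u64/s64 and U64_MAX/S64_MIN/S64_MAX
	 * from <linux/types.h> and <linux/limits.h>. Assumed layout: the
	 * arc covers the wrapping interval [base, base + size], i.e.
	 * size + 1 consecutive values mod 2^64.
	 */
	struct cnum64 {
		u64 base;	/* first value on the arc */
		u64 size;	/* arc holds size + 1 values */
	};

	/* Unsigned bounds: if base + size wraps past U64_MAX, the arc
	 * crosses the U64_MAX -> 0 boundary, so the unsigned range
	 * collapses to [0, U64_MAX]; otherwise the arc endpoints are
	 * the bounds themselves.
	 */
	static inline u64 cnum64_umin(struct cnum64 c)
	{
		return c.base + c.size < c.base ? 0 : c.base;
	}

	static inline u64 cnum64_umax(struct cnum64 c)
	{
		return c.base + c.size < c.base ? U64_MAX : c.base + c.size;
	}

	/* Signed bounds: identical reasoning, except the boundary being
	 * crossed is S64_MAX -> S64_MIN.
	 */
	static inline s64 cnum64_smin(struct cnum64 c)
	{
		return (s64)(c.base + c.size) < (s64)c.base ? S64_MIN : (s64)c.base;
	}

	static inline s64 cnum64_smax(struct cnum64 c)
	{
		return (s64)(c.base + c.size) < (s64)c.base ? S64_MAX : (s64)(c.base + c.size);
	}

	/* Constructors: a non-wrapping unsigned or signed interval maps
	 * to an arc directly; the subtraction is modular, so the signed
	 * variant works even when smin < 0 <= smax.
	 */
	static inline struct cnum64 cnum64_from_urange(u64 umin, u64 umax)
	{
		return (struct cnum64){ .base = umin, .size = umax - umin };
	}

	static inline struct cnum64 cnum64_from_srange(s64 smin, s64 smax)
	{
		return (struct cnum64){ .base = (u64)smin, .size = (u64)smax - (u64)smin };
	}

E.g. cnum64_from_srange(-1, 1) yields { .base = U64_MAX, .size = 2 }:
its signed bounds read back as [-1, 1], while the unsigned bounds
correctly widen to [0, U64_MAX].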
Notable changes:
- Signed<->unsigned deductions in __reg_deduce_bounds() are removed.
- 64<->32 bit deductions are replaced with:
- reg->r32 = cnum32_intersect(reg->r32, cnum32_from_cnum64(reg->r64));
this is functionally equivalent to the old code.
- reg->r64 = cnum64_cnum32_intersect(reg->r64, reg->r32);
this handles a few additional cases, see commit message for
"bpf: representation and basic operations on circular numbers".
- regs_refine_cond_op() now computes results in terms of operations on
sets, e.g. for JNE (see the membership sketch after this list):
/* Complement of the range [val, val] as cnum64. */
lo = (struct cnum64){ val + 1, U64_MAX - 1 };
reg1->r64 = cnum64_intersect(reg1->r64, lo);
- For add, sub, mul operations on scalars replace explicit bounds
computations with cnum{32,64}_{add,neg,mul}.
- For add, sub operations on pointers deduplicate with arithmetic
operations on scalars and use cnum{32,64}_{add,neg}.
- For and, or, xor operations on scalars remove explicit signed bounds
computations.
- range_bounds_violation() reduces to checking cnum_is_empty().
- const_tnum_range_mismatch() reduces to checking cnum_is_const().
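To see why the JNE complement construction above is exact, note that
membership on such an arc is a single modular comparison. The helper
below makes this concrete; cnum64_contains is an illustrative name
under the same assumed (base, size) layout sketched earlier, not
necessarily part of this series' API:

	static inline bool cnum64_contains(struct cnum64 c, u64 v)
	{
		/* v lies on [base, base + size] iff the modular distance
		 * from base to v does not exceed size.
		 */
		return v - c.base <= c.size;
	}

With lo = { val + 1, U64_MAX - 1 }, the check v - (val + 1) <= U64_MAX - 1
fails only for v == val, so the arc covers precisely the complement of
{val}.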
Selftest adjustments: a few existing tests are updated because a
single cnum arc cannot always represent what the old system expressed
as the intersection of independent signed and unsigned ranges.
For example, if the old system tracked u64=[0, U64_MAX-U32_MAX+2] and
s64=[S64_MIN+2, 2] independently, their intersection consists of two
disjoint intervals. A single cnum must cover both with the shorter
enclosing arc, losing the constraint that excluded the gap between
them. These cases are documented with comments in the adjusted tests.
reg_bounds.c is updated with logic similar to
cnum64_cnum32_intersect(). Instead of using cnums, it inspects the
intersection between 'b' and the first / last / next-after-first /
previous-before-last sub-ranges of 'a'.
reg_bounds.c is also updated to skip test cases that rely
on signed and unsigned ranges intersecting in two intervals,
as such cases are not representable by a single cnum.
The following "crafted" test cases are affected:
- reg_bounds_crafted/(s64)[0xffffffffffff8000; 0x7fff] (u32)<op> [0; 0x1f]
- reg_bounds_crafted/(s64)[0; 0x1f] (u32)<op> [0xffffffffffffff80; 0x7f]
- reg_bounds_crafted/(s64)[0xffffffffffffff80; 0x7f] (u32)<op> [0; 0x1f]
- reg_bounds_crafted/(u64)[0; 1] (s32)<op> [1; 2147483648]
- reg_bounds_crafted/(u64)[1; 2147483648] (s32)<op> [0; 1]
- reg_bounds_crafted/(u64)[0; 0xffffffff00000000] (s64)<op> 0
- reg_bounds_crafted/(u64)0 (s64)<op> [0; 0xffffffff00000000]
- reg_bounds_crafted/(u64)[0; 0xffffffff00000000] (s32)<op> 0
- reg_bounds_crafted/(u64)0 (s32)<op> [0; 0xffffffff00000000]
- reg_bounds_crafted/(s64)[S64_MIN; 0] (u64)<op> S64_MIN
- reg_bounds_crafted/(s64)S64_MIN (u64)<op> [S64_MIN; 0]
- reg_bounds_crafted/(s32)[S32_MIN; 0] (u32)<op> S32_MIN
- reg_bounds_crafted/(s32)S32_MIN (u32)<op> [S32_MIN; 0]
- reg_bounds_crafted/(s64)[0; 0x1f] (u32)<op> [0xffffffff80000000; 0x7fffffff]
- reg_bounds_crafted/(s64)[0xffffffff80000000; 0x7fffffff] (u32)<op> [0; 0x1f]
- reg_bounds_crafted/(s64)[0; 0x1f] (u32)<op> [0xffffffffffff8000; 0x7fff]
Some reg_bounds_rand_{consts,ranges}_A_B cases, where A and B differ
in sign domain, are affected as well.
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
include/linux/bpf_verifier.h | 39 +-
kernel/bpf/verifier.c | 908 +++------------------
.../testing/selftests/bpf/prog_tests/reg_bounds.c | 90 +-
.../testing/selftests/bpf/progs/verifier_bounds.c | 9 +-
.../testing/selftests/bpf/progs/verifier_subreg.c | 6 +-
5 files changed, 236 insertions(+), 816 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b44d399adbb2..72e03c417364 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -8,6 +8,7 @@
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
+#include <linux/cnum.h>
/* Maximum variable offset umax_value permitted when resolving memory accesses.
* In practice this is far bigger than any realistic pointer offset; this limit
@@ -120,14 +121,8 @@ struct bpf_reg_state {
* These refer to the same value as var_off, not necessarily the actual
* contents of the register.
*/
- s64 smin_value; /* minimum possible (s64)value */
- s64 smax_value; /* maximum possible (s64)value */
- u64 umin_value; /* minimum possible (u64)value */
- u64 umax_value; /* maximum possible (u64)value */
- s32 s32_min_value; /* minimum possible (s32)value */
- s32 s32_max_value; /* maximum possible (s32)value */
- u32 u32_min_value; /* minimum possible (u32)value */
- u32 u32_max_value; /* maximum possible (u32)value */
+ struct cnum64 r64; /* 64-bit range as circular number */
+ struct cnum32 r32; /* 32-bit range as circular number */
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
@@ -211,66 +206,62 @@ struct bpf_reg_state {
static inline s64 reg_smin(const struct bpf_reg_state *reg)
{
- return reg->smin_value;
+ return cnum64_smin(reg->r64);
}
static inline s64 reg_smax(const struct bpf_reg_state *reg)
{
- return reg->smax_value;
+ return cnum64_smax(reg->r64);
}
static inline u64 reg_umin(const struct bpf_reg_state *reg)
{
- return reg->umin_value;
+ return cnum64_umin(reg->r64);
}
static inline u64 reg_umax(const struct bpf_reg_state *reg)
{
- return reg->umax_value;
+ return cnum64_umax(reg->r64);
}
static inline s32 reg_s32_min(const struct bpf_reg_state *reg)
{
- return reg->s32_min_value;
+ return cnum32_smin(reg->r32);
}
static inline s32 reg_s32_max(const struct bpf_reg_state *reg)
{
- return reg->s32_max_value;
+ return cnum32_smax(reg->r32);
}
static inline u32 reg_u32_min(const struct bpf_reg_state *reg)
{
- return reg->u32_min_value;
+ return cnum32_umin(reg->r32);
}
static inline u32 reg_u32_max(const struct bpf_reg_state *reg)
{
- return reg->u32_max_value;
+ return cnum32_umax(reg->r32);
}
static inline void reg_set_srange32(struct bpf_reg_state *reg, s32 smin, s32 smax)
{
- reg->s32_min_value = smin;
- reg->s32_max_value = smax;
+ reg->r32 = cnum32_from_srange(smin, smax);
}
static inline void reg_set_urange32(struct bpf_reg_state *reg, u32 umin, u32 umax)
{
- reg->u32_min_value = umin;
- reg->u32_max_value = umax;
+ reg->r32 = cnum32_from_urange(umin, umax);
}
static inline void reg_set_srange64(struct bpf_reg_state *reg, s64 smin, s64 smax)
{
- reg->smin_value = smin;
- reg->smax_value = smax;
+ reg->r64 = cnum64_from_srange(smin, smax);
}
static inline void reg_set_urange64(struct bpf_reg_state *reg, u64 umin, u64 umax)
{
- reg->umin_value = umin;
- reg->umax_value = umax;
+ reg->r64 = cnum64_from_urange(umin, umax);
}
enum bpf_stack_slot_type {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e896f5d92a2..90ed32f06465 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -26,6 +26,7 @@
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
+#include <linux/cnum.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>
#include <linux/trace_events.h>
@@ -1750,10 +1751,8 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
reg->var_off = tnum_const(imm);
- reg_set_srange64(reg, (s64)imm, (s64)imm);
- reg_set_urange64(reg, imm, imm);
- reg_set_srange32(reg, (s32)imm, (s32)imm);
- reg_set_urange32(reg, (u32)imm, (u32)imm);
+ reg->r64 = cnum64_from_urange(imm, imm);
+ reg->r32 = cnum32_from_urange((u32)imm, (u32)imm);
}
/* Mark the unknown part of a register (variable offset or scalar value) as
@@ -1772,8 +1771,7 @@ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
reg->var_off = tnum_const_subreg(reg->var_off, imm);
- reg_set_srange32(reg, (s32)imm, (s32)imm);
- reg_set_urange32(reg, (u32)imm, (u32)imm);
+ reg->r32 = cnum32_from_urange((u32)imm, (u32)imm);
}
/* Mark the 'variable offset' part of a register as zero. This should be
@@ -1886,23 +1884,19 @@ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
- reg_set_srange32(reg, S32_MIN, S32_MAX);
- reg_set_urange32(reg, 0, U32_MAX);
+ reg->r32 = CNUM32_UNBOUNDED;
}
-/* Reset the min/max bounds of a register */
-static void __mark_reg_unbounded(struct bpf_reg_state *reg)
+static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
- reg_set_srange64(reg, S64_MIN, S64_MAX);
- reg_set_urange64(reg, 0, U64_MAX);
-
- __mark_reg32_unbounded(reg);
+ reg->r64 = CNUM64_UNBOUNDED;
}
-static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
+/* Reset the min/max bounds of a register */
+static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
- reg_set_srange64(reg, S64_MIN, S64_MAX);
- reg_set_urange64(reg, 0, U64_MAX);
+ __mark_reg64_unbounded(reg);
+ __mark_reg32_unbounded(reg);
}
static void reset_reg64_and_tnum(struct bpf_reg_state *reg)
@@ -1917,18 +1911,32 @@ static void reset_reg32_and_tnum(struct bpf_reg_state *reg)
reg->var_off = tnum_unknown;
}
-static void __update_reg32_bounds(struct bpf_reg_state *reg)
+static struct cnum32 cnum32_from_tnum(struct tnum tnum)
{
- struct tnum var32_off = tnum_subreg(reg->var_off);
+ tnum = tnum_subreg(tnum);
+ if ((tnum.mask & S32_MIN) || (tnum.value & S32_MIN))
+ /* min signed is max(sign bit) | min(other bits) */
+ /* max signed is min(sign bit) | max(other bits) */
+ return cnum32_from_srange(tnum.value | (tnum.mask & S32_MIN),
+ tnum.value | (tnum.mask & S32_MAX));
+ else
+ return cnum32_from_urange(tnum.value, (tnum.value | tnum.mask));
+}
- reg_set_srange32(reg,
- /* min signed is max(sign bit) | min(other bits) */
- max_t(s32, reg_s32_min(reg), var32_off.value | (var32_off.mask & S32_MIN)),
- /* max signed is min(sign bit) | max(other bits) */
- min_t(s32, reg_s32_max(reg), var32_off.value | (var32_off.mask & S32_MAX)));
- reg_set_urange32(reg,
- max_t(u32, reg_u32_min(reg), (u32)var32_off.value),
- min(reg_u32_max(reg), (u32)(var32_off.value | var32_off.mask)));
+static struct cnum64 cnum64_from_tnum(struct tnum tnum)
+{
+ if ((tnum.mask & S64_MIN) || (tnum.value & S64_MIN))
+ /* min signed is max(sign bit) | min(other bits) */
+ /* max signed is min(sign bit) | max(other bits) */
+ return cnum64_from_srange(tnum.value | (tnum.mask & S64_MIN),
+ tnum.value | (tnum.mask & S64_MAX));
+ else
+ return cnum64_from_urange(tnum.value, (tnum.value | tnum.mask));
+}
+
+static void __update_reg32_bounds(struct bpf_reg_state *reg)
+{
+ reg->r32 = cnum32_intersect(reg->r32, cnum32_from_tnum(reg->var_off));
}
static void __update_reg64_bounds(struct bpf_reg_state *reg)
@@ -1936,17 +1944,7 @@ static void __update_reg64_bounds(struct bpf_reg_state *reg)
u64 tnum_next, tmax;
bool umin_in_tnum;
- /* min signed is max(sign bit) | min(other bits) */
- /* max signed is min(sign bit) | max(other bits) */
- reg_set_srange64(reg,
- max_t(s64, reg_smin(reg),
- reg->var_off.value | (reg->var_off.mask & S64_MIN)),
- min_t(s64, reg_smax(reg),
- reg->var_off.value | (reg->var_off.mask & S64_MAX)));
- reg_set_urange64(reg,
- max(reg_umin(reg), reg->var_off.value),
- min(reg_umax(reg),
- reg->var_off.value | reg->var_off.mask));
+ reg->r64 = cnum64_intersect(reg->r64, cnum64_from_tnum(reg->var_off));
/* Check if u64 and tnum overlap in a single value */
tnum_next = tnum_step(reg->var_off, reg_umin(reg));
@@ -1985,340 +1983,17 @@ static void __update_reg_bounds(struct bpf_reg_state *reg)
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void deduce_bounds_32_from_64(struct bpf_reg_state *reg)
{
- /* If upper 32 bits of u64/s64 range don't change, we can use lower 32
- * bits to improve our u32/s32 boundaries.
- *
- * E.g., the case where we have upper 32 bits as zero ([10, 20] in
- * u64) is pretty trivial, it's obvious that in u32 we'll also have
- * [10, 20] range. But this property holds for any 64-bit range as
- * long as upper 32 bits in that entire range of values stay the same.
- *
- * E.g., u64 range [0x10000000A, 0x10000000F] ([4294967306, 4294967311]
- * in decimal) has the same upper 32 bits throughout all the values in
- * that range. As such, lower 32 bits form a valid [0xA, 0xF] ([10, 15])
- * range.
- *
- * Note also, that [0xA, 0xF] is a valid range both in u32 and in s32,
- * following the rules outlined below about u64/s64 correspondence
- * (which equally applies to u32 vs s32 correspondence). In general it
- * depends on actual hexadecimal values of 32-bit range. They can form
- * only valid u32, or only valid s32 ranges in some cases.
- *
- * So we use all these insights to derive bounds for subregisters here.
- */
- if ((reg_umin(reg) >> 32) == (reg_umax(reg) >> 32)) {
- /* u64 to u32 casting preserves validity of low 32 bits as
- * a range, if upper 32 bits are the same
- */
- reg_set_urange32(reg,
- max_t(u32, reg_u32_min(reg), (u32)reg_umin(reg)),
- min_t(u32, reg_u32_max(reg), (u32)reg_umax(reg)));
-
- if ((s32)reg_umin(reg) <= (s32)reg_umax(reg)) {
- reg_set_srange32(reg,
- max_t(s32, reg_s32_min(reg), (s32)reg_umin(reg)),
- min_t(s32, reg_s32_max(reg), (s32)reg_umax(reg)));
- }
- }
- if ((reg_smin(reg) >> 32) == (reg_smax(reg) >> 32)) {
- /* low 32 bits should form a proper u32 range */
- if ((u32)reg_smin(reg) <= (u32)reg_smax(reg)) {
- reg_set_urange32(reg,
- max_t(u32, reg_u32_min(reg), (u32)reg_smin(reg)),
- min_t(u32, reg_u32_max(reg), (u32)reg_smax(reg)));
- }
- /* low 32 bits should form a proper s32 range */
- if ((s32)reg_smin(reg) <= (s32)reg_smax(reg)) {
- reg_set_srange32(reg,
- max_t(s32, reg_s32_min(reg), (s32)reg_smin(reg)),
- min_t(s32, reg_s32_max(reg), (s32)reg_smax(reg)));
- }
- }
- /* Special case where upper bits form a small sequence of two
- * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
- * 0x00000000 is also valid), while lower bits form a proper s32 range
- * going from negative numbers to positive numbers. E.g., let's say we
- * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]).
- * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff,
- * 0x0000000000000000, 0x00000000000001}). Ignoring upper 32 bits,
- * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]).
- * Note that it doesn't have to be 0xffffffff going to 0x00000000 in
- * upper 32 bits. As a random example, s64 range
- * [0xfffffff0fffffff0; 0xfffffff100000010], forms a valid s32 range
- * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister.
- */
- if ((u32)(reg_umin(reg) >> 32) + 1 == (u32)(reg_umax(reg) >> 32) &&
- (s32)reg_umin(reg) < 0 && (s32)reg_umax(reg) >= 0) {
- reg_set_srange32(reg,
- max_t(s32, reg_s32_min(reg), (s32)reg_umin(reg)),
- min_t(s32, reg_s32_max(reg), (s32)reg_umax(reg)));
- }
- if ((u32)(reg_smin(reg) >> 32) + 1 == (u32)(reg_smax(reg) >> 32) &&
- (s32)reg_smin(reg) < 0 && (s32)reg_smax(reg) >= 0) {
- reg_set_srange32(reg,
- max_t(s32, reg_s32_min(reg), (s32)reg_smin(reg)),
- min_t(s32, reg_s32_max(reg), (s32)reg_smax(reg)));
- }
-}
-
-static void deduce_bounds_32_from_32(struct bpf_reg_state *reg)
-{
- /* if u32 range forms a valid s32 range (due to matching sign bit),
- * try to learn from that
- */
- if ((s32)reg_u32_min(reg) <= (s32)reg_u32_max(reg)) {
- reg_set_srange32(reg,
- max_t(s32, reg_s32_min(reg), reg_u32_min(reg)),
- min_t(s32, reg_s32_max(reg), reg_u32_max(reg)));
- }
- /* If we cannot cross the sign boundary, then signed and unsigned bounds
- * are the same, so combine. This works even in the negative case, e.g.
- * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
- */
- if ((u32)reg_s32_min(reg) <= (u32)reg_s32_max(reg)) {
- reg_set_urange32(reg,
- max_t(u32, reg_s32_min(reg), reg_u32_min(reg)),
- min_t(u32, reg_s32_max(reg), reg_u32_max(reg)));
- } else {
- if (reg_u32_max(reg) < (u32)reg_s32_min(reg)) {
- /* See __reg64_deduce_bounds() for detailed explanation.
- * Refine ranges in the following situation:
- *
- * 0 U32_MAX
- * | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
- * |----------------------------|----------------------------|
- * |xxxxx s32 range xxxxxxxxx] [xxxxxxx|
- * 0 S32_MAX S32_MIN -1
- */
- reg_set_srange32(reg, (s32)reg_u32_min(reg), reg_s32_max(reg));
- reg_set_urange32(reg,
- reg_u32_min(reg),
- min_t(u32, reg_u32_max(reg), reg_s32_max(reg)));
- } else if ((u32)reg_s32_max(reg) < reg_u32_min(reg)) {
- /*
- * 0 U32_MAX
- * | [xxxxxxxxxxxxxx u32 range xxxxxxxxxxxxxx] |
- * |----------------------------|----------------------------|
- * |xxxxxxxxx] [xxxxxxxxxxxx s32 range |
- * 0 S32_MAX S32_MIN -1
- */
- reg_set_srange32(reg, reg_s32_min(reg), (s32)reg_u32_max(reg));
- reg_set_urange32(reg,
- max_t(u32, reg_u32_min(reg), reg_s32_min(reg)),
- reg_u32_max(reg));
- }
- }
-}
-
-static void deduce_bounds_64_from_64(struct bpf_reg_state *reg)
-{
- /* If u64 range forms a valid s64 range (due to matching sign bit),
- * try to learn from that. Let's do a bit of ASCII art to see when
- * this is happening. Let's take u64 range first:
- *
- * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX
- * |-------------------------------|--------------------------------|
- *
- * Valid u64 range is formed when umin and umax are anywhere in the
- * range [0, U64_MAX], and umin <= umax. u64 case is simple and
- * straightforward. Let's see how s64 range maps onto the same range
- * of values, annotated below the line for comparison:
- *
- * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX
- * |-------------------------------|--------------------------------|
- * 0 S64_MAX S64_MIN -1
- *
- * So s64 values basically start in the middle and they are logically
- * contiguous to the right of it, wrapping around from -1 to 0, and
- * then finishing as S64_MAX (0x7fffffffffffffff) right before
- * S64_MIN. We can try drawing the continuity of u64 vs s64 values
- * more visually as mapped to sign-agnostic range of hex values.
- *
- * u64 start u64 end
- * _______________________________________________________________
- * / \
- * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX
- * |-------------------------------|--------------------------------|
- * 0 S64_MAX S64_MIN -1
- * / \
- * >------------------------------ ------------------------------->
- * s64 continues... s64 end s64 start s64 "midpoint"
- *
- * What this means is that, in general, we can't always derive
- * something new about u64 from any random s64 range, and vice versa.
- *
- * But we can do that in two particular cases. One is when entire
- * u64/s64 range is *entirely* contained within left half of the above
- * diagram or when it is *entirely* contained in the right half. I.e.:
- *
- * |-------------------------------|--------------------------------|
- * ^ ^ ^ ^
- * A B C D
- *
- * [A, B] and [C, D] are contained entirely in their respective halves
- * and form valid contiguous ranges as both u64 and s64 values. [A, B]
- * will be non-negative both as u64 and s64 (and in fact it will be
- * identical ranges no matter the signedness). [C, D] treated as s64
- * will be a range of negative values, while in u64 it will be
- * non-negative range of values larger than 0x8000000000000000.
- *
- * Now, any other range here can't be represented in both u64 and s64
- * simultaneously. E.g., [A, C], [A, D], [B, C], [B, D] are valid
- * contiguous u64 ranges, but they are discontinuous in s64. [B, C]
- * in s64 would be properly presented as [S64_MIN, C] and [B, S64_MAX],
- * for example. Similarly, valid s64 range [D, A] (going from negative
- * to positive values), would be two separate [D, U64_MAX] and [0, A]
- * ranges as u64. Currently reg_state can't represent two segments per
- * numeric domain, so in such situations we can only derive maximal
- * possible range ([0, U64_MAX] for u64, and [S64_MIN, S64_MAX] for s64).
- *
- * So we use these facts to derive umin/umax from smin/smax and vice
- * versa only if they stay within the same "half". This is equivalent
- * to checking sign bit: lower half will have sign bit as zero, upper
- * half have sign bit 1. Below in code we simplify this by just
- * casting umin/umax as smin/smax and checking if they form valid
- * range, and vice versa. Those are equivalent checks.
- */
- if ((s64)reg_umin(reg) <= (s64)reg_umax(reg)) {
- reg_set_srange64(reg,
- max_t(s64, reg_smin(reg), reg_umin(reg)),
- min_t(s64, reg_smax(reg), reg_umax(reg)));
- }
- /* If we cannot cross the sign boundary, then signed and unsigned bounds
- * are the same, so combine. This works even in the negative case, e.g.
- * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
- */
- if ((u64)reg_smin(reg) <= (u64)reg_smax(reg)) {
- reg_set_urange64(reg,
- max_t(u64, reg_smin(reg), reg_umin(reg)),
- min_t(u64, reg_smax(reg), reg_umax(reg)));
- } else {
- /* If the s64 range crosses the sign boundary, then it's split
- * between the beginning and end of the U64 domain. In that
- * case, we can derive new bounds if the u64 range overlaps
- * with only one end of the s64 range.
- *
- * In the following example, the u64 range overlaps only with
- * positive portion of the s64 range.
- *
- * 0 U64_MAX
- * | [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx] |
- * |----------------------------|----------------------------|
- * |xxxxx s64 range xxxxxxxxx] [xxxxxxx|
- * 0 S64_MAX S64_MIN -1
- *
- * We can thus derive the following new s64 and u64 ranges.
- *
- * 0 U64_MAX
- * | [xxxxxx u64 range xxxxx] |
- * |----------------------------|----------------------------|
- * | [xxxxxx s64 range xxxxx] |
- * 0 S64_MAX S64_MIN -1
- *
- * If they overlap in two places, we can't derive anything
- * because reg_state can't represent two ranges per numeric
- * domain.
- *
- * 0 U64_MAX
- * | [xxxxxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxxxxx] |
- * |----------------------------|----------------------------|
- * |xxxxx s64 range xxxxxxxxx] [xxxxxxxxxx|
- * 0 S64_MAX S64_MIN -1
- *
- * The first condition below corresponds to the first diagram
- * above.
- */
- if (reg_umax(reg) < (u64)reg_smin(reg)) {
- reg_set_srange64(reg, (s64)reg_umin(reg), reg_smax(reg));
- reg_set_urange64(reg, reg_umin(reg), min_t(u64, reg_umax(reg), reg_smax(reg)));
- } else if ((u64)reg_smax(reg) < reg_umin(reg)) {
- /* This second condition considers the case where the u64 range
- * overlaps with the negative portion of the s64 range:
- *
- * 0 U64_MAX
- * | [xxxxxxxxxxxxxx u64 range xxxxxxxxxxxxxx] |
- * |----------------------------|----------------------------|
- * |xxxxxxxxx] [xxxxxxxxxxxx s64 range |
- * 0 S64_MAX S64_MIN -1
- */
- reg_set_srange64(reg, reg_smin(reg), (s64)reg_umax(reg));
- reg_set_urange64(reg, max_t(u64, reg_umin(reg), reg_smin(reg)), reg_umax(reg));
- }
- }
+ reg->r32 = cnum32_intersect(reg->r32, cnum32_from_cnum64(reg->r64));
}
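
All of the removed case analysis collapses here because truncation is
arc-preserving in the circular encoding: when a 64-bit arc spans fewer
than 2^32 values, its low 32 bits form the arc with truncated base and
size; otherwise every u32 value is reachable. A sketch of what
cnum32_from_cnum64() from patch 1/4 plausibly does (CNUM32_UNBOUNDED is
assumed here by analogy with the CNUM64_UNBOUNDED used further down):

	/* sketch only, not the actual patch 1/4 helper */
	static struct cnum32 cnum32_from_cnum64_model(struct cnum64 a)
	{
		/* an arc longer than the u32 circle hits every residue */
		if (a.size > U32_MAX)
			return CNUM32_UNBOUNDED;
		return (struct cnum32){ (u32)a.base, (u32)a.size };
	}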
static void deduce_bounds_64_from_32(struct bpf_reg_state *reg)
{
- /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit
- * values on both sides of 64-bit range in hope to have tighter range.
- * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from
- * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff].
- * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound
- * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of
- * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a
- * better overall bounds for r1 as [0x1'00000001; 0x3'7fffffff].
- * We just need to make sure that derived bounds we are intersecting
- * with are well-formed ranges in respective s64 or u64 domain, just
- * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments.
- */
- __u64 new_umin, new_umax;
- __s64 new_smin, new_smax;
-
- /* u32 -> u64 tightening, it's always well-formed */
- new_umin = (reg_umin(reg) & ~0xffffffffULL) | reg_u32_min(reg);
- new_umax = (reg_umax(reg) & ~0xffffffffULL) | reg_u32_max(reg);
- reg_set_urange64(reg,
- max_t(u64, reg_umin(reg), new_umin),
- min_t(u64, reg_umax(reg), new_umax));
- /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */
- new_smin = (reg_smin(reg) & ~0xffffffffULL) | reg_u32_min(reg);
- new_smax = (reg_smax(reg) & ~0xffffffffULL) | reg_u32_max(reg);
- reg_set_srange64(reg,
- max_t(s64, reg_smin(reg), new_smin),
- min_t(s64, reg_smax(reg), new_smax));
-
- /* Here we would like to handle a special case after sign extending load,
- * when upper bits for a 64-bit range are all 1s or all 0s.
- *
- * Upper bits are all 1s when register is in a range:
- * [0xffff_ffff_0000_0000, 0xffff_ffff_ffff_ffff]
- * Upper bits are all 0s when register is in a range:
- * [0x0000_0000_0000_0000, 0x0000_0000_ffff_ffff]
- * Together this forms a continuous range:
- * [0xffff_ffff_0000_0000, 0x0000_0000_ffff_ffff]
- *
- * Now, suppose that register range is in fact tighter:
- * [0xffff_ffff_8000_0000, 0x0000_0000_ffff_ffff] (R)
- * Also suppose that its 32-bit range is positive,
- * meaning that lower 32-bits of the full 64-bit register
- * are in the range:
- * [0x0000_0000, 0x7fff_ffff] (W)
- *
- * If this happens, then any value in a range:
- * [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff]
- * is smaller than a lowest bound of the range (R):
- * 0xffff_ffff_8000_0000
- * which means that upper bits of the full 64-bit register
- * can't be all 1s, when lower bits are in range (W).
- *
- * Note that:
- * - 0xffff_ffff_8000_0000 == (s64)S32_MIN
- * - 0x0000_0000_7fff_ffff == (s64)S32_MAX
- * These relations are used in the conditions below.
- */
- if (reg_s32_min(reg) >= 0 && reg_smin(reg) >= S32_MIN && reg_smax(reg) <= S32_MAX) {
- reg_set_srange64(reg, reg_s32_min(reg), reg_s32_max(reg));
- reg_set_urange64(reg, reg_s32_min(reg), reg_s32_max(reg));
- reg->var_off = tnum_intersect(reg->var_off,
- tnum_range(reg_smin(reg), reg_smax(reg)));
- }
+ reg->r64 = cnum64_cnum32_intersect(reg->r64, reg->r32);
}
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
- deduce_bounds_64_from_64(reg);
deduce_bounds_32_from_64(reg);
- deduce_bounds_32_from_32(reg);
deduce_bounds_64_from_32(reg);
}
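
Both deduction steps are now intersections of circular ranges, so the
diagrams above reduce to a single primitive. A simplified single-arc
model, assuming the {base, size} encoding from patch 1/4; the real
helper must also yield an empty result for disjoint arcs and choose an
arc when the ranges overlap twice (the case the selftest changes below
skip via reg_state_needs_two_arcs()):

	/* sketch: first overlapping arc of two circular ranges */
	static bool cnum64_isect_one_arc(struct cnum64 a, struct cnum64 b,
					 struct cnum64 *out)
	{
		u64 off = b.base - a.base;	/* b's start relative to a's */

		if (off <= a.size) {		/* b's arc begins inside a's */
			*out = (struct cnum64){ b.base, min(a.size - off, b.size) };
			return true;
		}
		off = a.base - b.base;
		if (off <= b.size) {		/* a's arc begins inside b's */
			*out = (struct cnum64){ a.base, min(b.size - off, a.size) };
			return true;
		}
		return false;			/* arcs are disjoint */
	}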
@@ -2356,35 +2031,25 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
__update_reg_bounds(reg);
}
-static bool range_bounds_violation(struct bpf_reg_state *reg)
-{
- return (reg_umin(reg) > reg_umax(reg) || reg_smin(reg) > reg_smax(reg) ||
- reg_u32_min(reg) > reg_u32_max(reg) ||
- reg_s32_min(reg) > reg_s32_max(reg));
-}
-
static bool const_tnum_range_mismatch(struct bpf_reg_state *reg)
{
- u64 uval = reg->var_off.value;
- s64 sval = (s64)uval;
-
if (!tnum_is_const(reg->var_off))
return false;
- return reg_umin(reg) != uval || reg_umax(reg) != uval ||
- reg_smin(reg) != sval || reg_smax(reg) != sval;
+ return !cnum64_is_const(reg->r64) || reg->r64.base != reg->var_off.value;
}
static bool const_tnum_range_mismatch_32(struct bpf_reg_state *reg)
{
- u32 uval32 = tnum_subreg(reg->var_off).value;
- s32 sval32 = (s32)uval32;
-
if (!tnum_subreg_is_const(reg->var_off))
return false;
- return reg_u32_min(reg) != uval32 || reg_u32_max(reg) != uval32 ||
- reg_s32_min(reg) != sval32 || reg_s32_max(reg) != sval32;
+ return !cnum32_is_const(reg->r32) || reg->r32.base != tnum_subreg(reg->var_off).value;
+}
+
+static bool range_bounds_violation(struct bpf_reg_state *reg)
+{
+ return cnum32_is_empty(reg->r32) || cnum64_is_empty(reg->r64);
}
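
Note the inverted invariants: with the {base, size} encoding every
representable pair denotes a non-empty arc (size == 0 is a single
point), so emptiness presumably needs a dedicated sentinel in patch
1/4, while constness becomes a trivial size test, roughly:

	/* sketch, assuming the base/size encoding from patch 1/4 */
	static inline bool cnum64_is_const_model(struct cnum64 c)
	{
		return c.size == 0;	/* [base, base + 0] is the point base */
	}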
static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
@@ -2409,12 +2074,11 @@ static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
return 0;
out:
- verifier_bug(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] "
- "s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)",
- ctx, msg, reg_umin(reg), reg_umax(reg),
- reg_smin(reg), reg_smax(reg),
- reg_u32_min(reg), reg_u32_max(reg),
- reg_s32_min(reg), reg_s32_max(reg),
+ verifier_bug(env, "REG INVARIANTS VIOLATION (%s): %s r64={.base=%#llx, .size=%#llx} "
+ "r32={.base=%#x, .size=%#x} var_off=(%#llx, %#llx)",
+ ctx, msg,
+ reg->r64.base, reg->r64.size,
+ reg->r32.base, reg->r32.size,
reg->var_off.value, reg->var_off.mask);
if (env->test_reg_invariants)
return -EFAULT;
@@ -2422,26 +2086,6 @@ static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
return 0;
}
-static bool __reg32_bound_s64(s32 a)
-{
- return a >= 0 && a <= S32_MAX;
-}
-
-static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
-{
- reg_set_urange64(reg, reg_u32_min(reg), reg_u32_max(reg));
-
- /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
- * be positive otherwise set to worse case bounds and refine later
- * from tnum.
- */
- if (__reg32_bound_s64(reg_s32_min(reg)) &&
- __reg32_bound_s64(reg_s32_max(reg)))
- reg_set_srange64(reg, reg_s32_min(reg), reg_s32_max(reg));
- else
- reg_set_srange64(reg, 0, U32_MAX);
-}
-
/* Mark a register as having a completely unknown (scalar) value. */
void bpf_mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{
@@ -5605,7 +5249,7 @@ static int check_buffer_access(struct bpf_verifier_env *env,
static void zext_32_to_64(struct bpf_reg_state *reg)
{
reg->var_off = tnum_subreg(reg->var_off);
- __reg_assign_32_into_64(reg);
+ reg_set_urange64(reg, reg_u32_min(reg), reg_u32_max(reg));
}
/* truncate register to smaller size (in bytes)
@@ -5620,12 +5264,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
/* fix arithmetic bounds */
mask = ((u64)1 << (size * 8)) - 1;
- if ((reg_umin(reg) & ~mask) == (reg_umax(reg) & ~mask)) {
+ if ((reg_umin(reg) & ~mask) == (reg_umax(reg) & ~mask))
reg_set_urange64(reg, reg_umin(reg) & mask, reg_umax(reg) & mask);
- } else {
+ else
reg_set_urange64(reg, 0, mask);
- }
- reg_set_srange64(reg, reg_umin(reg), reg_umax(reg));
/* If size is smaller than 32bit register the 32bit register
* values are also truncated so we push 64-bit bounds into
@@ -5650,8 +5292,6 @@ static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
reg_set_srange64(reg, S32_MIN, S32_MAX);
reg_set_srange32(reg, S32_MIN, S32_MAX);
}
- reg_set_urange64(reg, 0, U64_MAX);
- reg_set_urange32(reg, 0, U32_MAX);
reg->var_off = tnum_unknown;
}
@@ -5672,10 +5312,8 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
reg->var_off = tnum_const((s32)u64_cval);
u64_cval = reg->var_off.value;
- reg_set_srange64(reg, u64_cval, u64_cval);
- reg_set_urange64(reg, u64_cval, u64_cval);
- reg_set_srange32(reg, u64_cval, u64_cval);
- reg_set_urange32(reg, u64_cval, u64_cval);
+ reg->r64 = cnum64_from_urange(u64_cval, u64_cval);
+ reg->r32 = cnum32_from_urange((u32)u64_cval, (u32)u64_cval);
return;
}
@@ -5703,9 +5341,7 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
/* both of s64_max/s64_min positive or negative */
if ((s64_max >= 0) == (s64_min >= 0)) {
reg_set_srange64(reg, s64_min, s64_max);
- reg_set_urange64(reg, s64_min, s64_max);
reg_set_srange32(reg, s64_min, s64_max);
- reg_set_urange32(reg, s64_min, s64_max);
reg->var_off = tnum_range(s64_min, s64_max);
return;
}
@@ -5721,7 +5357,6 @@ static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
else
/* size == 2 */
reg_set_srange32(reg, S16_MIN, S16_MAX);
- reg_set_urange32(reg, 0, U32_MAX);
reg->var_off = tnum_subreg(tnum_unknown);
}
@@ -5740,7 +5375,6 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
u32_val = reg->var_off.value;
reg_set_srange32(reg, u32_val, u32_val);
- reg_set_urange32(reg, u32_val, u32_val);
return;
}
@@ -5764,7 +5398,6 @@ static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
if ((s32_min >= 0) == (s32_max >= 0)) {
reg_set_srange32(reg, s32_min, s32_max);
- reg_set_urange32(reg, (u32)s32_min, (u32)s32_max);
reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max));
return;
}
@@ -9914,8 +9547,6 @@ static int do_refine_retval_range(struct bpf_verifier_env *env,
case BPF_FUNC_get_smp_processor_id:
reg_set_urange64(ret_reg, 0, nr_cpu_ids - 1);
reg_set_urange32(ret_reg, 0, nr_cpu_ids - 1);
- reg_set_srange64(ret_reg, 0, nr_cpu_ids - 1);
- reg_set_srange32(ret_reg, 0, nr_cpu_ids - 1);
reg_bounds_sync(ret_reg);
break;
}
@@ -13683,10 +13314,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_func_state *state = vstate->frame[vstate->curframe];
struct bpf_reg_state *regs = state->regs, *dst_reg;
bool known = tnum_is_const(off_reg->var_off);
- s64 smin_val = reg_smin(off_reg), smax_val = reg_smax(off_reg),
- smin_ptr = reg_smin(ptr_reg), smax_ptr = reg_smax(ptr_reg);
- u64 umin_val = reg_umin(off_reg), umax_val = reg_umax(off_reg),
- umin_ptr = reg_umin(ptr_reg), umax_ptr = reg_umax(ptr_reg);
+ s64 smin_val = reg_smin(off_reg), smax_val = reg_smax(off_reg);
+ u64 umin_val = reg_umin(off_reg), umax_val = reg_umax(off_reg);
struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
@@ -13788,23 +13417,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
* added into the variable offset, and we copy the fixed offset
* from ptr_reg.
*/
- {
- s64 smin_res, smax_res;
- u64 umin_res, umax_res;
-
- if (check_add_overflow(smin_ptr, smin_val, &smin_res) ||
- check_add_overflow(smax_ptr, smax_val, &smax_res)) {
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
- } else {
- reg_set_srange64(dst_reg, smin_res, smax_res);
- }
- if (check_add_overflow(umin_ptr, umin_val, &umin_res) ||
- check_add_overflow(umax_ptr, umax_val, &umax_res)) {
- reg_set_urange64(dst_reg, 0, U64_MAX);
- } else {
- reg_set_urange64(dst_reg, umin_res, umax_res);
- }
- }
+ dst_reg->r64 = cnum64_add(ptr_reg->r64, off_reg->r64);
dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
@@ -13836,27 +13449,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst);
return -EACCES;
}
- /* A new variable offset is created. If the subtrahend is known
- * nonnegative, then any reg->range we had before is still good.
- */
- {
- s64 smin_res, smax_res;
-
- if (check_sub_overflow(smin_ptr, smax_val, &smin_res) ||
- check_sub_overflow(smax_ptr, smin_val, &smax_res)) {
- /* Overflow possible, we know nothing */
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
- } else {
- reg_set_srange64(dst_reg, smin_res, smax_res);
- }
- }
- if (umin_ptr < umax_val) {
- /* Overflow possible, we know nothing */
- reg_set_urange64(dst_reg, 0, U64_MAX);
- } else {
- /* Cannot overflow (as long as bounds are consistent) */
- reg_set_urange64(dst_reg, umin_ptr - umax_val, umax_ptr - umin_val);
- }
+ dst_reg->r64 = cnum64_add(ptr_reg->r64, cnum64_negate(off_reg->r64));
dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
dst_reg->raw = ptr_reg->raw;
if (reg_is_pkt_pointer(ptr_reg)) {
@@ -13913,201 +13506,37 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 smin = reg_s32_min(dst_reg);
- s32 smax = reg_s32_max(dst_reg);
- u32 umin = reg_u32_min(dst_reg);
- u32 umax = reg_u32_max(dst_reg);
- u32 umin_val = reg_u32_min(src_reg);
- u32 umax_val = reg_u32_max(src_reg);
- bool min_overflow, max_overflow;
-
- if (check_add_overflow(smin, reg_s32_min(src_reg), &smin) ||
- check_add_overflow(smax, reg_s32_max(src_reg), &smax)) {
- smin = S32_MIN;
- smax = S32_MAX;
- }
-
- /* If either all additions overflow or no additions overflow, then
- * it is okay to set: dst_umin = dst_umin + src_umin, dst_umax =
- * dst_umax + src_umax. Otherwise (some additions overflow), set
- * the output bounds to unbounded.
- */
- min_overflow = check_add_overflow(umin, umin_val, &umin);
- max_overflow = check_add_overflow(umax, umax_val, &umax);
-
- if (!min_overflow && max_overflow) {
- umin = 0;
- umax = U32_MAX;
- }
-
- reg_set_srange32(dst_reg, smin, smax);
- reg_set_urange32(dst_reg, umin, umax);
+ dst_reg->r32 = cnum32_add(dst_reg->r32, src_reg->r32);
}
static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 smin = reg_smin(dst_reg);
- s64 smax = reg_smax(dst_reg);
- u64 umin = reg_umin(dst_reg);
- u64 umax = reg_umax(dst_reg);
- u64 umin_val = reg_umin(src_reg);
- u64 umax_val = reg_umax(src_reg);
- bool min_overflow, max_overflow;
-
- if (check_add_overflow(smin, reg_smin(src_reg), &smin) ||
- check_add_overflow(smax, reg_smax(src_reg), &smax)) {
- smin = S64_MIN;
- smax = S64_MAX;
- }
-
- /* If either all additions overflow or no additions overflow, then
- * it is okay to set: dst_umin = dst_umin + src_umin, dst_umax =
- * dst_umax + src_umax. Otherwise (some additions overflow), set
- * the output bounds to unbounded.
- */
- min_overflow = check_add_overflow(umin, umin_val, &umin);
- max_overflow = check_add_overflow(umax, umax_val, &umax);
-
- if (!min_overflow && max_overflow) {
- umin = 0;
- umax = U64_MAX;
- }
-
- reg_set_srange64(dst_reg, smin, smax);
- reg_set_urange64(dst_reg, umin, umax);
+ dst_reg->r64 = cnum64_add(dst_reg->r64, src_reg->r64);
}
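
Both the signed and the unsigned overflow handling above are subsumed
by one circular addition: bases add, sizes add, and only when the
combined size wraps the whole circle does the result degrade to
unbounded. A plausible model of cnum64_add(), under the same patch 1/4
assumptions as above:

	static struct cnum64 cnum64_add_model(struct cnum64 a, struct cnum64 b)
	{
		struct cnum64 r = { a.base + b.base, 0 };

		/* the result arc would cover the full circle */
		if (check_add_overflow(a.size, b.size, &r.size))
			return CNUM64_UNBOUNDED;
		return r;
	}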
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 smin = reg_s32_min(dst_reg);
- s32 smax = reg_s32_max(dst_reg);
- u32 umin = reg_u32_min(dst_reg);
- u32 umax = reg_u32_max(dst_reg);
- u32 umin_val = reg_u32_min(src_reg);
- u32 umax_val = reg_u32_max(src_reg);
- bool min_underflow, max_underflow;
-
- if (check_sub_overflow(smin, reg_s32_max(src_reg), &smin) ||
- check_sub_overflow(smax, reg_s32_min(src_reg), &smax)) {
- /* Overflow possible, we know nothing */
- smin = S32_MIN;
- smax = S32_MAX;
- }
-
- /* If either all subtractions underflow or no subtractions
- * underflow, it is okay to set: dst_umin = dst_umin - src_umax,
- * dst_umax = dst_umax - src_umin. Otherwise (some subtractions
- * underflow), set the output bounds to unbounded.
- */
- min_underflow = check_sub_overflow(umin, umax_val, &umin);
- max_underflow = check_sub_overflow(umax, umin_val, &umax);
-
- if (min_underflow && !max_underflow) {
- umin = 0;
- umax = U32_MAX;
- }
-
- reg_set_srange32(dst_reg, smin, smax);
- reg_set_urange32(dst_reg, umin, umax);
+ dst_reg->r32 = cnum32_add(dst_reg->r32, cnum32_negate(src_reg->r32));
}
static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 smin = reg_smin(dst_reg);
- s64 smax = reg_smax(dst_reg);
- u64 umin = reg_umin(dst_reg);
- u64 umax = reg_umax(dst_reg);
- u64 umin_val = reg_umin(src_reg);
- u64 umax_val = reg_umax(src_reg);
- bool min_underflow, max_underflow;
-
- if (check_sub_overflow(smin, reg_smax(src_reg), &smin) ||
- check_sub_overflow(smax, reg_smin(src_reg), &smax)) {
- /* Overflow possible, we know nothing */
- smin = S64_MIN;
- smax = S64_MAX;
- }
-
- /* If either all subtractions underflow or no subtractions
- * underflow, it is okay to set: dst_umin = dst_umin - src_umax,
- * dst_umax = dst_umax - src_umin. Otherwise (some subtractions
- * underflow), set the output bounds to unbounded.
- */
- min_underflow = check_sub_overflow(umin, umax_val, &umin);
- max_underflow = check_sub_overflow(umax, umin_val, &umax);
-
- if (min_underflow && !max_underflow) {
- umin = 0;
- umax = U64_MAX;
- }
-
- reg_set_srange64(dst_reg, smin, smax);
- reg_set_urange64(dst_reg, umin, umax);
+ dst_reg->r64 = cnum64_add(dst_reg->r64, cnum64_negate(src_reg->r64));
}
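
Subtraction likewise needs no dedicated underflow logic: negating an
arc mirrors its base while preserving its size, and x - y becomes
x + (-y). A sketch of the negation, same assumptions as above:

	static struct cnum64 cnum64_negate_model(struct cnum64 a)
	{
		/* -[base, base + size] = [-(base + size), -base] */
		return (struct cnum64){ -(a.base + a.size), a.size };
	}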
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s32 smin = reg_s32_min(dst_reg);
- s32 smax = reg_s32_max(dst_reg);
- u32 umin = reg_u32_min(dst_reg);
- u32 umax = reg_u32_max(dst_reg);
- s32 tmp_prod[4];
-
- if (check_mul_overflow(umax, reg_u32_max(src_reg), &umax) ||
- check_mul_overflow(umin, reg_u32_min(src_reg), &umin)) {
- /* Overflow possible, we know nothing */
- umin = 0;
- umax = U32_MAX;
- }
- if (check_mul_overflow(smin, reg_s32_min(src_reg), &tmp_prod[0]) ||
- check_mul_overflow(smin, reg_s32_max(src_reg), &tmp_prod[1]) ||
- check_mul_overflow(smax, reg_s32_min(src_reg), &tmp_prod[2]) ||
- check_mul_overflow(smax, reg_s32_max(src_reg), &tmp_prod[3])) {
- /* Overflow possible, we know nothing */
- smin = S32_MIN;
- smax = S32_MAX;
- } else {
- smin = min_array(tmp_prod, 4);
- smax = max_array(tmp_prod, 4);
- }
-
- reg_set_srange32(dst_reg, smin, smax);
- reg_set_urange32(dst_reg, umin, umax);
+ dst_reg->r32 = cnum32_mul(dst_reg->r32, src_reg->r32);
}
static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
- s64 smin = reg_smin(dst_reg);
- s64 smax = reg_smax(dst_reg);
- u64 umin = reg_umin(dst_reg);
- u64 umax = reg_umax(dst_reg);
- s64 tmp_prod[4];
-
- if (check_mul_overflow(umax, reg_umax(src_reg), &umax) ||
- check_mul_overflow(umin, reg_umin(src_reg), &umin)) {
- /* Overflow possible, we know nothing */
- umin = 0;
- umax = U64_MAX;
- }
- if (check_mul_overflow(smin, reg_smin(src_reg), &tmp_prod[0]) ||
- check_mul_overflow(smin, reg_smax(src_reg), &tmp_prod[1]) ||
- check_mul_overflow(smax, reg_smin(src_reg), &tmp_prod[2]) ||
- check_mul_overflow(smax, reg_smax(src_reg), &tmp_prod[3])) {
- /* Overflow possible, we know nothing */
- smin = S64_MIN;
- smax = S64_MAX;
- } else {
- smin = min_array(tmp_prod, 4);
- smax = max_array(tmp_prod, 4);
- }
-
- reg_set_srange64(dst_reg, smin, smax);
- reg_set_urange64(dst_reg, umin, umax);
+ dst_reg->r64 = cnum64_mul(dst_reg->r64, src_reg->r64);
}
static void scalar32_min_max_udiv(struct bpf_reg_state *dst_reg,
@@ -14119,7 +13548,6 @@ static void scalar32_min_max_udiv(struct bpf_reg_state *dst_reg,
reg_u32_max(dst_reg) / src_val);
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
reset_reg64_and_tnum(dst_reg);
}
@@ -14132,7 +13560,6 @@ static void scalar_min_max_udiv(struct bpf_reg_state *dst_reg,
div64_u64(reg_umax(dst_reg), src_val));
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
reset_reg32_and_tnum(dst_reg);
}
@@ -14169,7 +13596,6 @@ static void scalar32_min_max_sdiv(struct bpf_reg_state *dst_reg,
reset:
reg_set_srange32(dst_reg, smin, smax);
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_urange32(dst_reg, 0, U32_MAX);
reset_reg64_and_tnum(dst_reg);
}
@@ -14206,7 +13632,6 @@ static void scalar_min_max_sdiv(struct bpf_reg_state *dst_reg,
reset:
reg_set_srange64(dst_reg, smin, smax);
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_urange64(dst_reg, 0, U64_MAX);
reset_reg32_and_tnum(dst_reg);
}
@@ -14226,7 +13651,6 @@ static void scalar32_min_max_umod(struct bpf_reg_state *dst_reg,
reg_set_urange32(dst_reg, 0, min(reg_u32_max(dst_reg), res_max));
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
reset_reg64_and_tnum(dst_reg);
}
@@ -14246,7 +13670,6 @@ static void scalar_min_max_umod(struct bpf_reg_state *dst_reg,
reg_set_urange64(dst_reg, 0, min(reg_umax(dst_reg), res_max));
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
reset_reg32_and_tnum(dst_reg);
}
@@ -14286,7 +13709,6 @@ static void scalar32_min_max_smod(struct bpf_reg_state *dst_reg,
}
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_urange32(dst_reg, 0, U32_MAX);
reset_reg64_and_tnum(dst_reg);
}
@@ -14326,7 +13748,6 @@ static void scalar_min_max_smod(struct bpf_reg_state *dst_reg,
}
/* Reset other ranges/tnum to unbounded/unknown. */
- reg_set_urange64(dst_reg, 0, U64_MAX);
reset_reg32_and_tnum(dst_reg);
}
@@ -14346,15 +13767,9 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
- reg_set_urange32(dst_reg, var32_off.value, min(reg_u32_max(dst_reg), umax_val));
-
- /* Safe to set s32 bounds by casting u32 result into s32 when u32
- * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
- */
- if ((s32)reg_u32_min(dst_reg) <= (s32)reg_u32_max(dst_reg))
- reg_set_srange32(dst_reg, reg_u32_min(dst_reg), reg_u32_max(dst_reg));
- else
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
+ reg_set_urange32(dst_reg,
+ var32_off.value,
+ min(reg_u32_max(dst_reg), umax_val));
}
static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -14372,15 +13787,10 @@ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
- reg_set_urange64(dst_reg, dst_reg->var_off.value, min(reg_umax(dst_reg), umax_val));
+ reg_set_urange64(dst_reg,
+ dst_reg->var_off.value,
+ min(reg_umax(dst_reg), umax_val));
- /* Safe to set s64 bounds by casting u64 result into s64 when u64
- * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
- */
- if ((s64)reg_umin(dst_reg) <= (s64)reg_umax(dst_reg))
- reg_set_srange64(dst_reg, reg_umin(dst_reg), reg_umax(dst_reg));
- else
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
}
@@ -14401,16 +13811,9 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
- reg_set_urange32(dst_reg, max(reg_u32_min(dst_reg), umin_val),
+ reg_set_urange32(dst_reg,
+ max(reg_u32_min(dst_reg), umin_val),
var32_off.value | var32_off.mask);
-
- /* Safe to set s32 bounds by casting u32 result into s32 when u32
- * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
- */
- if ((s32)reg_u32_min(dst_reg) <= (s32)reg_u32_max(dst_reg))
- reg_set_srange32(dst_reg, reg_u32_min(dst_reg), reg_u32_max(dst_reg));
- else
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
}
static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
@@ -14428,16 +13831,10 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
- reg_set_urange64(dst_reg, max(reg_umin(dst_reg), umin_val),
+ reg_set_urange64(dst_reg,
+ max(reg_umin(dst_reg), umin_val),
dst_reg->var_off.value | dst_reg->var_off.mask);
- /* Safe to set s64 bounds by casting u64 result into s64 when u64
- * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
- */
- if ((s64)reg_umin(dst_reg) <= (s64)reg_umax(dst_reg))
- reg_set_srange64(dst_reg, reg_umin(dst_reg), reg_umax(dst_reg));
- else
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
}
@@ -14456,14 +13853,6 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
/* We get both minimum and maximum from the var32_off. */
reg_set_urange32(dst_reg, var32_off.value, var32_off.value | var32_off.mask);
-
- /* Safe to set s32 bounds by casting u32 result into s32 when u32
- * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded.
- */
- if ((s32)reg_u32_min(dst_reg) <= (s32)reg_u32_max(dst_reg))
- reg_set_srange32(dst_reg, reg_u32_min(dst_reg), reg_u32_max(dst_reg));
- else
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
}
static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
@@ -14479,31 +13868,21 @@ static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
}
/* We get both minimum and maximum from the var_off. */
- reg_set_urange64(dst_reg, dst_reg->var_off.value,
+ reg_set_urange64(dst_reg,
+ dst_reg->var_off.value,
dst_reg->var_off.value | dst_reg->var_off.mask);
-
- /* Safe to set s64 bounds by casting u64 result into s64 when u64
- * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded.
- */
- if ((s64)reg_umin(dst_reg) <= (s64)reg_umax(dst_reg))
- reg_set_srange64(dst_reg, reg_umin(dst_reg), reg_umax(dst_reg));
- else
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
-
- __update_reg_bounds(dst_reg);
}
static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
u64 umin_val, u64 umax_val)
{
- /* We lose all sign bit information (except what we can pick
- * up from var_off)
- */
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
/* If we might shift our top bit out, then we know nothing */
if (umax_val > 31 || reg_u32_max(dst_reg) > 1ULL << (31 - umax_val))
reg_set_urange32(dst_reg, 0, U32_MAX);
else
+ /* We lose all sign bit information (except what we can pick
+ * up from var_off)
+ */
reg_set_urange32(dst_reg, reg_u32_min(dst_reg) << umin_val,
reg_u32_max(dst_reg) << umax_val);
}
@@ -14529,23 +13908,27 @@ static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
u64 umin_val, u64 umax_val)
{
+ struct cnum64 u, s;
+
/* Special case <<32 because it is a common compiler pattern to sign
* extend subreg by doing <<32 s>>32. smin/smax assignments are correct
* because s32 bounds don't flip sign when shifting to the left by
* 32bits.
*/
if (umin_val == 32 && umax_val == 32)
- reg_set_srange64(dst_reg, (s64)reg_s32_min(dst_reg) << 32,
- (s64)reg_s32_max(dst_reg) << 32);
+ s = cnum64_from_srange((s64)reg_s32_min(dst_reg) << 32,
+ (s64)reg_s32_max(dst_reg) << 32);
else
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
+ s = CNUM64_UNBOUNDED;
/* If we might shift our top bit out, then we know nothing */
if (reg_umax(dst_reg) > 1ULL << (63 - umax_val))
- reg_set_urange64(dst_reg, 0, U64_MAX);
+ u = CNUM64_UNBOUNDED;
else
- reg_set_urange64(dst_reg, reg_umin(dst_reg) << umin_val,
- reg_umax(dst_reg) << umax_val);
+ u = cnum64_from_urange(reg_umin(dst_reg) << umin_val,
+ reg_umax(dst_reg) << umax_val);
+
+ dst_reg->r64 = cnum64_intersect(u, s);
}
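
This makes the <<32 special case compositional: the signed arc from the
sign-extension pattern and the shifted unsigned arc are built
independently and intersected, with the shorter arc winning when they
overlap twice (see the verifier_subreg.c expectation updates below,
where the old umax=0xffffffff00000000 annotation disappears).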
static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
@@ -14584,7 +13967,6 @@ static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
- reg_set_srange32(dst_reg, S32_MIN, S32_MAX);
dst_reg->var_off = tnum_rshift(subreg, umin_val);
reg_set_urange32(dst_reg, reg_u32_min(dst_reg) >> umax_val,
@@ -14614,7 +13996,6 @@ static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
- reg_set_srange64(dst_reg, S64_MIN, S64_MAX);
dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
reg_set_urange64(dst_reg, reg_umin(dst_reg) >> umax_val,
reg_umax(dst_reg) >> umin_val);
@@ -14634,6 +14015,8 @@ static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
/* Upon reaching here, src_known is true and
* umax_val is equal to umin_val.
+ * The unsigned range is no longer reset separately; rely on
+ * dst_reg var_off to refine the result.
*/
reg_set_srange32(dst_reg,
(u32)(((s32)reg_s32_min(dst_reg)) >> umin_val),
@@ -14641,11 +14024,6 @@ static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
- /* blow away the dst_reg umin_value/umax_value and rely on
- * dst_reg var_off to refine the result.
- */
- reg_set_urange32(dst_reg, 0, U32_MAX);
-
__mark_reg64_unbounded(dst_reg);
__update_reg32_bounds(dst_reg);
}
@@ -14663,11 +14041,6 @@ static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
- /* blow away the dst_reg umin_value/umax_value and rely on
- * dst_reg var_off to refine the result.
- */
- reg_set_urange64(dst_reg, 0, U64_MAX);
-
/* Its not easy to operate on alu32 bounds here because it depends
* on bits being shifted in from upper 32-bits. Take easy way out
* and mark unbounded so we can recalculate later from tnum.
@@ -15737,6 +15110,8 @@ static u8 rev_opcode(u8 opcode)
static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
u8 opcode, bool is_jmp32)
{
+ struct cnum32 lo32, hi32;
+ struct cnum64 lo, hi;
struct tnum t;
u64 val;
@@ -15756,23 +15131,15 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
switch (opcode) {
case BPF_JEQ:
if (is_jmp32) {
- reg_set_urange32(reg1, max(reg_u32_min(reg1), reg_u32_min(reg2)),
- min(reg_u32_max(reg1), reg_u32_max(reg2)));
- reg_set_srange32(reg1, max(reg_s32_min(reg1), reg_s32_min(reg2)),
- min(reg_s32_max(reg1), reg_s32_max(reg2)));
- reg_set_urange32(reg2, reg_u32_min(reg1), reg_u32_max(reg1));
- reg_set_srange32(reg2, reg_s32_min(reg1), reg_s32_max(reg1));
+ reg1->r32 = cnum32_intersect(reg1->r32, reg2->r32);
+ reg2->r32 = reg1->r32;
t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off));
reg1->var_off = tnum_with_subreg(reg1->var_off, t);
reg2->var_off = tnum_with_subreg(reg2->var_off, t);
} else {
- reg_set_urange64(reg1, max(reg_umin(reg1), reg_umin(reg2)),
- min(reg_umax(reg1), reg_umax(reg2)));
- reg_set_srange64(reg1, max(reg_smin(reg1), reg_smin(reg2)),
- min(reg_smax(reg1), reg_smax(reg2)));
- reg_set_urange64(reg2, reg_umin(reg1), reg_umax(reg1));
- reg_set_srange64(reg2, reg_smin(reg1), reg_smax(reg1));
+ reg1->r64 = cnum64_intersect(reg1->r64, reg2->r64);
+ reg2->r64 = reg1->r64;
reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off);
reg2->var_off = reg1->var_off;
@@ -15789,32 +15156,13 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
*/
val = reg_const_value(reg2, is_jmp32);
if (is_jmp32) {
- /* u32_min is not equal to 0xffffffff at this point,
- * because otherwise u32_max is 0xffffffff as well,
- * in such a case both reg1 and reg2 would be constants,
- * jump would be predicted and regs_refine_cond_op()
- * wouldn't be called.
- *
- * Same reasoning works for all {u,s}{min,max}{32,64} cases
- * below.
- */
- if (reg_u32_min(reg1) == (u32)val)
- reg_set_urange32(reg1, reg_u32_min(reg1) + 1, reg_u32_max(reg1));
- if (reg_u32_max(reg1) == (u32)val)
- reg_set_urange32(reg1, reg_u32_min(reg1), reg_u32_max(reg1) - 1);
- if (reg_s32_min(reg1) == (s32)val)
- reg_set_srange32(reg1, reg_s32_min(reg1) + 1, reg_s32_max(reg1));
- if (reg_s32_max(reg1) == (s32)val)
- reg_set_srange32(reg1, reg_s32_min(reg1), reg_s32_max(reg1) - 1);
+ /* Complement of the range [val, val] as cnum32. */
+ lo32 = (struct cnum32){ (u32)val + 1, U32_MAX - 1 };
+ reg1->r32 = cnum32_intersect(reg1->r32, lo32);
} else {
- if (reg_umin(reg1) == (u64)val)
- reg_set_urange64(reg1, reg_umin(reg1) + 1, reg_umax(reg1));
- if (reg_umax(reg1) == (u64)val)
- reg_set_urange64(reg1, reg_umin(reg1), reg_umax(reg1) - 1);
- if (reg_smin(reg1) == (s64)val)
- reg_set_srange64(reg1, reg_smin(reg1) + 1, reg_smax(reg1));
- if (reg_smax(reg1) == (s64)val)
- reg_set_srange64(reg1, reg_smin(reg1), reg_smax(reg1) - 1);
+ /* Complement of the range [val, val] as cnum64. */
+ lo = (struct cnum64){ val + 1, U64_MAX - 1 };
+ reg1->r64 = cnum64_intersect(reg1->r64, lo);
}
break;
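
For example, with a 32-bit val of 5 the complement arc is base 6, size
U32_MAX - 1, i.e. the wrapping interval [6, 4] holding every u32 except
5; a single intersection with reg1->r32 replaces all four of the old
end-point adjustments.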
case BPF_JSET:
@@ -15861,38 +15209,54 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
break;
case BPF_JLE:
if (is_jmp32) {
- reg_set_urange32(reg1, reg_u32_min(reg1), min(reg_u32_max(reg1), reg_u32_max(reg2)));
- reg_set_urange32(reg2, max(reg_u32_min(reg1), reg_u32_min(reg2)), reg_u32_max(reg2));
+ lo32 = cnum32_from_urange(0, reg_u32_max(reg2));
+ hi32 = cnum32_from_urange(reg_u32_min(reg1), U32_MAX);
+ reg1->r32 = cnum32_intersect(reg1->r32, lo32);
+ reg2->r32 = cnum32_intersect(reg2->r32, hi32);
} else {
- reg_set_urange64(reg1, reg_umin(reg1), min(reg_umax(reg1), reg_umax(reg2)));
- reg_set_urange64(reg2, max(reg_umin(reg1), reg_umin(reg2)), reg_umax(reg2));
+ lo = cnum64_from_urange(0, reg_umax(reg2));
+ hi = cnum64_from_urange(reg_umin(reg1), U64_MAX);
+ reg1->r64 = cnum64_intersect(reg1->r64, lo);
+ reg2->r64 = cnum64_intersect(reg2->r64, hi);
}
break;
case BPF_JLT:
if (is_jmp32) {
- reg_set_urange32(reg1, reg_u32_min(reg1), min(reg_u32_max(reg1), reg_u32_max(reg2) - 1));
- reg_set_urange32(reg2, max(reg_u32_min(reg1) + 1, reg_u32_min(reg2)), reg_u32_max(reg2));
+ lo32 = cnum32_from_urange(0, reg_u32_max(reg2) - 1);
+ hi32 = cnum32_from_urange(reg_u32_min(reg1) + 1, U32_MAX);
+ reg1->r32 = cnum32_intersect(reg1->r32, lo32);
+ reg2->r32 = cnum32_intersect(reg2->r32, hi32);
} else {
- reg_set_urange64(reg1, reg_umin(reg1), min(reg_umax(reg1), reg_umax(reg2) - 1));
- reg_set_urange64(reg2, max(reg_umin(reg1) + 1, reg_umin(reg2)), reg_umax(reg2));
+ lo = cnum64_from_urange(0, reg_umax(reg2) - 1);
+ hi = cnum64_from_urange(reg_umin(reg1) + 1, U64_MAX);
+ reg1->r64 = cnum64_intersect(reg1->r64, lo);
+ reg2->r64 = cnum64_intersect(reg2->r64, hi);
}
break;
case BPF_JSLE:
if (is_jmp32) {
- reg_set_srange32(reg1, reg_s32_min(reg1), min(reg_s32_max(reg1), reg_s32_max(reg2)));
- reg_set_srange32(reg2, max(reg_s32_min(reg1), reg_s32_min(reg2)), reg_s32_max(reg2));
+ lo32 = cnum32_from_srange(S32_MIN, reg_s32_max(reg2));
+ hi32 = cnum32_from_srange(reg_s32_min(reg1), S32_MAX);
+ reg1->r32 = cnum32_intersect(reg1->r32, lo32);
+ reg2->r32 = cnum32_intersect(reg2->r32, hi32);
} else {
- reg_set_srange64(reg1, reg_smin(reg1), min(reg_smax(reg1), reg_smax(reg2)));
- reg_set_srange64(reg2, max(reg_smin(reg1), reg_smin(reg2)), reg_smax(reg2));
+ lo = cnum64_from_srange(S64_MIN, reg_smax(reg2));
+ hi = cnum64_from_srange(reg_smin(reg1), S64_MAX);
+ reg1->r64 = cnum64_intersect(reg1->r64, lo);
+ reg2->r64 = cnum64_intersect(reg2->r64, hi);
}
break;
case BPF_JSLT:
if (is_jmp32) {
- reg_set_srange32(reg1, reg_s32_min(reg1), min(reg_s32_max(reg1), reg_s32_max(reg2) - 1));
- reg_set_srange32(reg2, max(reg_s32_min(reg1) + 1, reg_s32_min(reg2)), reg_s32_max(reg2));
+ lo32 = cnum32_from_srange(S32_MIN, reg_s32_max(reg2) - 1);
+ hi32 = cnum32_from_srange(reg_s32_min(reg1) + 1, S32_MAX);
+ reg1->r32 = cnum32_intersect(reg1->r32, lo32);
+ reg2->r32 = cnum32_intersect(reg2->r32, hi32);
} else {
- reg_set_srange64(reg1, reg_smin(reg1), min(reg_smax(reg1), reg_smax(reg2) - 1));
- reg_set_srange64(reg2, max(reg_smin(reg1) + 1, reg_smin(reg2)), reg_smax(reg2));
+ lo = cnum64_from_srange(S64_MIN, reg_smax(reg2) - 1);
+ hi = cnum64_from_srange(reg_smin(reg1) + 1, S64_MAX);
+ reg1->r64 = cnum64_intersect(reg1->r64, lo);
+ reg2->r64 = cnum64_intersect(reg2->r64, hi);
}
break;
default:
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 71f5240cc5b7..7f170a69d1d8 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -478,6 +478,52 @@ static struct range range_refine_in_halves(enum num_t x_t, struct range x,
}
+static __always_inline u64 next_u32_block(u64 x) { return x + (1ULL << 32); }
+static __always_inline u64 prev_u32_block(u64 x) { return x - (1ULL << 32); }
+
+/* Is v within the circular u64 range [base, base + len]? */
+static __always_inline bool u64_range_contains(u64 v, u64 base, u64 len)
+{
+ return v - base <= len;
+}
+
+/* Is v within the circular u32 range [base, base + len]? */
+static __always_inline bool u32_range_contains(u32 v, u32 base, u32 len)
+{
+ return v - base <= len;
+}
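
Both membership helpers lean on wraparound: v - base is the distance
walked clockwise from base to v, so v lies on the arc iff that distance
is at most len. E.g., base = 0xfffffffe with len = 3 contains v = 1,
since 1 - 0xfffffffe wraps to 3.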
+
+static bool range64_range32_intersect(enum num_t a_t,
+ struct range a /* 64 */,
+ struct range b /* 32 */,
+ struct range *out /* 64 */)
+{
+ u64 b_len = (u32)(b.b - b.a);
+ u64 a_len = a.b - a.a;
+ u64 lo, hi;
+
+ if (u32_range_contains((u32)a.a, (u32)b.a, b_len)) {
+ lo = a.a;
+ } else {
+ lo = swap_low32(a.a, (u32)b.a);
+ if (!u64_range_contains(lo, a.a, a_len))
+ lo = next_u32_block(lo);
+ if (!u64_range_contains(lo, a.a, a_len))
+ return false;
+ }
+ if (u32_range_contains((u32)a.b, (u32)b.a, b_len)) {
+ hi = a.b;
+ } else {
+ hi = swap_low32(a.b, (u32)b.b);
+ if (!u64_range_contains(hi, a.a, a_len))
+ hi = prev_u32_block(hi);
+ if (!u64_range_contains(hi, a.a, a_len))
+ return false;
+ }
+ *out = range(a_t, lo, hi);
+ return true;
+}
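
As a sanity check, this reproduces the example from the deleted
deduce_bounds_64_from_32() comment: for a = [0x1'00000000,
0x3'80000000] and b = [1, 0x7fffffff], (u32)a.a == 0 lies outside b, so
lo = swap_low32(a.a, 1) == 0x1'00000001 (still inside a); (u32)a.b ==
0x80000000 also lies outside b, so hi = swap_low32(a.b, 0x7fffffff) ==
0x3'7fffffff, yielding the same [0x1'00000001, 0x3'7fffffff] the old
comment derived by hand.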
+
static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
{
struct range y_cast;
@@ -533,23 +579,12 @@ static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t,
}
}
- /* the case when new range knowledge, *y*, is a 32-bit subregister
- * range, while previous range knowledge, *x*, is a full register
- * 64-bit range, needs special treatment to take into account upper 32
- * bits of full register range
- */
if (t_is_32(y_t) && !t_is_32(x_t)) {
- struct range x_swap;
+ struct range x1;
- /* some combinations of upper 32 bits and sign bit can lead to
- * invalid ranges, in such cases it's easier to detect them
- * after cast/swap than try to enumerate all the conditions
- * under which transformation and knowledge transfer is valid
- */
- x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
- if (!is_valid_range(x_t, x_swap))
- return x;
- return range_intersection(x_t, x, x_swap);
+ if (range64_range32_intersect(x_t, x, y, &x1))
+ return x1;
+ return x;
}
/* otherwise, plain range cast and intersection works */
@@ -1300,6 +1335,26 @@ static bool assert_range_eq(enum num_t t, struct range x, struct range y,
return false;
}
+/* For a signed/unsigned pair t1/t2, check whether r1/r2 intersect in two intervals. */
+static bool needs_two_arcs(enum num_t t1, struct range r1,
+ enum num_t t2, struct range r2)
+{
+ u64 lo = cast_t(t1, r2.a);
+ u64 hi = cast_t(t1, r2.b);
+
+ /* does r2 wrap in t1's domain: [0, hi] ∪ [lo, MAX]? */
+ return lo > hi && r1.a <= hi && r1.b >= lo;
+}
+
+static bool reg_state_needs_two_arcs(struct reg_state *s)
+{
+ if (!s->valid)
+ return false;
+
+ return needs_two_arcs(U64, s->r[U64], S64, s->r[S64]) ||
+ needs_two_arcs(U32, s->r[U32], S32, s->r[S32]);
+}
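
A concrete instance: u64 = [0, U64_MAX] with s64 = [-1, 1]. Cast into
the unsigned domain, the signed range wraps into [U64_MAX, 1], which
the unsigned range straddles at both ends, so an exact intersection
would need the two arcs a single cnum cannot hold.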
+
/* Validate that register states match, and print details if they don't */
static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx)
{
@@ -1524,6 +1579,11 @@ static int verify_case_op(enum num_t init_t, enum num_t cond_t,
!assert_reg_state_eq(&fr2, &fe2, "false_reg2") ||
!assert_reg_state_eq(&tr1, &te1, "true_reg1") ||
!assert_reg_state_eq(&tr2, &te2, "true_reg2")) {
+ if (reg_state_needs_two_arcs(&fe1) || reg_state_needs_two_arcs(&fe2) ||
+ reg_state_needs_two_arcs(&te1) || reg_state_needs_two_arcs(&te2)) {
+ test__skip();
+ return 0;
+ }
failed = true;
}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c
index c1ae013dee29..f0b3fbbbb627 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bounds.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c
@@ -1239,7 +1239,8 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("multiply mixed sign bounds. test 1")
__success __log_level(2)
-__msg("r6 *= r7 {{.*}}; R6=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
+__msg("r6 *= r7 {{.*}}; R6=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))")
+/* cnum can't represent both [0, 0xffff_feff] and [0x8000_0000, 0x7fff_feff], so it picks one */
__naked void mult_mixed0_sign(void)
{
asm volatile (
@@ -1648,7 +1649,8 @@ l0_%=: r0 = 0; \
SEC("socket")
__description("bounds deduction cross sign boundary, two overlaps")
__failure
-__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)")
+__msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=-128,smax=smax32=127)")
+/* the smin=-128 arc already covers the point 0xffffffffffffff80, so no separate umax */
__msg("frame pointer is read only")
__naked void bounds_deduct_two_overlaps(void)
{
@@ -2043,7 +2045,8 @@ __naked void signed_unsigned_intersection32_case2(void *ctx)
*/
SEC("socket")
__description("bounds refinement: 64bits ranges not overwritten by 32bits ranges")
-__msg("3: (65) if r0 s> 0x2 {{.*}} R0=scalar(smin=0x8000000000000002,smax=2,umin=smin32=umin32=2,umax=0xffffffff00000003,smax32=umax32=3")
+__msg("3: (65) if r0 s> 0x2 {{.*}} R0=scalar(smin=0x8000000000000002,smax=2,smin32=umin32=2,smax32=umax32=3,var_off{{.*}}))")
+/* Can't represent both [S64_MIN+2, 2] and [2, U64_MAX - U32_MAX + 3] at the same time, picks the shorter interval */
__msg("4: (25) if r0 > 0x13 {{.*}} R0=2")
__success __log_level(2)
__naked void refinement_32bounds_not_overwriting_64bounds(void *ctx)
diff --git a/tools/testing/selftests/bpf/progs/verifier_subreg.c b/tools/testing/selftests/bpf/progs/verifier_subreg.c
index 31832a306f91..73b5b0cf6706 100644
--- a/tools/testing/selftests/bpf/progs/verifier_subreg.c
+++ b/tools/testing/selftests/bpf/progs/verifier_subreg.c
@@ -558,7 +558,8 @@ __description("arsh32 imm sign negative extend check")
__success __retval(0)
__log_level(2)
__msg("3: (17) r6 -= 4095 ; R6=scalar(smin=smin32=-4095,smax=smax32=0)")
-__msg("4: (67) r6 <<= 32 ; R6=scalar(smin=0xfffff00100000000,smax=smax32=umax32=0,umax=0xffffffff00000000,smin32=0,var_off=(0x0; 0xffffffff00000000))")
+__msg("4: (67) r6 <<= 32 ; R6=scalar(smin=0xfffff00100000000,smax=smax32=umax32=0,smin32=0,var_off=(0x0; 0xffffffff00000000))")
+/* represents the shorter of the signed and unsigned 64-bit ranges */
__msg("5: (c7) r6 s>>= 32 ; R6=scalar(smin=smin32=-4095,smax=smax32=0)")
__naked void arsh32_imm_sign_extend_negative_check(void)
{
@@ -581,7 +582,8 @@ __description("arsh32 imm sign extend check")
__success __retval(0)
__log_level(2)
__msg("3: (17) r6 -= 2047 ; R6=scalar(smin=smin32=-2047,smax=smax32=2048)")
-__msg("4: (67) r6 <<= 32 ; R6=scalar(smin=0xfffff80100000000,smax=0x80000000000,umax=0xffffffff00000000,smin32=0,smax32=umax32=0,var_off=(0x0; 0xffffffff00000000))")
+__msg("4: (67) r6 <<= 32 ; R6=scalar(smin=0xfffff80100000000,smax=0x80000000000,smin32=0,smax32=umax32=0,var_off=(0x0; 0xffffffff00000000))")
+/* represents the shorter of the signed and unsigned 64-bit ranges */
__msg("5: (c7) r6 s>>= 32 ; R6=scalar(smin=smin32=-2047,smax=smax32=2048)")
__naked void arsh32_imm_sign_extend_check(void)
{
--
2.53.0