public inbox for bpf@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH bpf-next] bpf/tests: Exhaustive test coverage for signed division and modulo
@ 2026-03-26 20:50 Jie Meng
  2026-04-02 22:16 ` Alexei Starovoitov
  0 siblings, 1 reply; 6+ messages in thread
From: Jie Meng @ 2026-03-26 20:50 UTC (permalink / raw)
  To: bpf, ast, andrii, daniel; +Cc: Jie Meng

Extend lib/test_bpf.c to provide comprehensive test coverage for BPF
signed division (SDIV) and signed modulo (SMOD) instructions, both
32-bit and 64-bit variants with immediate operands.

Changes:
- __bpf_alu_result(): add off parameter to select signed vs unsigned
  div/mod semantics. When off != 0, uses div64_s64() / s64 modulo with
  truncation toward zero, matching BPF signed division spec. Skips
  divisor == -1 cases (handled by verifier).
- __bpf_emit_alu64_imm(), __bpf_emit_alu32_imm(): propagate off to
  use BPF_ALU{32,64}_IMM_OFF() instruction encoding and signed result
  computation. For ALU32 signed, cast operands to s32 before computing
  reference results.
- __bpf_fill_alu_imm_regs(): add off parameter, use negative test
  values for signed cases (-76543210 / -7654321076543210LL) to exercise
  sign-related edge cases.
- New test fill functions: bpf_fill_alu{32,64}_{sdiv,smod}_imm() and
  bpf_fill_alu{32,64}_{sdiv,smod}_imm_regs(), each testing all
  immediate value magnitudes and all register pair combinations.
- All existing unsigned tests updated to pass off=0, preserving
  backward compatibility.

8 new test cases added:
  ALU64_SDIV_K, ALU64_SMOD_K (immediate magnitudes + register combos)
  ALU32_SDIV_K, ALU32_SMOD_K (immediate magnitudes + register combos)

Assisted-by: Claude:claude-opus-4-6
Signed-off-by: Jie Meng <jmeng@fb.com>
---
 lib/test_bpf.c | 299 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 226 insertions(+), 73 deletions(-)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5892c0f17ddc..2f1b252f2a50 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -560,7 +560,7 @@ static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
 }
 
 /* ALU result computation used in tests */
-static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op, s16 off)
 {
 	*res = 0;
 	switch (op) {
@@ -599,12 +599,28 @@ static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
 	case BPF_DIV:
 		if (v2 == 0)
 			return false;
-		*res = div64_u64(v1, v2);
+		if (off == 0) {
+			*res = div64_u64(v1, v2);
+		} else {
+			if ((s64)v2 == -1) /* Handled by verifier */
+				return false;
+			*res = (u64)div64_s64(v1, v2);
+		}
 		break;
 	case BPF_MOD:
 		if (v2 == 0)
 			return false;
-		div64_u64_rem(v1, v2, res);
+		if (off == 0) {
+			div64_u64_rem(v1, v2, res);
+		} else {
+			if ((s64)v2 == -1)
+				return false;
+			/*
+			 * Avoid s64 % s64 which generates __moddi3 on
+			 * 32-bit architectures. Use div64_s64 instead.
+			 */
+			*res = (u64)((s64)v1 - div64_s64(v1, v2) * (s64)v2);
+		}
 		break;
 	}
 	return true;
@@ -653,14 +669,14 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 					reg = (s32)reg;
 				else
 					reg = (u32)reg;
-				__bpf_alu_result(&val, reg, imm, op);
+				__bpf_alu_result(&val, reg, imm, op, 0);
 				val = (u32)val;
 			} else {
 				if (mode == BPF_K)
 					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
 				else
 					insn[i++] = BPF_ALU64_REG(op, R1, R2);
-				__bpf_alu_result(&val, reg, imm, op);
+				__bpf_alu_result(&val, reg, imm, op, 0);
 			}
 
 			/*
@@ -776,7 +792,7 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
 			insn[i++] = BPF_ALU64_REG(op, R1, R1);
 
 		/* Compute the reference result */
-		__bpf_alu_result(&res, val, val, op);
+		__bpf_alu_result(&res, val, val, op, 0);
 		if (alu32)
 			res = (u32)res;
 		i += __bpf_ld_imm64(&insn[i], R2, res);
@@ -936,17 +952,19 @@ static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
 static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
-	int op = *(int *)arg;
+	int *a = arg;
+	int op = a[0];
+	s16 off = a[1];
 	int i = 0;
 	u64 res;
 
 	if (!insns)
 		return 7;
 
-	if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+	if (__bpf_alu_result(&res, dst, (s32)imm, op, off)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R3, res);
-		insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+		insns[i++] = BPF_ALU64_IMM_OFF(op, R1, imm, off);
 		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
 		insns[i++] = BPF_EXIT_INSN();
 	}
@@ -957,17 +975,28 @@ static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
-	int op = *(int *)arg;
+	int *a = arg;
+	int op = a[0];
+	s16 off = a[1];
 	int i = 0;
 	u64 res;
+	u64 v1, v2;
 
 	if (!insns)
 		return 7;
 
-	if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+	if (off) {
+		v1 = (s32)dst;
+		v2 = (s32)imm;
+	} else {
+		v1 = (u32)dst;
+		v2 = (u32)imm;
+	}
+
+	if (__bpf_alu_result(&res, v1, v2, op, off)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
-		insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+		insns[i++] = BPF_ALU32_IMM_OFF(op, R1, imm, off);
 		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
 		insns[i++] = BPF_EXIT_INSN();
 	}
@@ -985,7 +1014,7 @@ static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
 	if (!insns)
 		return 9;
 
-	if (__bpf_alu_result(&res, dst, src, op)) {
+	if (__bpf_alu_result(&res, dst, src, op, 0)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R2, src);
 		i += __bpf_ld_imm64(&insns[i], R3, res);
@@ -1007,7 +1036,7 @@ static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
 	if (!insns)
 		return 9;
 
-	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op, 0)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R2, src);
 		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
@@ -1019,16 +1048,20 @@ static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
 	return i;
 }
 
-static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op, s16 off)
 {
-	return __bpf_fill_pattern(self, &op, 64, 32,
+	int arg[2] = {op, off};
+
+	return __bpf_fill_pattern(self, &arg, 64, 32,
 				  PATTERN_BLOCK1, PATTERN_BLOCK2,
 				  &__bpf_emit_alu64_imm);
 }
 
-static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op, s16 off)
 {
-	return __bpf_fill_pattern(self, &op, 64, 32,
+	int arg[2] = {op, off};
+
+	return __bpf_fill_pattern(self, &arg, 64, 32,
 				  PATTERN_BLOCK1, PATTERN_BLOCK2,
 				  &__bpf_emit_alu32_imm);
 }
@@ -1050,93 +1083,115 @@ static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
 /* ALU64 immediate operations */
 static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MOV);
+	return __bpf_fill_alu64_imm(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu64_and_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_AND);
+	return __bpf_fill_alu64_imm(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu64_or_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_OR);
+	return __bpf_fill_alu64_imm(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_XOR);
+	return __bpf_fill_alu64_imm(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu64_add_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_ADD);
+	return __bpf_fill_alu64_imm(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_SUB);
+	return __bpf_fill_alu64_imm(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MUL);
+	return __bpf_fill_alu64_imm(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu64_div_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_DIV);
+	return __bpf_fill_alu64_imm(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MOD);
+	return __bpf_fill_alu64_imm(self, BPF_MOD, 0);
+}
+
+/* Signed ALU64 immediate operations */
+static int bpf_fill_alu64_sdiv_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_DIV, 1);
+}
+
+static int bpf_fill_alu64_smod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_MOD, 1);
+}
+
+/* Signed ALU32 immediate operations */
+static int bpf_fill_alu32_sdiv_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_DIV, 1);
+}
+
+static int bpf_fill_alu32_smod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_MOD, 1);
 }
 
 /* ALU32 immediate operations */
 static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MOV);
+	return __bpf_fill_alu32_imm(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu32_and_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_AND);
+	return __bpf_fill_alu32_imm(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu32_or_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_OR);
+	return __bpf_fill_alu32_imm(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_XOR);
+	return __bpf_fill_alu32_imm(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu32_add_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_ADD);
+	return __bpf_fill_alu32_imm(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_SUB);
+	return __bpf_fill_alu32_imm(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MUL);
+	return __bpf_fill_alu32_imm(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu32_div_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_DIV);
+	return __bpf_fill_alu32_imm(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MOD);
+	return __bpf_fill_alu32_imm(self, BPF_MOD, 0);
 }
 
 /* ALU64 register operations */
@@ -1235,7 +1290,8 @@ static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
  * Test JITs that implement complex ALU operations as function
  * calls, and must re-arrange operands for argument passing.
  */
-static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op,
+				    bool alu32, s16 off)
 {
 	int len = 2 + 10 * 10;
 	struct bpf_insn *insns;
@@ -1249,16 +1305,23 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 		return -ENOMEM;
 
 	/* Operand and result values according to operation */
-	if (alu32)
-		dst = 0x76543210U;
-	else
-		dst = 0x7edcba9876543210ULL;
+	if (off) {
+		if (alu32)
+			dst = -76543210;
+		else
+			dst = -7654321076543210LL;
+	} else {
+		if (alu32)
+			dst = 0x76543210U;
+		else
+			dst = 0x7edcba9876543210ULL;
+	}
 	imm = 0x01234567U;
 
 	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
 		imm &= 31;
 
-	__bpf_alu_result(&res, dst, imm, op);
+	__bpf_alu_result(&res, dst, imm, op, off);
 
 	if (alu32)
 		res = (u32)res;
@@ -1268,9 +1331,9 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 		i += __bpf_ld_imm64(&insns[i], rd, dst);
 
 		if (alu32)
-			insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+			insns[i++] = BPF_ALU32_IMM_OFF(op, rd, imm, off);
 		else
-			insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+			insns[i++] = BPF_ALU64_IMM_OFF(op, rd, imm, off);
 
 		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
 		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
@@ -1295,123 +1358,145 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 /* ALU64 K registers */
 static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false, 0);
 }
 
 static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, false, 0);
 }
 
 static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, false, 0);
 }
 
 static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false, 0);
 }
 
 static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false, 0);
 }
 
 static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false, 0);
 }
 
 static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false, 0);
 }
 
 static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false, 0);
 }
 
 static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false, 0);
 }
 
 static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false, 0);
 }
 
 static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false, 0);
 }
 
 static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false, 0);
+}
+
+/* Signed ALU64 K registers */
+static int bpf_fill_alu64_sdiv_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false, 1);
+}
+
+static int bpf_fill_alu64_smod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false, 1);
 }
 
 /* ALU32 K registers */
 static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true, 0);
 }
 
 static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, true, 0);
 }
 
 static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, true, 0);
 }
 
 static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true, 0);
 }
 
 static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true, 0);
 }
 
 static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true, 0);
 }
 
 static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true, 0);
 }
 
 static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true, 0);
 }
 
 static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true, 0);
 }
 
 static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true, 0);
 }
 
 static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true, 0);
 }
 
 static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true, 0);
+}
+
+/* Signed ALU32 K registers */
+static int bpf_fill_alu32_sdiv_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true, 1);
+}
+
+static int bpf_fill_alu32_smod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true, 1);
 }
 
 /*
@@ -1442,8 +1527,8 @@ static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
 	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
 		src &= 31;
 
-	__bpf_alu_result(&res, dst, src, op);
-	__bpf_alu_result(&same, src, src, op);
+	__bpf_alu_result(&res, dst, src, op, 0);
+	__bpf_alu_result(&same, src, src, op, 0);
 
 	if (alu32) {
 		res = (u32)res;
@@ -1626,7 +1711,7 @@ static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
 		res = src;
 		break;
 	default:
-		__bpf_alu_result(&res, dst, src, BPF_OP(op));
+		__bpf_alu_result(&res, dst, src, BPF_OP(op), 0);
 	}
 
 	keep = 0x0123456789abcdefULL;
@@ -1673,7 +1758,7 @@ static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
 		res = src;
 		break;
 	default:
-		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op), 0);
 	}
 
 	keep = 0x0123456789abcdefULL;
@@ -1939,7 +2024,7 @@ static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
 		res = mem;
 		break;
 	default:
-		__bpf_alu_result(&res, mem, upd, BPF_OP(op));
+		__bpf_alu_result(&res, mem, upd, BPF_OP(op), 0);
 	}
 
 	/* Test all operand registers */
@@ -12354,6 +12439,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_alu64_mod_imm_regs,
 	},
+	{
+		"ALU64_SDIV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sdiv_imm_regs,
+	},
+	{
+		"ALU64_SMOD_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_smod_imm_regs,
+	},
 	/* ALU32 K registers */
 	{
 		"ALU32_MOV_K: registers",
@@ -12451,6 +12552,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_alu32_mod_imm_regs,
 	},
+	{
+		"ALU32_SDIV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sdiv_imm_regs,
+	},
+	{
+		"ALU32_SMOD_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_smod_imm_regs,
+	},
 	/* ALU64 X register combinations */
 	{
 		"ALU64_MOV_X: register combinations",
@@ -12881,6 +12998,24 @@ static struct bpf_test tests[] = {
 		.fill_helper = bpf_fill_alu64_mod_imm,
 		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	{
+		"ALU64_SDIV_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sdiv_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ALU64_SMOD_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_smod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* ALU32 immediate magnitudes */
 	{
 		"ALU32_MOV_K: all immediate value magnitudes",
@@ -12963,6 +13098,24 @@ static struct bpf_test tests[] = {
 		.fill_helper = bpf_fill_alu32_mod_imm,
 		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	{
+		"ALU32_SDIV_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sdiv_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ALU32_SMOD_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_smod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* ALU64 register magnitudes */
 	{
 		"ALU64_MOV_X: all register value magnitudes",
-- 
2.52.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next] bpf/tests: Exhaustive test coverage for signed division and modulo
  2026-03-26 20:50 [PATCH bpf-next] bpf/tests: Exhaustive test coverage for signed division and modulo Jie Meng
@ 2026-04-02 22:16 ` Alexei Starovoitov
  2026-04-06 20:39   ` [PATCH v2] " Jie Meng
  0 siblings, 1 reply; 6+ messages in thread
From: Alexei Starovoitov @ 2026-04-02 22:16 UTC (permalink / raw)
  To: Jie Meng; +Cc: bpf, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann

On Thu, Mar 26, 2026 at 1:51 PM Jie Meng <jmeng@fb.com> wrote:
>
> Extend lib/test_bpf.c to provide comprehensive test coverage for BPF
> signed division (SDIV) and signed modulo (SMOD) instructions, both
> 32-bit and 64-bit variants with immediate operands.
>
> Changes:
> - __bpf_alu_result(): add off parameter to select signed vs unsigned
>   div/mod semantics. When off != 0, uses div64_s64() / s64 modulo with
>   truncation toward zero, matching BPF signed division spec. Skips
>   divisor == -1 cases (handled by verifier).

The ISA uses 'off' in its encoding, but that's not a reason to
name variables in such a cryptic way.

Add another bool for signed vs unsigned, or better yet
convert bool alu32 into 'u32 flags' and have meaningful names
enum {
  F_ALU32,
  F_SIGNED
};

then all of the callsites will be more readable too.
true, 0
true, 1
false, 0
false, 1
are hard to read.

pw-bot: cr

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v2] bpf/tests: Exhaustive test coverage for signed division and modulo
  2026-04-02 22:16 ` Alexei Starovoitov
@ 2026-04-06 20:39   ` Jie Meng
  2026-04-11 17:40     ` Alexei Starovoitov
  0 siblings, 1 reply; 6+ messages in thread
From: Jie Meng @ 2026-04-06 20:39 UTC (permalink / raw)
  To: alexei.starovoitov; +Cc: andrii, ast, bpf, daniel, Jie Meng

Extend lib/test_bpf.c to provide comprehensive test coverage for BPF
signed division (SDIV) and signed modulo (SMOD) instructions, both
32-bit and 64-bit variants with immediate operands.

Introduce F_ALU32 and F_SIGNED flags to replace the less readable
bool alu32 and s16 off parameters throughout the test helpers. The
BPF instruction 'off' field is derived from flags only at the point
of instruction encoding.

Changes:
- Add enum { F_ALU32 = 1, F_SIGNED = 2 } for readable test flags.
- __bpf_alu_result(): add is_signed parameter to select signed vs
  unsigned div/mod semantics. When is_signed, uses div64_s64() / s64
  modulo with truncation toward zero, matching BPF signed division
  spec. Skips divisor == -1 cases (handled by verifier).
- __bpf_emit_alu64_imm(), __bpf_emit_alu32_imm(): extract flags from
  arg array, derive is_signed and off for instruction encoding.
  For ALU32 signed, cast operands to s32 before computing reference
  results.
- __bpf_fill_alu_imm_regs(): take u32 flags instead of bool alu32
  and s16 off. Use F_ALU32/F_SIGNED for operand setup and result
  computation. Properly cast operands to (s32) for ALU32 signed
  cases to match 32-bit signed division semantics.
- __bpf_fill_alu_shift(), __bpf_fill_alu_shift_same_reg(): convert
  bool alu32 parameter to u32 flags for consistency.
- New test fill functions: bpf_fill_alu{32,64}_{sdiv,smod}_imm() and
  bpf_fill_alu{32,64}_{sdiv,smod}_imm_regs(), each testing all
  immediate value magnitudes and all register pair combinations.
- All existing unsigned tests updated to use flags (0 or F_ALU32),
  preserving backward compatibility.

8 new test cases added:
  ALU64_SDIV_K, ALU64_SMOD_K (immediate magnitudes + register combos)
  ALU32_SDIV_K, ALU32_SMOD_K (immediate magnitudes + register combos)

Test results:
  test_bpf: Summary: 1061 PASSED, 0 FAILED, [1049/1049 JIT'ed]
  test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
  test_bpf: test_skb_segment: Summary: 2 PASSED, 0 FAILED

Assisted-by: Claude:claude-opus-4-6
Signed-off-by: Jie Meng <jmeng@fb.com>
---
v1 -> v2: addressed Alexei's comments about readability

 lib/test_bpf.c | 366 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 266 insertions(+), 100 deletions(-)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5892c0f17ddc..8f29bbbf810e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -560,7 +560,9 @@ static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
 }
 
 /* ALU result computation used in tests */
-static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+enum { F_ALU32 = 1, F_SIGNED = 2 };
+
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op, bool is_signed)
 {
 	*res = 0;
 	switch (op) {
@@ -599,12 +601,28 @@ static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
 	case BPF_DIV:
 		if (v2 == 0)
 			return false;
-		*res = div64_u64(v1, v2);
+		if (!is_signed) {
+			*res = div64_u64(v1, v2);
+		} else {
+			if ((s64)v2 == -1) /* Handled by verifier */
+				return false;
+			*res = (u64)div64_s64(v1, v2);
+		}
 		break;
 	case BPF_MOD:
 		if (v2 == 0)
 			return false;
-		div64_u64_rem(v1, v2, res);
+		if (!is_signed) {
+			div64_u64_rem(v1, v2, res);
+		} else {
+			if ((s64)v2 == -1)
+				return false;
+			/*
+			 * Avoid s64 % s64 which generates __moddi3 on
+			 * 32-bit architectures. Use div64_s64 instead.
+			 */
+			*res = (u64)((s64)v1 - div64_s64(v1, v2) * (s64)v2);
+		}
 		break;
 	}
 	return true;
@@ -612,7 +630,7 @@ static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
 
 /* Test an ALU shift operation for all valid shift values */
 static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
-				u8 mode, bool alu32)
+				u8 mode, u32 flags)
 {
 	static const s64 regs[] = {
 		0x0123456789abcdefLL, /* dword > 0, word < 0 */
@@ -620,7 +638,7 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 		0xfedcba0198765432LL, /* dword < 0, word < 0 */
 		0x0123458967abcdefLL, /* dword > 0, word > 0 */
 	};
-	int bits = alu32 ? 32 : 64;
+	int bits = (flags & F_ALU32) ? 32 : 64;
 	int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
 	struct bpf_insn *insn;
 	int imm, k;
@@ -643,7 +661,7 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 			/* Perform operation */
 			insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
 			insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
-			if (alu32) {
+			if (flags & F_ALU32) {
 				if (mode == BPF_K)
 					insn[i++] = BPF_ALU32_IMM(op, R1, imm);
 				else
@@ -653,14 +671,14 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 					reg = (s32)reg;
 				else
 					reg = (u32)reg;
-				__bpf_alu_result(&val, reg, imm, op);
+				__bpf_alu_result(&val, reg, imm, op, false);
 				val = (u32)val;
 			} else {
 				if (mode == BPF_K)
 					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
 				else
 					insn[i++] = BPF_ALU64_REG(op, R1, R2);
-				__bpf_alu_result(&val, reg, imm, op);
+				__bpf_alu_result(&val, reg, imm, op, false);
 			}
 
 			/*
@@ -688,62 +706,62 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 
 static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, 0);
 }
 
 static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, 0);
 }
 
 static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, 0);
 }
 
 static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, 0);
 }
 
 static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, 0);
 }
 
 static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, 0);
 }
 
 static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, F_ALU32);
 }
 
 static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, F_ALU32);
 }
 
 /*
@@ -751,9 +769,9 @@ static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
  * for the case when the source and destination are the same.
  */
 static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
-					 bool alu32)
+					 u32 flags)
 {
-	int bits = alu32 ? 32 : 64;
+	int bits = (flags & F_ALU32) ? 32 : 64;
 	int len = 3 + 6 * bits;
 	struct bpf_insn *insn;
 	int i = 0;
@@ -770,14 +788,14 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
 
 		/* Perform operation */
 		insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
-		if (alu32)
+		if (flags & F_ALU32)
 			insn[i++] = BPF_ALU32_REG(op, R1, R1);
 		else
 			insn[i++] = BPF_ALU64_REG(op, R1, R1);
 
 		/* Compute the reference result */
-		__bpf_alu_result(&res, val, val, op);
-		if (alu32)
+		__bpf_alu_result(&res, val, val, op, false);
+		if (flags & F_ALU32)
 			res = (u32)res;
 		i += __bpf_ld_imm64(&insn[i], R2, res);
 
@@ -798,32 +816,32 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
 
 static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, 0);
 }
 
 static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, 0);
 }
 
 static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, 0);
 }
 
 static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, F_ALU32);
 }
 
 /*
@@ -936,17 +954,21 @@ static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
 static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
-	int op = *(int *)arg;
+	int *a = arg;
+	int op = a[0];
+	u32 flags = a[1];
+	bool is_signed = flags & F_SIGNED;
+	s16 off = is_signed ? 1 : 0;
 	int i = 0;
 	u64 res;
 
 	if (!insns)
 		return 7;
 
-	if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+	if (__bpf_alu_result(&res, dst, (s32)imm, op, is_signed)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R3, res);
-		insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+		insns[i++] = BPF_ALU64_IMM_OFF(op, R1, imm, off);
 		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
 		insns[i++] = BPF_EXIT_INSN();
 	}
@@ -957,17 +979,30 @@ static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
-	int op = *(int *)arg;
+	int *a = arg;
+	int op = a[0];
+	u32 flags = a[1];
+	bool is_signed = flags & F_SIGNED;
+	s16 off = is_signed ? 1 : 0;
 	int i = 0;
 	u64 res;
+	u64 v1, v2;
 
 	if (!insns)
 		return 7;
 
-	if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+	if (is_signed) {
+		v1 = (s32)dst;
+		v2 = (s32)imm;
+	} else {
+		v1 = (u32)dst;
+		v2 = (u32)imm;
+	}
+
+	if (__bpf_alu_result(&res, v1, v2, op, is_signed)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
-		insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+		insns[i++] = BPF_ALU32_IMM_OFF(op, R1, imm, off);
 		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
 		insns[i++] = BPF_EXIT_INSN();
 	}
@@ -985,7 +1020,7 @@ static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
 	if (!insns)
 		return 9;
 
-	if (__bpf_alu_result(&res, dst, src, op)) {
+	if (__bpf_alu_result(&res, dst, src, op, false)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R2, src);
 		i += __bpf_ld_imm64(&insns[i], R3, res);
@@ -1007,7 +1042,7 @@ static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
 	if (!insns)
 		return 9;
 
-	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op, false)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R2, src);
 		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
@@ -1019,16 +1054,20 @@ static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
 	return i;
 }
 
-static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op, u32 flags)
 {
-	return __bpf_fill_pattern(self, &op, 64, 32,
+	int arg[2] = {op, flags};
+
+	return __bpf_fill_pattern(self, &arg, 64, 32,
 				  PATTERN_BLOCK1, PATTERN_BLOCK2,
 				  &__bpf_emit_alu64_imm);
 }
 
-static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op, u32 flags)
 {
-	return __bpf_fill_pattern(self, &op, 64, 32,
+	int arg[2] = {op, flags};
+
+	return __bpf_fill_pattern(self, &arg, 64, 32,
 				  PATTERN_BLOCK1, PATTERN_BLOCK2,
 				  &__bpf_emit_alu32_imm);
 }
@@ -1050,93 +1089,115 @@ static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
 /* ALU64 immediate operations */
 static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MOV);
+	return __bpf_fill_alu64_imm(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu64_and_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_AND);
+	return __bpf_fill_alu64_imm(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu64_or_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_OR);
+	return __bpf_fill_alu64_imm(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_XOR);
+	return __bpf_fill_alu64_imm(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu64_add_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_ADD);
+	return __bpf_fill_alu64_imm(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_SUB);
+	return __bpf_fill_alu64_imm(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MUL);
+	return __bpf_fill_alu64_imm(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu64_div_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_DIV);
+	return __bpf_fill_alu64_imm(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MOD);
+	return __bpf_fill_alu64_imm(self, BPF_MOD, 0);
+}
+
+/* Signed ALU64 immediate operations */
+static int bpf_fill_alu64_sdiv_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_DIV, F_SIGNED);
+}
+
+static int bpf_fill_alu64_smod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_MOD, F_SIGNED);
+}
+
+/* Signed ALU32 immediate operations */
+static int bpf_fill_alu32_sdiv_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_DIV, F_SIGNED);
+}
+
+static int bpf_fill_alu32_smod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_MOD, F_SIGNED);
 }
 
 /* ALU32 immediate operations */
 static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MOV);
+	return __bpf_fill_alu32_imm(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu32_and_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_AND);
+	return __bpf_fill_alu32_imm(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu32_or_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_OR);
+	return __bpf_fill_alu32_imm(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_XOR);
+	return __bpf_fill_alu32_imm(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu32_add_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_ADD);
+	return __bpf_fill_alu32_imm(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_SUB);
+	return __bpf_fill_alu32_imm(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MUL);
+	return __bpf_fill_alu32_imm(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu32_div_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_DIV);
+	return __bpf_fill_alu32_imm(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MOD);
+	return __bpf_fill_alu32_imm(self, BPF_MOD, 0);
 }
 
 /* ALU64 register operations */
@@ -1235,7 +1296,8 @@ static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
  * Test JITs that implement complex ALU operations as function
  * calls, and must re-arrange operands for argument passing.
  */
-static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op,
+				    u32 flags)
 {
 	int len = 2 + 10 * 10;
 	struct bpf_insn *insns;
@@ -1249,28 +1311,42 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 		return -ENOMEM;
 
 	/* Operand and result values according to operation */
-	if (alu32)
-		dst = 0x76543210U;
-	else
-		dst = 0x7edcba9876543210ULL;
+	if (flags & F_SIGNED) {
+		if (flags & F_ALU32)
+			dst = -76543210;
+		else
+			dst = -7654321076543210LL;
+	} else {
+		if (flags & F_ALU32)
+			dst = 0x76543210U;
+		else
+			dst = 0x7edcba9876543210ULL;
+	}
 	imm = 0x01234567U;
 
 	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
 		imm &= 31;
 
-	__bpf_alu_result(&res, dst, imm, op);
+	if ((flags & F_ALU32) && (flags & F_SIGNED))
+		__bpf_alu_result(&res, (u64)(s32)dst, (u64)(s32)imm, op, true);
+	else if (flags & F_ALU32)
+		__bpf_alu_result(&res, (u32)dst, (u32)imm, op, false);
+	else
+		__bpf_alu_result(&res, dst, imm, op, flags & F_SIGNED);
 
-	if (alu32)
+	if (flags & F_ALU32)
 		res = (u32)res;
 
 	/* Check all operand registers */
 	for (rd = R0; rd <= R9; rd++) {
 		i += __bpf_ld_imm64(&insns[i], rd, dst);
 
-		if (alu32)
-			insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+		s16 off = (flags & F_SIGNED) ? 1 : 0;
+
+		if (flags & F_ALU32)
+			insns[i++] = BPF_ALU32_IMM_OFF(op, rd, imm, off);
 		else
-			insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+			insns[i++] = BPF_ALU64_IMM_OFF(op, rd, imm, off);
 
 		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
 		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
@@ -1295,123 +1371,145 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 /* ALU64 K registers */
 static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, 0);
 }
 
 static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, 0);
 }
 
 static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, 0);
 }
 
 static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, 0);
+}
+
+/* Signed ALU64 K registers */
+static int bpf_fill_alu64_sdiv_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, F_SIGNED);
+}
+
+static int bpf_fill_alu64_smod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, F_SIGNED);
 }
 
 /* ALU32 K registers */
 static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, F_ALU32);
 }
 
 static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, F_ALU32);
 }
 
 static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, F_ALU32);
 }
 
 static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, F_ALU32);
 }
 
 static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, F_ALU32);
 }
 
 static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, F_ALU32);
 }
 
 static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, F_ALU32);
 }
 
 static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, F_ALU32);
 }
 
 static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, F_ALU32);
+}
+
+/* Signed ALU32 K registers */
+static int bpf_fill_alu32_sdiv_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, F_ALU32 | F_SIGNED);
+}
+
+static int bpf_fill_alu32_smod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, F_ALU32 | F_SIGNED);
 }
 
 /*
@@ -1442,8 +1540,8 @@ static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
 	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
 		src &= 31;
 
-	__bpf_alu_result(&res, dst, src, op);
-	__bpf_alu_result(&same, src, src, op);
+	__bpf_alu_result(&res, dst, src, op, false);
+	__bpf_alu_result(&same, src, src, op, false);
 
 	if (alu32) {
 		res = (u32)res;
@@ -1626,7 +1724,7 @@ static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
 		res = src;
 		break;
 	default:
-		__bpf_alu_result(&res, dst, src, BPF_OP(op));
+		__bpf_alu_result(&res, dst, src, BPF_OP(op), false);
 	}
 
 	keep = 0x0123456789abcdefULL;
@@ -1673,7 +1771,7 @@ static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
 		res = src;
 		break;
 	default:
-		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op), false);
 	}
 
 	keep = 0x0123456789abcdefULL;
@@ -1939,7 +2037,7 @@ static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
 		res = mem;
 		break;
 	default:
-		__bpf_alu_result(&res, mem, upd, BPF_OP(op));
+		__bpf_alu_result(&res, mem, upd, BPF_OP(op), false);
 	}
 
 	/* Test all operand registers */
@@ -12354,6 +12452,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_alu64_mod_imm_regs,
 	},
+	{
+		"ALU64_SDIV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sdiv_imm_regs,
+	},
+	{
+		"ALU64_SMOD_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_smod_imm_regs,
+	},
 	/* ALU32 K registers */
 	{
 		"ALU32_MOV_K: registers",
@@ -12451,6 +12565,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_alu32_mod_imm_regs,
 	},
+	{
+		"ALU32_SDIV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sdiv_imm_regs,
+	},
+	{
+		"ALU32_SMOD_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_smod_imm_regs,
+	},
 	/* ALU64 X register combinations */
 	{
 		"ALU64_MOV_X: register combinations",
@@ -12881,6 +13011,24 @@ static struct bpf_test tests[] = {
 		.fill_helper = bpf_fill_alu64_mod_imm,
 		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	{
+		"ALU64_SDIV_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sdiv_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ALU64_SMOD_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_smod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* ALU32 immediate magnitudes */
 	{
 		"ALU32_MOV_K: all immediate value magnitudes",
@@ -12963,6 +13111,24 @@ static struct bpf_test tests[] = {
 		.fill_helper = bpf_fill_alu32_mod_imm,
 		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	{
+		"ALU32_SDIV_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sdiv_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ALU32_SMOD_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_smod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* ALU64 register magnitudes */
 	{
 		"ALU64_MOV_X: all register value magnitudes",
-- 
2.52.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v2] bpf/tests: Exhaustive test coverage for signed division and modulo
  2026-04-06 20:39   ` [PATCH v2] " Jie Meng
@ 2026-04-11 17:40     ` Alexei Starovoitov
  2026-04-13 17:23       ` [PATCH bpf-next v3] " Jie Meng
  0 siblings, 1 reply; 6+ messages in thread
From: Alexei Starovoitov @ 2026-04-11 17:40 UTC (permalink / raw)
  To: Jie Meng; +Cc: Andrii Nakryiko, Alexei Starovoitov, bpf, Daniel Borkmann

On Mon, Apr 6, 2026 at 1:40 PM Jie Meng <jmeng@fb.com> wrote:
>
>
>  /* ALU result computation used in tests */
> -static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
> +enum { F_ALU32 = 1, F_SIGNED = 2 };
> +
> +static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op, bool is_signed)

why? Use u32 flags here too and avoid..

>
> -       __bpf_alu_result(&res, dst, imm, op);
> +       if ((flags & F_ALU32) && (flags & F_SIGNED))
> +               __bpf_alu_result(&res, (u64)(s32)dst, (u64)(s32)imm, op, true);
> +       else if (flags & F_ALU32)
> +               __bpf_alu_result(&res, (u32)dst, (u32)imm, op, false);
> +       else
> +               __bpf_alu_result(&res, dst, imm, op, flags & F_SIGNED);

this convolution.
It will be just single line
__bpf_alu_result(&res, dst, imm, op, flags);

pw-bot: cr

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH bpf-next v3] bpf/tests: Exhaustive test coverage for signed division and modulo
  2026-04-11 17:40     ` Alexei Starovoitov
@ 2026-04-13 17:23       ` Jie Meng
  2026-04-21  1:00         ` patchwork-bot+netdevbpf
  0 siblings, 1 reply; 6+ messages in thread
From: Jie Meng @ 2026-04-13 17:23 UTC (permalink / raw)
  To: alexei.starovoitov; +Cc: andrii, ast, bpf, daniel, jmeng

Extend lib/test_bpf.c to provide comprehensive test coverage for BPF
signed division (SDIV) and signed modulo (SMOD) instructions, both
32-bit and 64-bit variants with immediate operands.

Introduce F_ALU32 and F_SIGNED flags to replace the less readable
bool alu32 and s16 off parameters throughout the test helpers. The
BPF instruction 'off' field is derived from flags only at the point
of instruction encoding.

Changes:
- Add enum { F_ALU32 = 1, F_SIGNED = 2 } for readable test flags.
- __bpf_alu_result(): take u32 flags instead of separate signed/alu32
  parameters. Narrow operands internally for ALU32 (unsigned via u32
  cast, signed via s32 cast) before computing the reference result.
- __bpf_emit_alu64_imm(), __bpf_emit_alu32_imm(): pass flags through
  to __bpf_alu_result() and derive 'off' for instruction encoding
  locally.
- __bpf_fill_alu_imm_regs(): take u32 flags, use F_ALU32/F_SIGNED for
  operand setup and single-line __bpf_alu_result() call.
- __bpf_fill_alu_shift(), __bpf_fill_alu_shift_same_reg(): convert
  bool alu32 parameter to u32 flags for consistency.
- New test fill functions: bpf_fill_alu{32,64}_{sdiv,smod}_imm() and
  bpf_fill_alu{32,64}_{sdiv,smod}_imm_regs(), each testing all
  immediate value magnitudes and all register pair combinations.
- All existing unsigned tests updated to use flags (0 or F_ALU32),
  preserving backward compatibility.

8 new test cases added:
  ALU64_SDIV_K, ALU64_SMOD_K (immediate magnitudes + register combos)
  ALU32_SDIV_K, ALU32_SMOD_K (immediate magnitudes + register combos)

Test results:
  test_bpf: Summary: 1061 PASSED, 0 FAILED, [1049/1049 JIT'ed]
  test_bpf: test_tail_calls: Summary: 10 PASSED, 0 FAILED, [10/10 JIT'ed]
  test_bpf: test_skb_segment: Summary: 2 PASSED, 0 FAILED

Assisted-by: Claude:claude-opus-4-6
Signed-off-by: Jie Meng <jmeng@fb.com>
---
v1 -> v2: addressed Alexei's comments about readability
v2 -> v3: use flags for __bpf_alu_result too

 lib/test_bpf.c | 363 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 263 insertions(+), 100 deletions(-)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5892c0f17ddc..af6f3340c034 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -560,8 +560,23 @@ static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
 }
 
 /* ALU result computation used in tests */
-static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+enum { F_ALU32 = 1, F_SIGNED = 2 };
+
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op, u32 flags)
 {
+	bool is_signed = flags & F_SIGNED;
+
+	/* Narrow operands for ALU32 */
+	if (flags & F_ALU32) {
+		if (is_signed) {
+			v1 = (u64)(s32)v1;
+			v2 = (u64)(s32)v2;
+		} else {
+			v1 = (u32)v1;
+			v2 = (u32)v2;
+		}
+	}
+
 	*res = 0;
 	switch (op) {
 	case BPF_MOV:
@@ -599,12 +614,28 @@ static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
 	case BPF_DIV:
 		if (v2 == 0)
 			return false;
-		*res = div64_u64(v1, v2);
+		if (!is_signed) {
+			*res = div64_u64(v1, v2);
+		} else {
+			if ((s64)v2 == -1) /* Handled by verifier */
+				return false;
+			*res = (u64)div64_s64(v1, v2);
+		}
 		break;
 	case BPF_MOD:
 		if (v2 == 0)
 			return false;
-		div64_u64_rem(v1, v2, res);
+		if (!is_signed) {
+			div64_u64_rem(v1, v2, res);
+		} else {
+			if ((s64)v2 == -1)
+				return false;
+			/*
+			 * Avoid s64 % s64 which generates __moddi3 on
+			 * 32-bit architectures. Use div64_s64 instead.
+			 */
+			*res = (u64)((s64)v1 - div64_s64(v1, v2) * (s64)v2);
+		}
 		break;
 	}
 	return true;
@@ -612,7 +643,7 @@ static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
 
 /* Test an ALU shift operation for all valid shift values */
 static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
-				u8 mode, bool alu32)
+				u8 mode, u32 flags)
 {
 	static const s64 regs[] = {
 		0x0123456789abcdefLL, /* dword > 0, word < 0 */
@@ -620,7 +651,7 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 		0xfedcba0198765432LL, /* dword < 0, word < 0 */
 		0x0123458967abcdefLL, /* dword > 0, word > 0 */
 	};
-	int bits = alu32 ? 32 : 64;
+	int bits = (flags & F_ALU32) ? 32 : 64;
 	int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
 	struct bpf_insn *insn;
 	int imm, k;
@@ -643,7 +674,7 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 			/* Perform operation */
 			insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
 			insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
-			if (alu32) {
+			if (flags & F_ALU32) {
 				if (mode == BPF_K)
 					insn[i++] = BPF_ALU32_IMM(op, R1, imm);
 				else
@@ -653,14 +684,14 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 					reg = (s32)reg;
 				else
 					reg = (u32)reg;
-				__bpf_alu_result(&val, reg, imm, op);
+				__bpf_alu_result(&val, reg, imm, op, 0);
 				val = (u32)val;
 			} else {
 				if (mode == BPF_K)
 					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
 				else
 					insn[i++] = BPF_ALU64_REG(op, R1, R2);
-				__bpf_alu_result(&val, reg, imm, op);
+				__bpf_alu_result(&val, reg, imm, op, 0);
 			}
 
 			/*
@@ -688,62 +719,62 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 
 static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, 0);
 }
 
 static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, 0);
 }
 
 static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, 0);
 }
 
 static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, 0);
 }
 
 static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, 0);
 }
 
 static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, 0);
 }
 
 static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, F_ALU32);
 }
 
 static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, F_ALU32);
 }
 
 /*
@@ -751,9 +782,9 @@ static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
  * for the case when the source and destination are the same.
  */
 static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
-					 bool alu32)
+					 u32 flags)
 {
-	int bits = alu32 ? 32 : 64;
+	int bits = (flags & F_ALU32) ? 32 : 64;
 	int len = 3 + 6 * bits;
 	struct bpf_insn *insn;
 	int i = 0;
@@ -770,14 +801,14 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
 
 		/* Perform operation */
 		insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
-		if (alu32)
+		if (flags & F_ALU32)
 			insn[i++] = BPF_ALU32_REG(op, R1, R1);
 		else
 			insn[i++] = BPF_ALU64_REG(op, R1, R1);
 
 		/* Compute the reference result */
-		__bpf_alu_result(&res, val, val, op);
-		if (alu32)
+		__bpf_alu_result(&res, val, val, op, 0);
+		if (flags & F_ALU32)
 			res = (u32)res;
 		i += __bpf_ld_imm64(&insn[i], R2, res);
 
@@ -798,32 +829,32 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
 
 static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, 0);
 }
 
 static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, 0);
 }
 
 static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, 0);
 }
 
 static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
 {
-	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
+	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, F_ALU32);
 }
 
 /*
@@ -936,17 +967,20 @@ static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
 static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
-	int op = *(int *)arg;
+	int *a = arg;
+	int op = a[0];
+	u32 flags = a[1];
+	s16 off = (flags & F_SIGNED) ? 1 : 0;
 	int i = 0;
 	u64 res;
 
 	if (!insns)
 		return 7;
 
-	if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+	if (__bpf_alu_result(&res, dst, (s32)imm, op, flags)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R3, res);
-		insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+		insns[i++] = BPF_ALU64_IMM_OFF(op, R1, imm, off);
 		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
 		insns[i++] = BPF_EXIT_INSN();
 	}
@@ -957,17 +991,20 @@ static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
-	int op = *(int *)arg;
+	int *a = arg;
+	int op = a[0];
+	u32 flags = a[1];
+	s16 off = (flags & F_SIGNED) ? 1 : 0;
 	int i = 0;
 	u64 res;
 
 	if (!insns)
 		return 7;
 
-	if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+	if (__bpf_alu_result(&res, dst, (s32)imm, op, flags | F_ALU32)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
-		insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+		insns[i++] = BPF_ALU32_IMM_OFF(op, R1, imm, off);
 		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
 		insns[i++] = BPF_EXIT_INSN();
 	}
@@ -985,7 +1022,7 @@ static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
 	if (!insns)
 		return 9;
 
-	if (__bpf_alu_result(&res, dst, src, op)) {
+	if (__bpf_alu_result(&res, dst, src, op, 0)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R2, src);
 		i += __bpf_ld_imm64(&insns[i], R3, res);
@@ -1007,7 +1044,7 @@ static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
 	if (!insns)
 		return 9;
 
-	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op, 0)) {
 		i += __bpf_ld_imm64(&insns[i], R1, dst);
 		i += __bpf_ld_imm64(&insns[i], R2, src);
 		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
@@ -1019,16 +1056,20 @@ static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
 	return i;
 }
 
-static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op, u32 flags)
 {
-	return __bpf_fill_pattern(self, &op, 64, 32,
+	int arg[2] = {op, flags};
+
+	return __bpf_fill_pattern(self, &arg, 64, 32,
 				  PATTERN_BLOCK1, PATTERN_BLOCK2,
 				  &__bpf_emit_alu64_imm);
 }
 
-static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op, u32 flags)
 {
-	return __bpf_fill_pattern(self, &op, 64, 32,
+	int arg[2] = {op, flags};
+
+	return __bpf_fill_pattern(self, &arg, 64, 32,
 				  PATTERN_BLOCK1, PATTERN_BLOCK2,
 				  &__bpf_emit_alu32_imm);
 }
@@ -1050,93 +1091,115 @@ static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
 /* ALU64 immediate operations */
 static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MOV);
+	return __bpf_fill_alu64_imm(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu64_and_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_AND);
+	return __bpf_fill_alu64_imm(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu64_or_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_OR);
+	return __bpf_fill_alu64_imm(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_XOR);
+	return __bpf_fill_alu64_imm(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu64_add_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_ADD);
+	return __bpf_fill_alu64_imm(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_SUB);
+	return __bpf_fill_alu64_imm(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MUL);
+	return __bpf_fill_alu64_imm(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu64_div_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_DIV);
+	return __bpf_fill_alu64_imm(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu64_imm(self, BPF_MOD);
+	return __bpf_fill_alu64_imm(self, BPF_MOD, 0);
+}
+
+/* Signed ALU64 immediate operations */
+static int bpf_fill_alu64_sdiv_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_DIV, F_SIGNED);
+}
+
+static int bpf_fill_alu64_smod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu64_imm(self, BPF_MOD, F_SIGNED);
+}
+
+/* Signed ALU32 immediate operations */
+static int bpf_fill_alu32_sdiv_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_DIV, F_SIGNED);
+}
+
+static int bpf_fill_alu32_smod_imm(struct bpf_test *self)
+{
+	return __bpf_fill_alu32_imm(self, BPF_MOD, F_SIGNED);
 }
 
 /* ALU32 immediate operations */
 static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MOV);
+	return __bpf_fill_alu32_imm(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu32_and_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_AND);
+	return __bpf_fill_alu32_imm(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu32_or_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_OR);
+	return __bpf_fill_alu32_imm(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_XOR);
+	return __bpf_fill_alu32_imm(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu32_add_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_ADD);
+	return __bpf_fill_alu32_imm(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_SUB);
+	return __bpf_fill_alu32_imm(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MUL);
+	return __bpf_fill_alu32_imm(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu32_div_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_DIV);
+	return __bpf_fill_alu32_imm(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
 {
-	return __bpf_fill_alu32_imm(self, BPF_MOD);
+	return __bpf_fill_alu32_imm(self, BPF_MOD, 0);
 }
 
 /* ALU64 register operations */
@@ -1235,7 +1298,8 @@ static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
  * Test JITs that implement complex ALU operations as function
  * calls, and must re-arrange operands for argument passing.
  */
-static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op,
+				    u32 flags)
 {
 	int len = 2 + 10 * 10;
 	struct bpf_insn *insns;
@@ -1249,28 +1313,37 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 		return -ENOMEM;
 
 	/* Operand and result values according to operation */
-	if (alu32)
-		dst = 0x76543210U;
-	else
-		dst = 0x7edcba9876543210ULL;
+	if (flags & F_SIGNED) {
+		if (flags & F_ALU32)
+			dst = -76543210;
+		else
+			dst = -7654321076543210LL;
+	} else {
+		if (flags & F_ALU32)
+			dst = 0x76543210U;
+		else
+			dst = 0x7edcba9876543210ULL;
+	}
 	imm = 0x01234567U;
 
 	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
 		imm &= 31;
 
-	__bpf_alu_result(&res, dst, imm, op);
+	__bpf_alu_result(&res, dst, imm, op, flags);
 
-	if (alu32)
+	if (flags & F_ALU32)
 		res = (u32)res;
 
 	/* Check all operand registers */
 	for (rd = R0; rd <= R9; rd++) {
 		i += __bpf_ld_imm64(&insns[i], rd, dst);
 
-		if (alu32)
-			insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+		s16 off = (flags & F_SIGNED) ? 1 : 0;
+
+		if (flags & F_ALU32)
+			insns[i++] = BPF_ALU32_IMM_OFF(op, rd, imm, off);
 		else
-			insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+			insns[i++] = BPF_ALU64_IMM_OFF(op, rd, imm, off);
 
 		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
 		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
@@ -1295,123 +1368,145 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
 /* ALU64 K registers */
 static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, 0);
 }
 
 static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, 0);
 }
 
 static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, 0);
 }
 
 static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, 0);
 }
 
 static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, 0);
 }
 
 static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, 0);
 }
 
 static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, 0);
 }
 
 static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, 0);
 }
 
 static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, 0);
 }
 
 static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, 0);
 }
 
 static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, 0);
 }
 
 static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, 0);
+}
+
+/* Signed ALU64 K registers */
+static int bpf_fill_alu64_sdiv_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, F_SIGNED);
+}
+
+static int bpf_fill_alu64_smod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, F_SIGNED);
 }
 
 /* ALU32 K registers */
 static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOV, F_ALU32);
 }
 
 static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_AND, F_ALU32);
 }
 
 static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_OR, F_ALU32);
 }
 
 static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_XOR, F_ALU32);
 }
 
 static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_LSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_RSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, F_ALU32);
 }
 
 static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_ADD, F_ALU32);
 }
 
 static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_SUB, F_ALU32);
 }
 
 static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MUL, F_ALU32);
 }
 
 static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, F_ALU32);
 }
 
 static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
 {
-	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, F_ALU32);
+}
+
+/* Signed ALU32 K registers */
+static int bpf_fill_alu32_sdiv_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_DIV, F_ALU32 | F_SIGNED);
+}
+
+static int bpf_fill_alu32_smod_imm_regs(struct bpf_test *self)
+{
+	return __bpf_fill_alu_imm_regs(self, BPF_MOD, F_ALU32 | F_SIGNED);
 }
 
 /*
@@ -1442,8 +1537,8 @@ static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
 	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
 		src &= 31;
 
-	__bpf_alu_result(&res, dst, src, op);
-	__bpf_alu_result(&same, src, src, op);
+	__bpf_alu_result(&res, dst, src, op, 0);
+	__bpf_alu_result(&same, src, src, op, 0);
 
 	if (alu32) {
 		res = (u32)res;
@@ -1626,7 +1721,7 @@ static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
 		res = src;
 		break;
 	default:
-		__bpf_alu_result(&res, dst, src, BPF_OP(op));
+		__bpf_alu_result(&res, dst, src, BPF_OP(op), 0);
 	}
 
 	keep = 0x0123456789abcdefULL;
@@ -1673,7 +1768,7 @@ static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
 		res = src;
 		break;
 	default:
-		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op), 0);
 	}
 
 	keep = 0x0123456789abcdefULL;
@@ -1939,7 +2034,7 @@ static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
 		res = mem;
 		break;
 	default:
-		__bpf_alu_result(&res, mem, upd, BPF_OP(op));
+		__bpf_alu_result(&res, mem, upd, BPF_OP(op), 0);
 	}
 
 	/* Test all operand registers */
@@ -12354,6 +12449,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_alu64_mod_imm_regs,
 	},
+	{
+		"ALU64_SDIV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sdiv_imm_regs,
+	},
+	{
+		"ALU64_SMOD_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_smod_imm_regs,
+	},
 	/* ALU32 K registers */
 	{
 		"ALU32_MOV_K: registers",
@@ -12451,6 +12562,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 1 } },
 		.fill_helper = bpf_fill_alu32_mod_imm_regs,
 	},
+	{
+		"ALU32_SDIV_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sdiv_imm_regs,
+	},
+	{
+		"ALU32_SMOD_K: registers",
+		{ },
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_smod_imm_regs,
+	},
 	/* ALU64 X register combinations */
 	{
 		"ALU64_MOV_X: register combinations",
@@ -12881,6 +13008,24 @@ static struct bpf_test tests[] = {
 		.fill_helper = bpf_fill_alu64_mod_imm,
 		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	{
+		"ALU64_SDIV_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_sdiv_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ALU64_SMOD_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu64_smod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* ALU32 immediate magnitudes */
 	{
 		"ALU32_MOV_K: all immediate value magnitudes",
@@ -12963,6 +13108,24 @@ static struct bpf_test tests[] = {
 		.fill_helper = bpf_fill_alu32_mod_imm,
 		.nr_testruns = NR_PATTERN_RUNS,
 	},
+	{
+		"ALU32_SDIV_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_sdiv_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
+	{
+		"ALU32_SMOD_K: all immediate value magnitudes",
+		{ },
+		INTERNAL | FLAG_NO_DATA,
+		{ },
+		{ { 0, 1 } },
+		.fill_helper = bpf_fill_alu32_smod_imm,
+		.nr_testruns = NR_PATTERN_RUNS,
+	},
 	/* ALU64 register magnitudes */
 	{
 		"ALU64_MOV_X: all register value magnitudes",
-- 
2.52.0


^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH bpf-next v3] bpf/tests: Exhaustive test coverage for signed division and modulo
  2026-04-13 17:23       ` [PATCH bpf-next v3] " Jie Meng
@ 2026-04-21  1:00         ` patchwork-bot+netdevbpf
  0 siblings, 0 replies; 6+ messages in thread
From: patchwork-bot+netdevbpf @ 2026-04-21  1:00 UTC (permalink / raw)
  To: Jie Meng; +Cc: alexei.starovoitov, andrii, ast, bpf, daniel

Hello:

This patch was applied to bpf/bpf-next.git (master)
by Alexei Starovoitov <ast@kernel.org>:

On Mon, 13 Apr 2026 10:23:11 -0700 you wrote:
> Extend lib/test_bpf.c to provide comprehensive test coverage for BPF
> signed division (SDIV) and signed modulo (SMOD) instructions, both
> 32-bit and 64-bit variants with immediate operands.
> 
> Introduce F_ALU32 and F_SIGNED flags to replace the less readable
> bool alu32 and s16 off parameters throughout the test helpers. The
> BPF instruction 'off' field is derived from flags only at the point
> of instruction encoding.
> 
> [...]

Here is the summary with links:
  - [bpf-next,v3] bpf/tests: Exhaustive test coverage for signed division and modulo
    https://git.kernel.org/bpf/bpf-next/c/c8f0ee969f76

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html



^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2026-04-21  1:00 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-26 20:50 [PATCH bpf-next] bpf/tests: Exhaustive test coverage for signed division and modulo Jie Meng
2026-04-02 22:16 ` Alexei Starovoitov
2026-04-06 20:39   ` [PATCH v2] " Jie Meng
2026-04-11 17:40     ` Alexei Starovoitov
2026-04-13 17:23       ` [PATCH bpf-next v3] " Jie Meng
2026-04-21  1:00         ` patchwork-bot+netdevbpf

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox