public inbox for bpf@vger.kernel.org
 help / color / mirror / Atom feed
From: Andrii Nakryiko <andrii@kernel.org>
To: <bpf@vger.kernel.org>, <ast@kernel.org>, <daniel@iogearbox.net>,
	<martin.lau@kernel.org>
Cc: <andrii@kernel.org>, <kernel-team@meta.com>
Subject: [PATCH bpf-next 10/13] selftests/bpf: add randomized reg_bounds tests
Date: Thu, 2 Nov 2023 17:08:19 -0700	[thread overview]
Message-ID: <20231103000822.2509815-11-andrii@kernel.org> (raw)
In-Reply-To: <20231103000822.2509815-1-andrii@kernel.org>

Add random case generation to reg_bounds.c and run the generated cases
without SLOW_TESTS=1 to increase the chances of BPF CI catching latent issues.

Suggested-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 .../selftests/bpf/prog_tests/reg_bounds.c     | 164 +++++++++++++++++-
 1 file changed, 158 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 16864c940548..fd6401dec0b7 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -83,6 +83,17 @@ static __always_inline u64 max_t(enum num_t t, u64 x, u64 y)
 	}
 }
 
+/* Cast value x into the value domain of numeric type t, returning the
+ * result widened back to u64. For S32, the double cast (u32)(s32) keeps
+ * only the low 32 bits and prevents sign-extension into the upper half
+ * of the returned u64; for S64 the round-trip through s64 is bit-identical.
+ */
+static __always_inline u64 cast_t(enum num_t t, u64 x)
+{
+	switch (t) {
+	case U64: return (u64)x;
+	case U32: return (u32)x;
+	case S64: return (s64)x;
+	case S32: return (u32)(s32)x;
+	default: printf("cast_t!\n"); exit(1);
+	}
+}
+
 static const char *t_str(enum num_t t)
 {
 	switch (t) {
@@ -1304,8 +1315,10 @@ struct ctx {
 	struct range *usubranges, *ssubranges;
 	int max_failure_cnt, cur_failure_cnt;
 	int total_case_cnt, case_cnt;
+	int rand_case_cnt;
+	unsigned rand_seed;
 	__u64 start_ns;
-	char progress_ctx[32];
+	char progress_ctx[64];
 };
 
 static void cleanup_ctx(struct ctx *ctx)
@@ -1636,11 +1649,6 @@ static int parse_env_vars(struct ctx *ctx)
 {
 	const char *s;
 
-	if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
-		test__skip();
-		return -ENOTSUP;
-	}
-
 	if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) {
 		errno = 0;
 		ctx->max_failure_cnt = strtol(s, NULL, 10);
@@ -1650,13 +1658,37 @@ static int parse_env_vars(struct ctx *ctx)
 		}
 	}
 
+	if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) {
+		errno = 0;
+		ctx->rand_case_cnt = strtol(s, NULL, 10);
+		if (errno || ctx->rand_case_cnt < 0) {
+			ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT");
+			return -EINVAL;
+		}
+	}
+
+	if ((s = getenv("REG_BOUNDS_RAND_SEED"))) {
+		errno = 0;
+		ctx->rand_seed = strtoul(s, NULL, 10);
+		if (errno) {
+			ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED");
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
 static int prepare_gen_tests(struct ctx *ctx)
 {
+	const char *s;
 	int err;
 
+	if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
+		test__skip();
+		return -ENOTSUP;
+	}
+
 	err = parse_env_vars(ctx);
 	if (err)
 		return err;
@@ -1857,6 +1889,126 @@ void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32,
 void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); }
 void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); }
 
+#define DEFAULT_RAND_CASE_CNT 25
+
+/* Mask selecting the low 21 bits of a random() result. Note: this must be
+ * 21 bits wide (not 22) so the three segments below tile the u64 exactly
+ * as 22 + 21 + 21 bits without the middle segment spilling into bit 42.
+ */
+#define RAND_21BIT_MASK ((1 << 21) - 1)
+
+static u64 rand_u64(void)
+{
+	/* RAND_MAX is guaranteed to be at least 1<<15, but in practice it
+	 * seems to be 1<<31, so we need to call it thrice to get full u64;
+	 * we'll use roughly equal split: 22 + 21 + 21 bits
+	 */
+	return ((u64)random() << 42) |
+	       (((u64)random() & RAND_21BIT_MASK) << 21) |
+	       (random() & RAND_21BIT_MASK);
+}
+
+/* Draw one random value and clamp it into t's value domain. */
+static u64 rand_const(enum num_t t)
+{
+	u64 v = rand_u64();
+
+	return cast_t(t, v);
+}
+
+/* Build a random (possibly degenerate) range within t's value domain by
+ * drawing two random endpoints and ordering them.
+ */
+static struct range rand_range(enum num_t t)
+{
+	u64 a = rand_const(t);
+	u64 b = rand_const(t);
+
+	return range(t, min_t(t, a, b), max_t(t, a, b));
+}
+
+/* Run ctx.rand_case_cnt randomized <range> x <range|const> verification
+ * cases for the given initial register type (init_t) and comparison type
+ * (cond_t). The random seed is always printed, so any run (passing or
+ * failing) can be reproduced via REG_BOUNDS_RAND_SEED=<seed>; the case
+ * count can be tuned via REG_BOUNDS_RAND_CASE_CNT (see parse_env_vars).
+ */
+static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range)
+{
+	struct ctx ctx;
+	struct range range1, range2;
+	int err, i;
+	u64 t;
+
+	memset(&ctx, 0, sizeof(ctx));
+
+	err = parse_env_vars(&ctx);
+	if (err) {
+		ASSERT_OK(err, "parse_env_vars");
+		return;
+	}
+
+	/* fall back to defaults when the env vars were not provided */
+	if (ctx.rand_case_cnt == 0)
+		ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT;
+	if (ctx.rand_seed == 0)
+		ctx.rand_seed = (unsigned)get_time_ns();
+
+	srandom(ctx.rand_seed);
+
+	/* each case is verified in both argument orders, hence the 2x */
+	ctx.total_case_cnt = (MAX_OP - MIN_OP + 1) * (2 * ctx.rand_case_cnt);
+	ctx.start_ns = get_time_ns();
+	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
+		 "[RANDOM SEED %u] RANGE x %s, %s -> %s",
+		 ctx.rand_seed, const_range ? "CONST" : "RANGE",
+		 t_str(init_t), t_str(cond_t));
+	/* log the seed up front so even passing runs are replayable */
+	fprintf(env.stdout, "%s\n", ctx.progress_ctx);
+
+	for (i = 0; i < ctx.rand_case_cnt; i++) {
+		range1 = rand_range(init_t);
+		if (const_range) {
+			/* degenerate single-value range [t, t] */
+			t = rand_const(init_t);
+			range2 = range(init_t, t, t);
+		} else {
+			range2 = rand_range(init_t);
+		}
+
+		/* <range1> x <range2> */
+		if (verify_case(&ctx, init_t, cond_t, range1, range2))
+			goto cleanup;
+		/* <range2> x <range1> */
+		if (verify_case(&ctx, init_t, cond_t, range2, range1))
+			goto cleanup;
+	}
+
+cleanup:
+	cleanup_ctx(&ctx);
+}
+
+/* test_progs entry points: each is a thin wrapper around
+ * validate_rand_ranges() with a fixed (init_t, cond_t, const_range)
+ * combination, covering all 4x4 type pairings for both the CONST and
+ * RANGE flavors of the second operand.
+ */
+/* [RANDOM] RANGE x CONST, U64 initial range */
+void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); }
+void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); }
+void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); }
+void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, S64 initial range */
+void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); }
+void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); }
+void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); }
+void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, U32 initial range */
+void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); }
+void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); }
+void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); }
+void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); }
+/* [RANDOM] RANGE x CONST, S32 initial range */
+void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); }
+void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); }
+void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); }
+void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); }
+
+/* [RANDOM] RANGE x RANGE, U64 initial range */
+void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, S64 initial range */
+void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, U32 initial range */
+void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); }
+/* [RANDOM] RANGE x RANGE, S32 initial range */
+void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); }
+void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); }
+
 /* A set of hard-coded "interesting" cases to validate as part of normal
  * test_progs test runs
  */
-- 
2.34.1


  parent reply	other threads:[~2023-11-03  0:08 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-11-03  0:08 [PATCH bpf-next 00/13] BPF register bounds range vs range support Andrii Nakryiko
2023-11-03  0:08 ` [PATCH bpf-next 01/13] bpf: generalize reg_set_min_max() to handle non-const register comparisons Andrii Nakryiko
2023-11-03  7:52   ` Shung-Hsi Yu
2023-11-03  8:33     ` Shung-Hsi Yu
2023-11-03 20:39     ` Andrii Nakryiko
2023-11-03 20:48       ` Andrii Nakryiko
2023-11-06  2:22         ` Shung-Hsi Yu
2023-11-03 16:20   ` Eduard Zingerman
2023-11-03 20:39     ` Andrii Nakryiko
2023-11-03  0:08 ` [PATCH bpf-next 02/13] bpf: generalize is_scalar_branch_taken() logic Andrii Nakryiko
2023-11-03  0:13   ` Andrii Nakryiko
2023-11-03 16:47   ` Eduard Zingerman
2023-11-03 20:59     ` Andrii Nakryiko
2023-11-03 21:02       ` Andrii Nakryiko
2023-11-03  0:08 ` [PATCH bpf-next 03/13] bpf: enhance BPF_JEQ/BPF_JNE is_branch_taken logic Andrii Nakryiko
2023-11-03 17:28   ` Eduard Zingerman
2023-11-09  8:39   ` Shung-Hsi Yu
2023-11-03  0:08 ` [PATCH bpf-next 04/13] bpf: add register bounds sanity checks and sanitization Andrii Nakryiko
2023-11-03  2:13   ` Andrii Nakryiko
2023-11-03 17:56   ` Eduard Zingerman
2023-11-03 21:11     ` Andrii Nakryiko
2023-11-03 21:39       ` Eduard Zingerman
2023-11-09  8:30   ` Shung-Hsi Yu
2023-11-03  0:08 ` [PATCH bpf-next 05/13] bpf: remove redundant s{32,64} -> u{32,64} deduction logic Andrii Nakryiko
2023-11-03 22:16   ` Eduard Zingerman
2023-11-09  8:43   ` Shung-Hsi Yu
2023-11-03  0:08 ` [PATCH bpf-next 06/13] bpf: make __reg{32,64}_deduce_bounds logic more robust Andrii Nakryiko
2023-11-03 22:27   ` Eduard Zingerman
2023-11-09  9:02   ` Shung-Hsi Yu
2023-11-03  0:08 ` [PATCH bpf-next 07/13] selftests/bpf: BPF register range bounds tester Andrii Nakryiko
2023-11-03 19:19   ` Alexei Starovoitov
2023-11-03 21:12     ` Andrii Nakryiko
2023-11-03  0:08 ` [PATCH bpf-next 08/13] selftests/bpf: adjust OP_EQ/OP_NE handling to use subranges for branch taken Andrii Nakryiko
2023-11-03  0:08 ` [PATCH bpf-next 09/13] selftests/bpf: add range x range test to reg_bounds Andrii Nakryiko
2023-11-03  0:08 ` Andrii Nakryiko [this message]
2023-11-03  0:08 ` [PATCH bpf-next 11/13] selftests/bpf: set BPF_F_TEST_SANITY_SCRIPT by default Andrii Nakryiko
2023-11-03 22:35   ` Eduard Zingerman
2023-11-03  0:08 ` [PATCH bpf-next 12/13] veristat: add ability to set BPF_F_TEST_SANITY_STRICT flag with -r flag Andrii Nakryiko
2023-11-03  0:08 ` [PATCH bpf-next 13/13] selftests/bpf: add iter test requiring range x range logic Andrii Nakryiko

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231103000822.2509815-11-andrii@kernel.org \
    --to=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=kernel-team@meta.com \
    --cc=martin.lau@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox