From: Daniel Borkmann <daniel@iogearbox.net>
To: ast@kernel.org
Cc: jannh@google.com, davem@davemloft.net, netdev@vger.kernel.org,
	Daniel Borkmann <daniel@iogearbox.net>
Subject: [PATCH bpf v2 9/9] bpf: add various test cases to selftests
Date: Wed,  2 Jan 2019 00:20:46 +0100
Message-ID: <20190101232046.2880-10-daniel@iogearbox.net>
In-Reply-To: <20190101232046.2880-1-daniel@iogearbox.net>

Add various map value pointer related test cases to the test_verifier
kselftest to reflect the recent changes and to improve test coverage.
The tests cover basic masking functionality, unprivileged behavior on
pointer arithmetic that goes out of bounds, mixed bounds checks,
negative unknown scalars that still result in a positive offset for
the access and helper range, handling of pointer arithmetic coming
from different maps, and various masking scenarios with subsequent
map value access, among others. Two test cases from Jann Horn covering
prior fixes are included as well.
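
For reference, below is a minimal userspace C sketch of the bounds
masking idiom that the "masking, test ..." cases encode as raw BPF
instructions (the SUB/OR/NEG/ARSH/AND sequence). It is an illustration
only, not part of the kernel changes; the function name is made up
here, and the sketch assumes an arithmetic right shift on a negative
int64_t, which matches BPF_ALU64 BPF_ARSH semantics.

  #include <stdint.h>

  /* Illustrative only: mirrors the masking sequence from the tests. */
  static uint64_t mask_index(uint64_t index, uint64_t limit)
  {
          int64_t mask;

          mask = (limit - 1) - index;  /* negative once index >= limit     */
          mask |= index;               /* also negative if bit 63 of index */
          mask = -mask;                /* in bounds: mask <= 0             */
          mask >>= 63;                 /* smear sign bit: all ones or zero */

          return index & mask;         /* in bounds: index, else 0         */
  }

With the constants used in the tests, mask_index(4, 5) returns 4
("masking, test in bounds 1") and mask_index(5, 5) returns 0
("masking, test out of bounds 1"), matching the expected retvals.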

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
---
 tools/testing/selftests/bpf/test_verifier.c | 1146 ++++++++++++++++++++++++++-
 1 file changed, 1124 insertions(+), 22 deletions(-)

diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 33f7d38..10d4444 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -23,6 +23,7 @@
 #include <stdbool.h>
 #include <sched.h>
 #include <limits.h>
+#include <assert.h>
 
 #include <sys/capability.h>
 
@@ -2577,6 +2578,7 @@ static struct bpf_test tests[] = {
 		},
 		.result = REJECT,
 		.errstr = "invalid stack off=-79992 size=8",
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
 	},
 	{
 		"PTR_TO_STACK store/load - out of bounds high",
@@ -3104,6 +3106,8 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -3206,6 +3210,243 @@ static struct bpf_test tests[] = {
 		.retval_unpriv = 2,
 	},
 	{
+		"PTR_TO_STACK check high 1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK check high 2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK check high 3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK check high 4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.errstr = "invalid stack off=0 size=1",
+		.result = REJECT,
+	},
+	{
+		"PTR_TO_STACK check high 5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off",
+	},
+	{
+		"PTR_TO_STACK check high 6",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off",
+	},
+	{
+		"PTR_TO_STACK check high 7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.errstr = "fp pointer offset",
+	},
+	{
+		"PTR_TO_STACK check low 1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK check low 2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
+			BPF_EXIT_INSN(),
+		},
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK check low 3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.errstr = "invalid stack off=-513 size=1",
+		.result = REJECT,
+	},
+	{
+		"PTR_TO_STACK check low 4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "math between fp pointer",
+	},
+	{
+		"PTR_TO_STACK check low 5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off",
+	},
+	{
+		"PTR_TO_STACK check low 6",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off",
+	},
+	{
+		"PTR_TO_STACK check low 7",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.errstr = "fp pointer offset",
+	},
+	{
+		"PTR_TO_STACK mixed reg/k, 1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+			BPF_MOV64_IMM(BPF_REG_2, -3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK mixed reg/k, 2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+			BPF_MOV64_IMM(BPF_REG_2, -3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
+		"PTR_TO_STACK mixed reg/k, 3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+			BPF_MOV64_IMM(BPF_REG_2, -3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = -3,
+	},
+	{
+		"PTR_TO_STACK reg",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_MOV64_IMM(BPF_REG_2, -3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "invalid stack off=0 size=1",
+		.result = ACCEPT,
+		.retval = 42,
+	},
+	{
 		"stack pointer arithmetic",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_1, 4),
@@ -6610,7 +6851,446 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 	},
 	{
-		"map access: known scalar += value_ptr",
+		"map access: known scalar += value_ptr from different maps",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_hash_16b = { 5 },
+		.fixup_map_array_48b = { 8 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 tried to add from different maps",
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr -= known scalar from different maps",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_hash_16b = { 5 },
+		.fixup_map_array_48b = { 8 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 min value is outside of the array range",
+		.retval = 1,
+	},
+	{
+		"map access: known scalar += value_ptr from different maps, but same value properties",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_hash_48b = { 5 },
+		.fixup_map_array_48b = { 8 },
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr += known scalar, upper oob arith, test 1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_1, 48),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr += known scalar, upper oob arith, test 2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_1, 49),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr += known scalar, upper oob arith, test 3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_1, 47),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr -= known scalar, lower oob arith, test 1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_IMM(BPF_REG_1, 47),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 48),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = REJECT,
+		.errstr = "R0 min value is outside of the array range",
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+	},
+	{
+		"map access: value_ptr -= known scalar, lower oob arith, test 2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_MOV64_IMM(BPF_REG_1, 47),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 48),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr -= known scalar, lower oob arith, test 3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_IMM(BPF_REG_1, 47),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 47),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 1,
+	},
+	{
+		"map access: known scalar += value_ptr",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr += known scalar, 1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr += known scalar, 2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, 49),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = REJECT,
+		.errstr = "invalid access to map value",
+	},
+	{
+		"map access: value_ptr += known scalar, 3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = REJECT,
+		.errstr = "invalid access to map value",
+	},
+	{
+		"map access: value_ptr += known scalar, 4",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_MOV64_IMM(BPF_REG_1, 5),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, -2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 1,
+	},
+	{
+		"map access: value_ptr += known scalar, 5",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.retval = 0xabcdef12,
+	},
+	{
+		"map access: value_ptr += known scalar, 6",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.retval = 0xabcdef12,
+	},
+	{
+		"map access: unknown scalar += value_ptr, 1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.retval = 1,
+	},
+	{
+		"map access: unknown scalar += value_ptr, 2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.retval = 0xabcdef12,
+	},
+	{
+		"map access: unknown scalar += value_ptr, 3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_array_48b = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+		.retval = 0xabcdef12,
+	},
+	{
+		"map access: unknown scalar += value_ptr, 4",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -6618,19 +7298,22 @@ static struct bpf_test tests[] = {
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
-			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_IMM(BPF_REG_1, 19),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
-			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
-			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_array_48b = { 3 },
-		.result = ACCEPT,
-		.retval = 1,
+		.result = REJECT,
+		.errstr = "R1 max value is outside of the array range",
+		.errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
 	},
 	{
-		"map access: value_ptr += known scalar",
+		"map access: value_ptr += unknown scalar, 1",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -6638,8 +7321,9 @@ static struct bpf_test tests[] = {
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
-			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-			BPF_MOV64_IMM(BPF_REG_1, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
 			BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -6650,7 +7334,7 @@ static struct bpf_test tests[] = {
 		.retval = 1,
 	},
 	{
-		"map access: unknown scalar += value_ptr",
+		"map access: value_ptr += unknown scalar, 2",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -6659,19 +7343,18 @@ static struct bpf_test tests[] = {
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
-			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
-			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
-			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
-			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_array_48b = { 3 },
 		.result = ACCEPT,
-		.retval = 1,
+		.retval = 0xabcdef12,
 	},
 	{
-		"map access: value_ptr += unknown scalar",
+		"map access: value_ptr += unknown scalar, 3",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
@@ -6679,13 +7362,20 @@ static struct bpf_test tests[] = {
 			BPF_LD_MAP_FD(BPF_REG_1, 0),
 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 				     BPF_FUNC_map_lookup_elem),
-			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
-			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
+			BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
 			BPF_MOV64_IMM(BPF_REG_0, 1),
 			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
 		},
 		.fixup_map_array_48b = { 3 },
 		.result = ACCEPT,
@@ -6770,6 +7460,8 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_array_48b = { 3 },
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
 		.retval = 1,
 	},
 	{
@@ -6837,6 +7529,8 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_array_48b = { 3 },
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
 		.retval = 1,
 	},
 	{
@@ -8376,6 +9070,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8400,6 +9095,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8426,6 +9122,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8451,6 +9148,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8499,6 +9197,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8570,6 +9269,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8621,6 +9321,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8648,6 +9349,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8674,6 +9376,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8703,6 +9406,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8733,6 +9437,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 4 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -8761,6 +9466,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "unbounded min value",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 		.result_unpriv = REJECT,
 	},
@@ -8813,9 +9519,39 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
+		"check subtraction on pointers for unpriv",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
+			BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+			BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_hash_8b = { 1, 9 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R9 pointer -= pointer prohibited",
+	},
+	{
 		"bounds check based on zero-extended MOV",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
@@ -9146,6 +9882,36 @@ static struct bpf_test tests[] = {
 		.result = REJECT
 	},
 	{
+		"bounds check after 32-bit right shift with 64-bit input",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			/* r1 = 2 */
+			BPF_MOV64_IMM(BPF_REG_1, 2),
+			/* r1 = 1<<32 */
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+			/* r1 = 0 (NOT 2!) */
+			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+			/* r1 = 0xffff'fffe (NOT 0!) */
+			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+			/* computes OOB pointer */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map_hash_8b = { 3 },
+		.errstr = "R0 invalid mem access",
+		.result = REJECT,
+	},
+	{
 		"bounds check map access with off+size signed 32bit overflow. test1",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
@@ -9185,6 +9951,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "pointer offset 1073741822",
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
 		.result = REJECT
 	},
 	{
@@ -9206,6 +9973,7 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map_hash_8b = { 3 },
 		.errstr = "pointer offset -1073741822",
+		.errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
 		.result = REJECT
 	},
 	{
@@ -9377,6 +10145,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN()
 		},
 		.errstr = "fp pointer offset 1073741822",
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
 		.result = REJECT
 	},
 	{
@@ -13719,6 +14488,328 @@ static struct bpf_test tests[] = {
 		.insn_processed = 15,
 	},
 	{
+		"masking, test out of bounds 1",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 5),
+			BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 2",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 1),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 3",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 4",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 5",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, -1),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 6",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, -1),
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 7",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 5),
+			BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 8",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 9",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 10",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 11",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test out of bounds 12",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, -1),
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test in bounds 1",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 4),
+			BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 4,
+	},
+	{
+		"masking, test in bounds 2",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test in bounds 3",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0xfffffffe,
+	},
+	{
+		"masking, test in bounds 4",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
+			BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0xabcde,
+	},
+	{
+		"masking, test in bounds 5",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 0),
+			BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
+		"masking, test in bounds 6",
+		.insns = {
+			BPF_MOV32_IMM(BPF_REG_1, 46),
+			BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 46,
+	},
+	{
+		"masking, test in bounds 7",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_3, -46),
+			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+			BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 46,
+	},
+	{
+		"masking, test in bounds 8",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_3, -47),
+			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+			BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.retval = 0,
+	},
+	{
 		"reference tracking in call: free reference in subprog and outside",
 		.insns = {
 			BPF_SK_LOOKUP,
@@ -14413,6 +15504,16 @@ static int create_map(uint32_t type, uint32_t size_key,
 	return fd;
 }
 
+static void update_map(int fd, int index)
+{
+	struct test_val value = {
+		.index = (6 + 1) * sizeof(int),
+		.foo[6] = 0xabcdef12,
+	};
+
+	assert(!bpf_map_update_elem(fd, &index, &value, 0));
+}
+
 static int create_prog_dummy1(enum bpf_prog_type prog_type)
 {
 	struct bpf_insn prog[] = {
@@ -14564,6 +15665,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
 	if (*fixup_map_array_48b) {
 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
 					sizeof(struct test_val), 1);
+		update_map(map_fds[3], 0);
 		do {
 			prog[*fixup_map_array_48b].imm = map_fds[3];
 			fixup_map_array_48b++;
-- 
2.9.5


Thread overview: 12+ messages
2019-01-01 23:20 [PATCH bpf v2 0/9] bpf fix to prevent oob under speculation Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 1/9] bpf: move {prev_,}insn_idx into verifier env Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 2/9] bpf: move tmp variable into ax register in interpreter Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 3/9] bpf: enable access to ax register also from verifier rewrite Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 4/9] bpf: restrict map value pointer arithmetic for unprivileged Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 5/9] bpf: restrict stack " Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 6/9] bpf: restrict unknown scalars of mixed signed bounds " Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 7/9] bpf: fix check_map_access smin_value test when pointer contains offset Daniel Borkmann
2019-01-01 23:20 ` [PATCH bpf v2 8/9] bpf: prevent out of bounds speculation on pointer arithmetic Daniel Borkmann
2019-01-02 22:11   ` Jakub Kicinski
2019-01-02 23:37     ` Daniel Borkmann
2019-01-01 23:20 ` Daniel Borkmann [this message]
