* [PATCH bpf-next v2 01/11] bpf: Fix atomic probe zero-extension
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Zero-extending the result of an atomic probe operation fails with:

    verifier bug. zext_dst is set, but no reg is defined

The problem is that insn_def_regno() handles BPF_ATOMIC, but not
BPF_PROBE_ATOMIC, instructions. Fix this by adding the missing condition.
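
For reference, a condensed sketch of the resulting insn_def_regno() logic
(the helper name is made up for illustration; not a verbatim copy of the
verifier code):

/* Which register a BPF_STX atomic defines, if any. */
static int atomic_stx_def_regno(const struct bpf_insn *insn)
{
        u8 mode = BPF_MODE(insn->code);

        if ((mode == BPF_ATOMIC || mode == BPF_PROBE_ATOMIC) &&
            (insn->imm & BPF_FETCH))
                return insn->imm == BPF_CMPXCHG ? BPF_REG_0 : insn->src_reg;
        return -1;      /* nothing is defined, nothing to zero-extend */
}
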
Fixes: d503a04f8bc0 ("bpf: Add support for certain atomics in bpf_arena to x86 JIT")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
kernel/bpf/verifier.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d3927d819465..e25ad5fb9115 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3217,7 +3217,8 @@ static int insn_def_regno(const struct bpf_insn *insn)
case BPF_ST:
return -1;
case BPF_STX:
- if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+ if ((BPF_MODE(insn->code) == BPF_ATOMIC ||
+ BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) &&
(insn->imm & BPF_FETCH)) {
if (insn->imm == BPF_CMPXCHG)
return BPF_REG_0;
--
2.45.2
* [PATCH bpf-next v2 02/11] s390/bpf: Factor out emitting probe nops
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
The upcoming arena support for the loop-based BPF_XCHG implementation
requires emitting nop and extable entries separately. Move nop handling
into a separate function, and keep track of the nop offset.
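
The intended flow, condensed from the hunks below (a sketch, not the exact
code):

struct bpf_jit_probe probe;

bpf_jit_probe_init(&probe);             /* probe.prg = probe.nop_prg = -1 */
probe.prg = jit->prg;                   /* remember the probed insn offset */
/* ... emit the probing load ... */
bpf_jit_probe_emit_nop(jit, &probe);    /* probe.nop_prg = jit->prg; "bcr 0,%r0" */
/* bpf_jit_probe_mem() then adds extable entries for both probe.prg and
 * probe.nop_prg.
 */
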
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 62 +++++++++++++++++++++++-------------
1 file changed, 40 insertions(+), 22 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 4be8f5cadd02..260e7009784b 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -693,24 +693,52 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
return true;
}
+/*
+ * A single BPF probe instruction
+ */
+struct bpf_jit_probe {
+ int prg; /* JITed instruction offset */
+ int nop_prg; /* JITed nop offset */
+};
+
+static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
+{
+ probe->prg = -1;
+ probe->nop_prg = -1;
+}
+
+/*
+ * Handlers of certain exceptions leave psw.addr pointing to the instruction
+ * directly after the failing one. Therefore, create two exception table
+ * entries and also add a nop in case two probing instructions come directly
+ * after each other.
+ */
+static void bpf_jit_probe_emit_nop(struct bpf_jit *jit,
+ struct bpf_jit_probe *probe)
+{
+ probe->nop_prg = jit->prg;
+ /* bcr 0,%0 */
+ _EMIT2(0x0700);
+}
+
static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
- int probe_prg, int nop_prg)
+ struct bpf_jit_probe *probe)
{
struct exception_table_entry *ex;
- int reg, prg;
+ int i, prg, reg;
s64 delta;
u8 *insn;
- int i;
+ bpf_jit_probe_emit_nop(jit, probe);
if (!fp->aux->extable)
/* Do nothing during early JIT passes. */
return 0;
- insn = jit->prg_buf + probe_prg;
+ insn = jit->prg_buf + probe->prg;
reg = get_probe_mem_regno(insn);
if (WARN_ON_ONCE(reg < 0))
/* JIT bug - unexpected probe instruction. */
return -1;
- if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
+ if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg))
/* JIT bug - gap between probe and nop instructions. */
return -1;
for (i = 0; i < 2; i++) {
@@ -719,7 +747,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
return -1;
ex = &fp->aux->extable[jit->excnt];
/* Add extable entries for probe and nop instructions. */
- prg = i == 0 ? probe_prg : nop_prg;
+ prg = i == 0 ? probe->prg : probe->nop_prg;
delta = jit->prg_buf + prg - (u8 *)&ex->insn;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - code and extable must be close. */
@@ -729,7 +757,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
* Always land on the nop. Note that extable infrastructure
* ignores fixup field, it is handled by ex_handler_bpf().
*/
- delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
+ delta = jit->prg_buf + probe->nop_prg - (u8 *)&ex->fixup;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - landing pad and extable must be close. */
return -1;
@@ -782,19 +810,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
s32 branch_oc_off = insn->off;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
+ struct bpf_jit_probe probe;
int last, insn_count = 1;
u32 *addrs = jit->addrs;
s32 imm = insn->imm;
s16 off = insn->off;
- int probe_prg = -1;
unsigned int mask;
- int nop_prg;
int err;
+ bpf_jit_probe_init(&probe);
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) == BPF_PROBE_MEM ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
- probe_prg = jit->prg;
+ probe.prg = jit->prg;
switch (insn->code) {
/*
@@ -1897,18 +1925,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
return -1;
}
- if (probe_prg != -1) {
- /*
- * Handlers of certain exceptions leave psw.addr pointing to
- * the instruction directly after the failing one. Therefore,
- * create two exception table entries and also add a nop in
- * case two probing instructions come directly after each
- * other.
- */
- nop_prg = jit->prg;
- /* bcr 0,%0 */
- _EMIT2(0x0700);
- err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
+ if (probe.prg != -1) {
+ err = bpf_jit_probe_mem(jit, fp, &probe);
if (err < 0)
return err;
}
--
2.45.2
* [PATCH bpf-next v2 03/11] s390/bpf: Get rid of get_probe_mem_regno()
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Commit 7fc8c362e782 ("s390/bpf: encode register within extable entry")
started passing the number of the register to be cleared to
ex_handler_bpf() explicitly, replacing the earlier approach of deducing it
from the respective native load instruction with get_probe_mem_regno().

Convert the second and last remaining user of get_probe_mem_regno() in the
same manner, and remove the function.
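
For context, ex_handler_bpf() already takes the register number from the
extable entry; this patch just makes the JIT record it up front (simplified
view of the existing handler):

bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
        regs->psw.addr = extable_fixup(x);
        /* x->data is now filled from probe->reg = reg2hex[dst_reg] at JIT time */
        regs->gprs[x->data] = 0;
        return true;
}
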
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 33 +++++++--------------------------
1 file changed, 7 insertions(+), 26 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 260e7009784b..d9d79aa2be1b 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -667,25 +667,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
jit->prg += sizeof(struct bpf_plt);
}
-static int get_probe_mem_regno(const u8 *insn)
-{
- /*
- * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have
- * destination register at the same position.
- */
- if (insn[0] != 0xe3) /* common prefix */
- return -1;
- if (insn[5] != 0x90 && /* llgc */
- insn[5] != 0x91 && /* llgh */
- insn[5] != 0x16 && /* llgf */
- insn[5] != 0x04 && /* lg */
- insn[5] != 0x77 && /* lgb */
- insn[5] != 0x15 && /* lgh */
- insn[5] != 0x14) /* lgf */
- return -1;
- return insn[1] >> 4;
-}
-
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
regs->psw.addr = extable_fixup(x);
@@ -699,12 +680,14 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
struct bpf_jit_probe {
int prg; /* JITed instruction offset */
int nop_prg; /* JITed nop offset */
+ int reg; /* Register to clear on exception */
};
static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
{
probe->prg = -1;
probe->nop_prg = -1;
+ probe->reg = -1;
}
/*
@@ -725,7 +708,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
struct bpf_jit_probe *probe)
{
struct exception_table_entry *ex;
- int i, prg, reg;
+ int i, prg;
s64 delta;
u8 *insn;
@@ -734,10 +717,6 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
/* Do nothing during early JIT passes. */
return 0;
insn = jit->prg_buf + probe->prg;
- reg = get_probe_mem_regno(insn);
- if (WARN_ON_ONCE(reg < 0))
- /* JIT bug - unexpected probe instruction. */
- return -1;
if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg))
/* JIT bug - gap between probe and nop instructions. */
return -1;
@@ -763,7 +742,7 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
return -1;
ex->fixup = delta;
ex->type = EX_TYPE_BPF;
- ex->data = reg;
+ ex->data = probe->reg;
jit->excnt++;
}
return 0;
@@ -821,8 +800,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
bpf_jit_probe_init(&probe);
if (BPF_CLASS(insn->code) == BPF_LDX &&
(BPF_MODE(insn->code) == BPF_PROBE_MEM ||
- BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
probe.prg = jit->prg;
+ probe.reg = reg2hex[dst_reg];
+ }
switch (insn->code) {
/*
--
2.45.2
* [PATCH bpf-next v2 04/11] s390/bpf: Introduce pre- and post- probe functions
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Currently probe insns are handled by two "if" statements at the beginning
and at the end of bpf_jit_insn(). The first one needs to be kept in sync
with the huge switch statement on insn->code that follows it, which has not
been a problem so far, since the check is small.
The introduction of arena will make it significantly larger, and it
will no longer be obvious whether it is in sync with the opcode switch.
Move these statements to the new bpf_jit_probe_load_pre() and
bpf_jit_probe_post() functions, and call them only from cases that need
them.
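
Each load case then follows the same shape (a sketch of the pattern used in
the hunks below):

case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
        bpf_jit_probe_load_pre(jit, insn, &probe);      /* arms the probe for BPF_PROBE_MEM* only */
        /* llgc %dst,0(off,%src) */
        EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
        err = bpf_jit_probe_post(jit, fp, &probe);      /* nop + extable entries, if armed */
        if (err < 0)
                return err;
        break;
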
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 58 +++++++++++++++++++++++++++---------
1 file changed, 44 insertions(+), 14 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d9d79aa2be1b..582fa3830772 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -704,14 +704,28 @@ static void bpf_jit_probe_emit_nop(struct bpf_jit *jit,
_EMIT2(0x0700);
}
-static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
- struct bpf_jit_probe *probe)
+static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn,
+ struct bpf_jit_probe *probe)
+{
+ if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+ return;
+
+ probe->prg = jit->prg;
+ probe->reg = reg2hex[insn->dst_reg];
+}
+
+static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
+ struct bpf_jit_probe *probe)
{
struct exception_table_entry *ex;
int i, prg;
s64 delta;
u8 *insn;
+ if (probe->prg == -1)
+ /* The probe is not armed. */
+ return 0;
bpf_jit_probe_emit_nop(jit, probe);
if (!fp->aux->extable)
/* Do nothing during early JIT passes. */
@@ -798,12 +812,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
int err;
bpf_jit_probe_init(&probe);
- if (BPF_CLASS(insn->code) == BPF_LDX &&
- (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
- BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
- probe.prg = jit->prg;
- probe.reg = reg2hex[dst_reg];
- }
switch (insn->code) {
/*
@@ -1497,51 +1505,79 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* llgc %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* lgb %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* llgh %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* lgh %dst,0(off,%src) */
EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* llgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
if (insn_is_zext(&insn[1]))
insn_count = 2;
break;
case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* lgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
break;
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ bpf_jit_probe_load_pre(jit, insn, &probe);
/* lg %dst,0(off,%src) */
jit->seen |= SEEN_MEM;
EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
break;
/*
* BPF_JMP / CALL
@@ -1906,12 +1942,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
return -1;
}
- if (probe.prg != -1) {
- err = bpf_jit_probe_mem(jit, fp, &probe);
- if (err < 0)
- return err;
- }
-
return insn_count;
}
--
2.45.2
* [PATCH bpf-next v2 05/11] s390/bpf: Land on the next JITed instruction after exception
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Currently we land on the nop, which is unnecessary: we can just as well
begin executing the next instruction. Furthermore, the upcoming arena
support for the loop-based BPF_XCHG implementation will require landing
on an instruction that comes after the loop.
So land on the next JITed instruction, which covers both cases.
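
In terms of the fixup computation this boils down to (a sketch based on the
hunk below):

/* before: resume at the nop emitted right after the probed instruction */
delta = jit->prg_buf + probe->nop_prg - (u8 *)&ex->fixup;
/* after: resume at whatever instruction the JIT emits next */
delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup;
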
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 582fa3830772..ecd53f8f0602 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,11 @@ static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
return -1;
ex->insn = delta;
/*
- * Always land on the nop. Note that extable infrastructure
- * ignores fixup field, it is handled by ex_handler_bpf().
+ * Land on the current instruction. Note that the extable
+ * infrastructure ignores the fixup field; it is handled by
+ * ex_handler_bpf().
*/
- delta = jit->prg_buf + probe->nop_prg - (u8 *)&ex->fixup;
+ delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup;
if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
/* JIT bug - landing pad and extable must be close. */
return -1;
--
2.45.2
* [PATCH bpf-next v2 06/11] s390/bpf: Support BPF_PROBE_MEM32
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
BPF_PROBE_MEM32 is a new mode for LDX, ST and STX instructions. The JIT
is supposed to add the start address of the kernel arena mapping to the
%dst register, and use a probing variant of the respective memory
access.
Reuse the existing probing infrastructure for that. Put the arena
address into the literal pool, load it into %r1 and use that as an
index register. Do not clear any registers in ex_handler_bpf() for
failing ST and STX instructions.
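
Putting it together, a load such as BPF_LDX | BPF_PROBE_MEM32 | BPF_W is
emitted roughly as follows (a sketch using the names from the hunks below,
not the literal code):

/* lgrl %r1,kern_arena - fetch the arena start from the literal pool */
EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
/* llgf %dst,off(%src,%r1) - probing load with %r1 as index register */
EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_W1, off);
/* bcr 0,%r0 - nop; extable entries cover both the load and the nop */
_EMIT2(0x0700);
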
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 137 ++++++++++++++++++++++++++++-------
1 file changed, 110 insertions(+), 27 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index ecd53f8f0602..4b62b5162dfb 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -53,6 +53,7 @@ struct bpf_jit {
int excnt; /* Number of exception table entries */
int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
int prologue_plt; /* Start of prologue hotpatch PLT */
+ int kern_arena; /* Pool offset of kernel arena address */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -670,7 +671,8 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
{
regs->psw.addr = extable_fixup(x);
- regs->gprs[x->data] = 0;
+ if (x->data != -1)
+ regs->gprs[x->data] = 0;
return true;
}
@@ -681,6 +683,7 @@ struct bpf_jit_probe {
int prg; /* JITed instruction offset */
int nop_prg; /* JITed nop offset */
int reg; /* Register to clear on exception */
+ int arena_reg; /* Register to use for arena addressing */
};
static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
@@ -688,6 +691,7 @@ static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
probe->prg = -1;
probe->nop_prg = -1;
probe->reg = -1;
+ probe->arena_reg = REG_0;
}
/*
@@ -708,13 +712,31 @@ static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn,
struct bpf_jit_probe *probe)
{
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
- BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32)
return;
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ /* lgrl %r1,kern_arena */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+ probe->arena_reg = REG_W1;
+ }
probe->prg = jit->prg;
probe->reg = reg2hex[insn->dst_reg];
}
+static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn,
+ struct bpf_jit_probe *probe)
+{
+ if (BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+ return;
+
+ /* lgrl %r1,kern_arena */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+ probe->arena_reg = REG_W1;
+ probe->prg = jit->prg;
+}
+
static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
struct bpf_jit_probe *probe)
{
@@ -1384,51 +1406,99 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
- /* stcy %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stcy %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
- /* sthy %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sthy %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
- /* sty %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sty %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
- /* stg %src,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stg %src,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
/* lhi %w0,imm */
EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
- /* stcy %w0,off(dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stcy %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
/* lhi %w0,imm */
EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
- /* sthy %w0,off(dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sthy %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
/* llilf %w0,imm */
EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
- /* sty %w0,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* sty %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
/* lgfi %w0,imm */
EMIT6_IMM(0xc0010000, REG_W0, imm);
- /* stg %w0,off(%dst) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
+ bpf_jit_probe_store_pre(jit, insn, &probe);
+ /* stg %w0,off(%dst,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
jit->seen |= SEEN_MEM;
break;
/*
@@ -1506,9 +1576,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
bpf_jit_probe_load_pre(jit, insn, &probe);
- /* llgc %dst,0(off,%src) */
- EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
+ /* llgc %dst,off(%src,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg,
+ probe.arena_reg, off);
err = bpf_jit_probe_post(jit, fp, &probe);
if (err < 0)
return err;
@@ -1519,7 +1591,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
bpf_jit_probe_load_pre(jit, insn, &probe);
- /* lgb %dst,0(off,%src) */
+ /* lgb %dst,off(%src) */
EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off);
err = bpf_jit_probe_post(jit, fp, &probe);
if (err < 0)
@@ -1528,9 +1600,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
bpf_jit_probe_load_pre(jit, insn, &probe);
- /* llgh %dst,0(off,%src) */
- EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
+ /* llgh %dst,off(%src,%arena) */
+ EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg,
+ probe.arena_reg, off);
err = bpf_jit_probe_post(jit, fp, &probe);
if (err < 0)
return err;
@@ -1541,7 +1615,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
bpf_jit_probe_load_pre(jit, insn, &probe);
- /* lgh %dst,0(off,%src) */
+ /* lgh %dst,off(%src) */
EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off);
err = bpf_jit_probe_post(jit, fp, &probe);
if (err < 0)
@@ -1550,10 +1624,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
bpf_jit_probe_load_pre(jit, insn, &probe);
/* llgf %dst,off(%src) */
jit->seen |= SEEN_MEM;
- EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
+ EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg,
+ probe.arena_reg, off);
err = bpf_jit_probe_post(jit, fp, &probe);
if (err < 0)
return err;
@@ -1572,10 +1648,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
bpf_jit_probe_load_pre(jit, insn, &probe);
- /* lg %dst,0(off,%src) */
+ /* lg %dst,off(%src,%arena) */
jit->seen |= SEEN_MEM;
- EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
+ EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg,
+ probe.arena_reg, off);
err = bpf_jit_probe_post(jit, fp, &probe);
if (err < 0)
return err;
@@ -1988,12 +2066,17 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
bool extra_pass, u32 stack_depth)
{
int i, insn_count, lit32_size, lit64_size;
+ u64 kern_arena;
jit->lit32 = jit->lit32_start;
jit->lit64 = jit->lit64_start;
jit->prg = 0;
jit->excnt = 0;
+ kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
+ if (kern_arena)
+ jit->kern_arena = _EMIT_CONST_U64(kern_arena);
+
bpf_jit_prologue(jit, fp, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
return -1;
--
2.45.2
* [PATCH bpf-next v2 07/11] s390/bpf: Support address space cast instruction
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
The new address space cast instruction translates arena offsets to
userspace addresses. NULL pointers must not be translated.

The common code sets up the mappings in such a way that it is enough to
replace the upper 32 bits to achieve the desired result. s390x has an
instruction for exactly this: INSERT IMMEDIATE.

Implement the sequence using three instructions: LOAD AND TEST, BRANCH
RELATIVE ON CONDITION and INSERT IMMEDIATE.
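
A C-level view of the emitted sequence (illustrative only; the helper name
is made up):

/* Equivalent of: ltgr %dst,%src ; brc 8,0f ; iihf %dst,user_arena>>32 ; 0: */
static inline u64 arena_cast_user(u64 src, u64 user_arena)
{
        if (!src)               /* ltgr sets the condition code, brc 8 skips on zero */
                return 0;       /* NULL stays NULL */
        /* iihf: overwrite the upper 32 bits with the user arena base */
        return (user_arena & 0xffffffff00000000ULL) | (u32)src;
}
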
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 4b62b5162dfb..39c1d9aa7f1e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -54,6 +54,7 @@ struct bpf_jit {
int prologue_plt_ret; /* Return address for prologue hotpatch PLT */
int prologue_plt; /* Start of prologue hotpatch PLT */
int kern_arena; /* Pool offset of kernel arena address */
+ u64 user_arena; /* User arena address */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -863,6 +864,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
}
break;
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ int patch_brc;
+
+ /* ltgr %dst,%src */
+ EMIT4(0xb9020000, dst_reg, src_reg);
+ /* brc 8,0f */
+ patch_brc = jit->prg;
+ EMIT4_PCREL_RIC(0xa7040000, 8, 0);
+ /* iihf %dst,user_arena>>32 */
+ EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32);
+ /* 0: */
+ if (jit->prg_buf)
+ *(u16 *)(jit->prg_buf + patch_brc + 2) =
+ (jit->prg - patch_brc) >> 1;
+ break;
+ }
switch (insn->off) {
case 0: /* DST = SRC */
/* lgr %dst,%src */
@@ -2076,6 +2093,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena);
if (kern_arena)
jit->kern_arena = _EMIT_CONST_U64(kern_arena);
+ jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena);
bpf_jit_prologue(jit, fp, stack_depth);
if (bpf_set_addr(jit, 0) < 0)
--
2.45.2
* [PATCH bpf-next v2 08/11] s390/bpf: Enable arena
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Now that BPF_PROBE_MEM32 and address space cast instructions are
implemented, tell the verifier that the JIT supports arena.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 39c1d9aa7f1e..1dd359c25ada 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2820,3 +2820,8 @@ bool bpf_jit_supports_subprog_tailcalls(void)
{
return true;
}
+
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
--
2.45.2
* [PATCH bpf-next v2 09/11] s390/bpf: Support arena atomics
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
s390x supports most BPF atomics using single instructions, which makes
implementing arena support a matter of adding the arena address to the base
register (unfortunately atomics do not support index registers) and
wrapping the respective native instruction in a probing sequence.

An exception is BPF_XCHG, which is implemented using two different memory
accesses and a loop. Make sure there are enough extable entries for both
instructions. Compute the base address once for both memory accesses. Since
on exception we need to land after the loop, emit the nops manually.
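
The resulting arena BPF_XCHG sequence looks roughly like this (condensed
from the hunk below):

/* lgrl %r1,kern_arena ; agr %r1,%dst - compute the base once          */
/*    {ly|lg}  %w0,off(%r1)           - probed initial load            */
/*    bcr 0,%r0                       - nop for the load probe         */
/* 0: {csy|csg} %w0,%src,off(%r1)     - probed compare-and-swap        */
/*    bcr 0,%r0                       - nop for the csg probe          */
/*    brc 4,0b                        - retry if %w0 was stale         */
/*    {llgfr|lgr} %src,%w0            - hand back the old value        */
/* Both extable fixups point here, i.e. after the loop.                */
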
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
arch/s390/net/bpf_jit_comp.c | 104 +++++++++++++++++++++++++++++++----
1 file changed, 94 insertions(+), 10 deletions(-)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 1dd359c25ada..ddfc0e99872e 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -704,6 +704,10 @@ static void bpf_jit_probe_init(struct bpf_jit_probe *probe)
static void bpf_jit_probe_emit_nop(struct bpf_jit *jit,
struct bpf_jit_probe *probe)
{
+ if (probe->prg == -1 || probe->nop_prg != -1)
+ /* The probe is not armed or nop is already emitted. */
+ return;
+
probe->nop_prg = jit->prg;
/* bcr 0,%0 */
_EMIT2(0x0700);
@@ -738,6 +742,21 @@ static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn,
probe->prg = jit->prg;
}
+static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit,
+ struct bpf_insn *insn,
+ struct bpf_jit_probe *probe)
+{
+ if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
+ return;
+
+ /* lgrl %r1,kern_arena */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena);
+ /* agr %r1,%dst */
+ EMIT4(0xb9080000, REG_W1, insn->dst_reg);
+ probe->arena_reg = REG_W1;
+ probe->prg = jit->prg;
+}
+
static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp,
struct bpf_jit_probe *probe)
{
@@ -1523,15 +1542,30 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_STX | BPF_ATOMIC | BPF_DW:
case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+ case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
{
bool is32 = BPF_SIZE(insn->code) == BPF_W;
+ /*
+ * Unlike loads and stores, atomics have only a base register,
+ * but no index register. For the non-arena case, simply use
+ * %dst as a base. For the arena case, use the work register
+ * %r1: first, load the arena base into it, and then add %dst
+ * to it.
+ */
+ probe.arena_reg = dst_reg;
+
switch (insn->imm) {
-/* {op32|op64} {%w0|%src},%src,off(%dst) */
#define EMIT_ATOMIC(op32, op64) do { \
+ bpf_jit_probe_atomic_pre(jit, insn, &probe); \
+ /* {op32|op64} {%w0|%src},%src,off(%arena) */ \
EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
(insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
- src_reg, dst_reg, off); \
+ src_reg, probe.arena_reg, off); \
+ err = bpf_jit_probe_post(jit, fp, &probe); \
+ if (err < 0) \
+ return err; \
if (insn->imm & BPF_FETCH) { \
/* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
_EMIT2(0x07e0); \
@@ -1560,25 +1594,50 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ATOMIC(0x00f7, 0x00e7);
break;
#undef EMIT_ATOMIC
- case BPF_XCHG:
- /* {ly|lg} %w0,off(%dst) */
+ case BPF_XCHG: {
+ struct bpf_jit_probe load_probe = probe;
+ int loop_start;
+
+ bpf_jit_probe_atomic_pre(jit, insn, &load_probe);
+ /* {ly|lg} %w0,off(%arena) */
EMIT6_DISP_LH(0xe3000000,
is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
- dst_reg, off);
- /* 0: {csy|csg} %w0,%src,off(%dst) */
+ load_probe.arena_reg, off);
+ bpf_jit_probe_emit_nop(jit, &load_probe);
+ /* Reuse {ly|lg}'s arena_reg for {csy|csg}. */
+ if (load_probe.prg != -1) {
+ probe.prg = jit->prg;
+ probe.arena_reg = load_probe.arena_reg;
+ }
+ loop_start = jit->prg;
+ /* 0: {csy|csg} %w0,%src,off(%arena) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
- REG_W0, src_reg, dst_reg, off);
+ REG_W0, src_reg, probe.arena_reg, off);
+ bpf_jit_probe_emit_nop(jit, &probe);
/* brc 4,0b */
- EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
+ EMIT4_PCREL_RIC(0xa7040000, 4, loop_start);
/* {llgfr|lgr} %src,%w0 */
EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
+ /* Both probes should land here on exception. */
+ err = bpf_jit_probe_post(jit, fp, &load_probe);
+ if (err < 0)
+ return err;
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
if (is32 && insn_is_zext(&insn[1]))
insn_count = 2;
break;
+ }
case BPF_CMPXCHG:
- /* 0: {csy|csg} %b0,%src,off(%dst) */
+ bpf_jit_probe_atomic_pre(jit, insn, &probe);
+ /* 0: {csy|csg} %b0,%src,off(%arena) */
EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
- BPF_REG_0, src_reg, dst_reg, off);
+ BPF_REG_0, src_reg,
+ probe.arena_reg, off);
+ err = bpf_jit_probe_post(jit, fp, &probe);
+ if (err < 0)
+ return err;
break;
default:
pr_err("Unknown atomic operation %02x\n", insn->imm);
@@ -2142,9 +2201,25 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
struct bpf_prog *fp)
{
struct bpf_binary_header *header;
+ struct bpf_insn *insn;
u32 extable_size;
u32 code_size;
+ int i;
+ for (i = 0; i < fp->len; i++) {
+ insn = &fp->insnsi[i];
+
+ if (BPF_CLASS(insn->code) == BPF_STX &&
+ BPF_MODE(insn->code) == BPF_PROBE_ATOMIC &&
+ (BPF_SIZE(insn->code) == BPF_DW ||
+ BPF_SIZE(insn->code) == BPF_W) &&
+ insn->imm == BPF_XCHG)
+ /*
+ * bpf_jit_insn() emits a load and a compare-and-swap,
+ * both of which need to be probed.
+ */
+ fp->aux->num_exentries += 1;
+ }
/* We need two entries per insn. */
fp->aux->num_exentries *= 2;
@@ -2825,3 +2900,12 @@ bool bpf_jit_supports_arena(void)
{
return true;
}
+
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+ /*
+ * Currently the verifier uses this function only to check which
+ * atomic stores to arena are supported, and they all are.
+ */
+ return true;
+}
--
2.45.2
* [PATCH bpf-next v2 10/11] selftests/bpf: Add UAF tests for arena atomics
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Check that __sync_*() functions don't cause kernel panics when handling
freed arena pages.
x86_64 does not support some arena atomics yet, and aarch64 may or may
not support them, based on the availability of LSE atomics at run time.
Do not enable this test for these architectures for simplicity.
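
The test follows a simple per-operation pattern (a reduced sketch; the full
program below covers every __sync_*() builtin at both widths):

void __arena *page;
__u32 __arena *page32;

page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
bpf_arena_free_pages(&arena, page, 1);          /* the page is now gone     */
uaf_recovery_fails = 1;

page32 = (__u32 __arena *)page;
uaf_sink += __sync_fetch_and_add(page32, 1);    /* must fault and recover   */
uaf_recovery_fails -= 1;                        /* reached only on recovery */
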
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
.../selftests/bpf/prog_tests/arena_atomics.c | 18 +++++
.../selftests/bpf/progs/arena_atomics.c | 76 +++++++++++++++++++
2 files changed, 94 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
index 0807a48a58ee..26e7c06c6cb4 100644
--- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
+++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
@@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics *skel)
ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
}
+static void test_uaf(struct arena_atomics *skel)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err, prog_fd;
+
+ /* No need to attach it, just run it directly */
+ prog_fd = bpf_program__fd(skel->progs.uaf);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err"))
+ return;
+ if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+ return;
+
+ ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
+}
+
void test_arena_atomics(void)
{
struct arena_atomics *skel;
@@ -180,6 +196,8 @@ void test_arena_atomics(void)
test_cmpxchg(skel);
if (test__start_subtest("xchg"))
test_xchg(skel);
+ if (test__start_subtest("uaf"))
+ test_uaf(skel);
cleanup:
arena_atomics__destroy(skel);
diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
index 55f10563208d..0ea310713fe6 100644
--- a/tools/testing/selftests/bpf/progs/arena_atomics.c
+++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
@@ -176,3 +176,79 @@ int xchg(const void *ctx)
return 0;
}
+
+__u64 __arena uaf_sink;
+volatile __u64 __arena uaf_recovery_fails;
+
+SEC("syscall")
+int uaf(const void *ctx)
+{
+ if (pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
+ !defined(__TARGET_ARCH_x86)
+ __u32 __arena *page32;
+ __u64 __arena *page64;
+ void __arena *page;
+
+ page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
+ bpf_arena_free_pages(&arena, page, 1);
+ uaf_recovery_fails = 24;
+
+ page32 = (__u32 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page32, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page32, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page32, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page32, 1);
+ uaf_recovery_fails -= 1;
+
+ page64 = (__u64 __arena *)page;
+ uaf_sink += __sync_fetch_and_add(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_add_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_sub(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_sub_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_and(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_and_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_or(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_or_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_fetch_and_xor(page64, 1);
+ uaf_recovery_fails -= 1;
+ __sync_xor_and_fetch(page64, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_val_compare_and_swap(page64, 0, 1);
+ uaf_recovery_fails -= 1;
+ uaf_sink += __sync_lock_test_and_set(page64, 1);
+ uaf_recovery_fails -= 1;
+#endif
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
--
2.45.2
* Re: [PATCH bpf-next v2 10/11] selftests/bpf: Add UAF tests for arena atomics
From: Daniel Borkmann @ 2024-07-01 15:22 UTC
To: Ilya Leoshkevich, Alexei Starovoitov, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev
On 7/1/24 3:24 PM, Ilya Leoshkevich wrote:
> Check that __sync_*() functions don't cause kernel panics when handling
> freed arena pages.
>
> x86_64 does not support some arena atomics yet, and aarch64 may or may
> not support them, based on the availability of LSE atomics at run time.
> Do not enable this test for these architectures for simplicity.
>
> Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
> ---
> .../selftests/bpf/prog_tests/arena_atomics.c | 18 +++++
> .../selftests/bpf/progs/arena_atomics.c | 76 +++++++++++++++++++
> 2 files changed, 94 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> index 0807a48a58ee..26e7c06c6cb4 100644
> --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> @@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics *skel)
> ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
> }
>
> +static void test_uaf(struct arena_atomics *skel)
> +{
> + LIBBPF_OPTS(bpf_test_run_opts, topts);
> + int err, prog_fd;
> +
> + /* No need to attach it, just run it directly */
> + prog_fd = bpf_program__fd(skel->progs.uaf);
> + err = bpf_prog_test_run_opts(prog_fd, &topts);
> + if (!ASSERT_OK(err, "test_run_opts err"))
> + return;
> + if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
> + return;
> +
> + ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails");
> +}
> +
> void test_arena_atomics(void)
> {
> struct arena_atomics *skel;
> @@ -180,6 +196,8 @@ void test_arena_atomics(void)
> test_cmpxchg(skel);
> if (test__start_subtest("xchg"))
> test_xchg(skel);
> + if (test__start_subtest("uaf"))
> + test_uaf(skel);
>
> cleanup:
> arena_atomics__destroy(skel);
> diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
> index 55f10563208d..0ea310713fe6 100644
> --- a/tools/testing/selftests/bpf/progs/arena_atomics.c
> +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
> @@ -176,3 +176,79 @@ int xchg(const void *ctx)
>
> return 0;
> }
> +
> +__u64 __arena uaf_sink;
> +volatile __u64 __arena uaf_recovery_fails;
> +
> +SEC("syscall")
> +int uaf(const void *ctx)
> +{
> + if (pid != (bpf_get_current_pid_tgid() >> 32))
> + return 0;
> +#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \
> + !defined(__TARGET_ARCH_x86)
> + __u32 __arena *page32;
> + __u64 __arena *page64;
> + void __arena *page;
> +
Looks like the selftest is failing the s390x-gcc CI build, ptal:
https://github.com/kernel-patches/bpf/actions/runs/9745362735/job/26893165998
[...]
CLNG-BPF [test_maps] btf__core_reloc_size.bpf.o
CLNG-BPF [test_maps] bind6_prog.bpf.o
progs/arena_atomics.c:190:8: error: 'section' attribute only applies to functions, global variables, Objective-C methods, and Objective-C properties
190 | __u32 __arena *page32;
| ^
progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
32 | #define __arena SEC(".addr_space.1")
| ^
/tmp/work/bpf/bpf/tools/testing/selftests/bpf/tools/include/bpf/bpf_helpers.h:40:17: note: expanded from macro 'SEC'
40 | __attribute__((section(name), used)) \
| ^
progs/arena_atomics.c:191:8: error: 'section' attribute only applies to functions, global variables, Objective-C methods, and Objective-C properties
191 | __u64 __arena *page64;
| ^
progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
32 | #define __arena SEC(".addr_space.1")
| ^
/tmp/work/bpf/bpf/tools/testing/selftests/bpf/tools/include/bpf/bpf_helpers.h:40:17: note: expanded from macro 'SEC'
40 | __attribute__((section(name), used)) \
| ^
progs/arena_atomics.c:192:7: error: 'section' attribute only applies to functions, global variables, Objective-C methods, and Objective-C properties
192 | void __arena *page;
| ^
progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
32 | #define __arena SEC(".addr_space.1")
| ^
/tmp/work/bpf/bpf/tools/testing/selftests/bpf/tools/include/bpf/bpf_helpers.h:40:17: note: expanded from macro 'SEC'
40 | __attribute__((section(name), used)) \
| ^
3 errors generated.
CLNG-BPF [test_maps] cpumask_success.bpf.o
make: *** [Makefile:654: /tmp/work/bpf/bpf/tools/testing/selftests/bpf/arena_atomics.bpf.o] Error 1
make: *** Waiting for unfinished jobs....
CLNG-BPF [test_maps] fib_lookup.bpf.o
make: Leaving directory '/tmp/work/bpf/bpf/tools/testing/selftests/bpf'
Error: Process completed with exit code 2.
* Re: [PATCH bpf-next v2 10/11] selftests/bpf: Add UAF tests for arena atomics
From: Ilya Leoshkevich @ 2024-07-01 17:02 UTC
To: Daniel Borkmann, Alexei Starovoitov, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev
On Mon, 2024-07-01 at 17:22 +0200, Daniel Borkmann wrote:
> On 7/1/24 3:24 PM, Ilya Leoshkevich wrote:
> > Check that __sync_*() functions don't cause kernel panics when
> > handling
> > freed arena pages.
> >
> > x86_64 does not support some arena atomics yet, and aarch64 may or
> > may
> > not support them, based on the availability of LSE atomics at run
> > time.
> > Do not enable this test for these architectures for simplicity.
> >
> > Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
> > ---
> > .../selftests/bpf/prog_tests/arena_atomics.c | 18 +++++
> > .../selftests/bpf/progs/arena_atomics.c | 76
> > +++++++++++++++++++
> > 2 files changed, 94 insertions(+)
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > index 0807a48a58ee..26e7c06c6cb4 100644
> > --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> > @@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics
> > *skel)
> > ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
> > }
> >
> > +static void test_uaf(struct arena_atomics *skel)
> > +{
> > + LIBBPF_OPTS(bpf_test_run_opts, topts);
> > + int err, prog_fd;
> > +
> > + /* No need to attach it, just run it directly */
> > + prog_fd = bpf_program__fd(skel->progs.uaf);
> > + err = bpf_prog_test_run_opts(prog_fd, &topts);
> > + if (!ASSERT_OK(err, "test_run_opts err"))
> > + return;
> > + if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
> > + return;
> > +
> > + ASSERT_EQ(skel->arena->uaf_recovery_fails, 0,
> > "uaf_recovery_fails");
> > +}
> > +
> > void test_arena_atomics(void)
> > {
> > struct arena_atomics *skel;
> > @@ -180,6 +196,8 @@ void test_arena_atomics(void)
> > test_cmpxchg(skel);
> > if (test__start_subtest("xchg"))
> > test_xchg(skel);
> > + if (test__start_subtest("uaf"))
> > + test_uaf(skel);
> >
> > cleanup:
> > arena_atomics__destroy(skel);
> > diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c
> > b/tools/testing/selftests/bpf/progs/arena_atomics.c
> > index 55f10563208d..0ea310713fe6 100644
> > --- a/tools/testing/selftests/bpf/progs/arena_atomics.c
> > +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
> > @@ -176,3 +176,79 @@ int xchg(const void *ctx)
> >
> > return 0;
> > }
> > +
> > +__u64 __arena uaf_sink;
> > +volatile __u64 __arena uaf_recovery_fails;
> > +
> > +SEC("syscall")
> > +int uaf(const void *ctx)
> > +{
> > + if (pid != (bpf_get_current_pid_tgid() >> 32))
> > + return 0;
> > +#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64)
> > && \
> > + !defined(__TARGET_ARCH_x86)
> > + __u32 __arena *page32;
> > + __u64 __arena *page64;
> > + void __arena *page;
> > +
>
> Looks like the selftest is failing s390x-gcc CI build, ptal :
>
>
> https://github.com/kernel-patches/bpf/actions/runs/9745362735/job/26893165998
>
> [...]
> CLNG-BPF [test_maps] btf__core_reloc_size.bpf.o
> CLNG-BPF [test_maps] bind6_prog.bpf.o
> progs/arena_atomics.c:190:8: error: 'section' attribute only
> applies to functions, global variables, Objective-C methods, and
> Objective-C properties
> 190 | __u32 __arena *page32;
> | ^
> progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
> 32 | #define __arena SEC(".addr_space.1")
> | ^
>
> /tmp/work/bpf/bpf/tools/testing/selftests/bpf/tools/include/bpf/bpf_h
> elpers.h:40:17: note: expanded from macro 'SEC'
> 40 | __attribute__((section(name),
> used)) \
> | ^
> progs/arena_atomics.c:191:8: error: 'section' attribute only
> applies to functions, global variables, Objective-C methods, and
> Objective-C properties
> 191 | __u64 __arena *page64;
> | ^
> progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
> 32 | #define __arena SEC(".addr_space.1")
> | ^
>
> /tmp/work/bpf/bpf/tools/testing/selftests/bpf/tools/include/bpf/bpf_h
> elpers.h:40:17: note: expanded from macro 'SEC'
> 40 | __attribute__((section(name),
> used)) \
> | ^
> progs/arena_atomics.c:192:7: error: 'section' attribute only
> applies to functions, global variables, Objective-C methods, and
> Objective-C properties
> 192 | void __arena *page;
> | ^
> progs/arena_atomics.c:32:17: note: expanded from macro '__arena'
> 32 | #define __arena SEC(".addr_space.1")
> | ^
>
> /tmp/work/bpf/bpf/tools/testing/selftests/bpf/tools/include/bpf/bpf_h
> elpers.h:40:17: note: expanded from macro 'SEC'
> 40 | __attribute__((section(name),
> used)) \
> | ^
> 3 errors generated.
> CLNG-BPF [test_maps] cpumask_success.bpf.o
> make: *** [Makefile:654:
> /tmp/work/bpf/bpf/tools/testing/selftests/bpf/arena_atomics.bpf.o]
> Error 1
> make: *** Waiting for unfinished jobs....
> CLNG-BPF [test_maps] fib_lookup.bpf.o
> make: Leaving directory
> '/tmp/work/bpf/bpf/tools/testing/selftests/bpf'
> Error: Process completed with exit code 2.
Apparently this particular test redefines the __arena macro.
The "common" definition is __attribute__((address_space(1))) for LLVM,
and nothing for GCC. I assume this doesn't work if one wants to have
globals inside the arena, hence the redefinition. Unfortunately the
redefinition breaks the usage of __arena in pointer types.
I think I will replace the redefinition with a separate __arena_global
macro.
* [PATCH bpf-next v2 11/11] selftests/bpf: Remove arena tests from DENYLIST.s390x
From: Ilya Leoshkevich @ 2024-07-01 13:24 UTC
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf, Heiko Carstens, Vasily Gorbik, Alexander Gordeev,
Ilya Leoshkevich
Now that the s390x JIT supports arena, remove the respective tests from
the denylist.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
tools/testing/selftests/bpf/DENYLIST.s390x | 3 ---
1 file changed, 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index c34adf39eeb2..cb810a98e78f 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -4,6 +4,3 @@ exceptions # JIT does not support calling kfunc bpf_throw (excepti
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
verifier_iterating_callbacks
-verifier_arena # JIT does not support arena
-arena_htab # JIT does not support arena
-arena_atomics
--
2.45.2