* [PATCH] bpf/arm64: support packet data load instructions
@ 2026-03-10 12:20 Christophe Fontaine
2026-03-17 9:07 ` David Marchand
` (2 more replies)
0 siblings, 3 replies; 19+ messages in thread
From: Christophe Fontaine @ 2026-03-10 12:20 UTC (permalink / raw)
To: dev
Cc: Christophe Fontaine, Konstantin Ananyev, Marat Khalili,
Wathsala Vithanage
arm64 jit compiler didn't support reading from a packet.
Enable arm64 JIT to generate native code for
(BPF_ABS | <size> | BPF_LD) and (BPF_IND | <size> | BPF_LD)
instructions.
Compared to the x86_64 JIT, only a "slow path" is implemented,
'__rte_pktmbuf_read' is systematically called.
Signed-off-by: Christophe Fontaine <cfontain@redhat.com>
---
app/test/test_bpf.c | 42 ++++++++++++++++++++++++++
lib/bpf/bpf_jit_arm64.c | 65 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 107 insertions(+)
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 093cf5fe1d..8d5c0d6de1 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -2750,6 +2750,30 @@ static const struct rte_bpf_xsym test_call5_xsym[] = {
},
};
+/* load mbuf (BPF_ABS/BPF_IND) test-cases */
+static const struct ebpf_insn test_ld_mbuf0_prog[] = {
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_6,
+ .src_reg = EBPF_REG_1,
+ },
+ /* load IPv4 version and IHL */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_B),
+ .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static int
+test_ld_mbuf0_check(uint64_t rc, const void *arg)
+{
+ return cmp_res(__func__, 0x45, rc, arg, arg, 0);
+}
+
/* load mbuf (BPF_ABS/BPF_IND) test-cases */
static const struct ebpf_insn test_ld_mbuf1_prog[] = {
@@ -3417,6 +3441,22 @@ static const struct bpf_test tests[] = {
/* for now don't support function calls on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
+ {
+ .name = "test_ld_mbuf0",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_mbuf0_prog,
+ .nb_ins = RTE_DIM(test_ld_mbuf0_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf1_prepare,
+ .check_result = test_ld_mbuf0_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
{
.name = "test_ld_mbuf1",
.arg_sz = sizeof(struct dummy_mbuf),
@@ -3491,6 +3531,7 @@ run_test(const struct bpf_test *tst)
if (ret != 0) {
printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
__func__, __LINE__, tst->name, ret, strerror(ret));
+ return -1;
}
/* repeat the same test with jit, when possible */
@@ -3506,6 +3547,7 @@ run_test(const struct bpf_test *tst)
"error: %d(%s);\n",
__func__, __LINE__, tst->name,
rv, strerror(rv));
+ return -1;
}
}
diff --git a/lib/bpf/bpf_jit_arm64.c b/lib/bpf/bpf_jit_arm64.c
index a04ef33a9c..5d5710b876 100644
--- a/lib/bpf/bpf_jit_arm64.c
+++ b/lib/bpf/bpf_jit_arm64.c
@@ -8,6 +8,7 @@
#include <rte_common.h>
#include <rte_byteorder.h>
+#include <rte_mbuf.h>
#include "bpf_impl.h"
@@ -965,6 +966,54 @@ emit_return_zero_if_src_zero(struct a64_jit_ctx *ctx, bool is64, uint8_t src)
emit_b(ctx, jump_to_epilogue);
}
+/*
+ * Emit code for BPF_LD | BPF_ABS/IND: load from packet.
+ * Calls __rte_pktmbuf_read(mbuf, off, len, buf).
+ */
+static void
+emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1, uint8_t tmp2,
+ uint8_t src, uint32_t imm)
+{
+ uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
+ uint8_t r6 = ebpf_to_a64_reg(ctx, EBPF_REG_6);
+ uint32_t mode = BPF_MODE(op);
+ uint32_t opsz = BPF_SIZE(op);
+ uint32_t sz = bpf_size(opsz);
+ int16_t jump_to_epilogue;
+
+ /* r0 = mbuf (R6) */
+ emit_mov_64(ctx, A64_R(0), r6);
+
+ /* r1 = off: for ABS use imm, for IND use src + imm */
+ if (mode == BPF_ABS) {
+ emit_mov_imm(ctx, 1, A64_R(1), imm);
+ } else {
+ emit_mov_imm(ctx, 1, tmp2, imm);
+ emit_add(ctx, 1, tmp2, src);
+ emit_mov_64(ctx, A64_R(1), tmp2);
+ }
+
+ /* r2 = len */
+ emit_mov_imm(ctx, 1, A64_R(2), sz);
+
+ /* r3 = buf (SP) */
+ emit_mov_64(ctx, A64_R(3), A64_SP);
+
+ /* call __rte_pktmbuf_read */
+ emit_call(ctx, tmp1, __rte_pktmbuf_read);
+ /* check return value of __rte_pktmbuf_read */
+ emit_cbnz(ctx, 1, A64_R(0), 3);
+ emit_mov_imm(ctx, 1, r0, 0);
+ jump_to_epilogue = (ctx->program_start + ctx->program_sz) - ctx->idx;
+ emit_b(ctx, jump_to_epilogue);
+
+ /* r0 points to the data, load 1/2/4 bytes */
+ emit_ldr(ctx, opsz, A64_R(0), A64_R(0), A64_ZR);
+ if (sz != sizeof(uint8_t))
+ emit_be(ctx, A64_R(0), sz * CHAR_BIT);
+ emit_mov_64(ctx, r0, A64_R(0));
+}
+
static void
emit_stadd(struct a64_jit_ctx *ctx, bool is64, uint8_t rs, uint8_t rn)
{
@@ -1137,6 +1186,13 @@ check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
switch (op) {
/* Call imm */
case (BPF_JMP | EBPF_CALL):
+ /* BPF_LD | BPF_ABS/IND use __rte_pktmbuf_read */
+ case (BPF_LD | BPF_ABS | BPF_B):
+ case (BPF_LD | BPF_ABS | BPF_H):
+ case (BPF_LD | BPF_ABS | BPF_W):
+ case (BPF_LD | BPF_IND | BPF_B):
+ case (BPF_LD | BPF_IND | BPF_H):
+ case (BPF_LD | BPF_IND | BPF_W):
ctx->foundcall = 1;
return;
}
@@ -1338,6 +1394,15 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
emit_mov_imm(ctx, 1, dst, u64);
i++;
break;
+ /* load absolute/indirect from packet */
+ case (BPF_LD | BPF_ABS | BPF_B):
+ case (BPF_LD | BPF_ABS | BPF_H):
+ case (BPF_LD | BPF_ABS | BPF_W):
+ case (BPF_LD | BPF_IND | BPF_B):
+ case (BPF_LD | BPF_IND | BPF_H):
+ case (BPF_LD | BPF_IND | BPF_W):
+ emit_ld_mbuf(ctx, op, tmp1, tmp2, src, imm);
+ break;
/* *(size *)(dst + off) = src */
case (BPF_STX | BPF_MEM | BPF_B):
case (BPF_STX | BPF_MEM | BPF_H):
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* Re: [PATCH] bpf/arm64: support packet data load instructions
2026-03-10 12:20 [PATCH] bpf/arm64: support packet data load instructions Christophe Fontaine
@ 2026-03-17 9:07 ` David Marchand
2026-03-18 11:59 ` Marat Khalili
2026-03-19 11:44 ` [PATCH v2 0/2] " Christophe Fontaine
2 siblings, 0 replies; 19+ messages in thread
From: David Marchand @ 2026-03-17 9:07 UTC (permalink / raw)
To: Christophe Fontaine, Konstantin Ananyev, Marat Khalili,
Wathsala Vithanage
Cc: dev
On Mon, 16 Mar 2026 at 20:13, Christophe Fontaine <cfontain@redhat.com> wrote:
>
> arm64 jit compiler didn't support reading from a packet.
> Enable arm64 JIT to generate native code for
> (BPF_ABS | <size> | BPF_LD) and (BPF_IND | <size> | BPF_LD)
> instructions.
>
> Compared to the x86_64 HIT, only a "slow path" is implemented,
> '__rte_pktmbuf_read' is systematically called.
Worth mentioning in the commitlog that the unit test was a yes box
(failures to load mbuf data on arm were silently ignored iiuc).
On the code itself, a review from BPF (and ARM?) maintainers is needed.
--
David Marchand
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-10 12:20 [PATCH] bpf/arm64: support packet data load instructions Christophe Fontaine
2026-03-17 9:07 ` David Marchand
@ 2026-03-18 11:59 ` Marat Khalili
2026-03-18 12:54 ` Morten Brørup
2026-03-19 11:44 ` [PATCH v2 0/2] " Christophe Fontaine
2 siblings, 1 reply; 19+ messages in thread
From: Marat Khalili @ 2026-03-18 11:59 UTC (permalink / raw)
To: Christophe Fontaine, dev@dpdk.org; +Cc: Konstantin Ananyev, Wathsala Vithanage
Hi! Thank you for doing this, please see some comments inline.
> @@ -3491,6 +3531,7 @@ run_test(const struct bpf_test *tst)
> if (ret != 0) {
> printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
> __func__, __LINE__, tst->name, ret, strerror(ret));
> + return -1;
> }
>
> /* repeat the same test with jit, when possible */
> @@ -3506,6 +3547,7 @@ run_test(const struct bpf_test *tst)
> "error: %d(%s);\n",
> __func__, __LINE__, tst->name,
> rv, strerror(rv));
> + return -1;
> }
> }
>
All return values were OR-ed, so adding early return is not strictly necessary.
The real problem is JIT tests being skipped altogether if jit.func == NULL, and
it still remains. We need to disallow empty jit.func on platforms with known
working JIT.
(I'd ask for many more tests, but not sure it's fair since they aren't really
platform-dependent.)
// snip to implementation
> +static void
> +emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1, uint8_t tmp2,
> + uint8_t src, uint32_t imm)
Handling immediate as unsigned is questionable, especially in the BPF_IND case
it may produce incorrect results.
To make things worse, `__rte_pktmbuf_read` is also buggy when passed very large
lengths (again, technically not ARM eBPF fault).
> +{
> + uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
> + uint8_t r6 = ebpf_to_a64_reg(ctx, EBPF_REG_6);
> + uint32_t mode = BPF_MODE(op);
> + uint32_t opsz = BPF_SIZE(op);
> + uint32_t sz = bpf_size(opsz);
> + int16_t jump_to_epilogue;
> +
> + /* r0 = mbuf (R6) */
> + emit_mov_64(ctx, A64_R(0), r6);
> +
> + /* r1 = off: for ABS use imm, for IND use src + imm */
> + if (mode == BPF_ABS) {
> + emit_mov_imm(ctx, 1, A64_R(1), imm);
> + } else {
> + emit_mov_imm(ctx, 1, tmp2, imm);
> + emit_add(ctx, 1, tmp2, src);
> + emit_mov_64(ctx, A64_R(1), tmp2);
> + }
> +
> + /* r2 = len */
> + emit_mov_imm(ctx, 1, A64_R(2), sz);
> +
> + /* r3 = buf (SP) */
> + emit_mov_64(ctx, A64_R(3), A64_SP);
> +
> + /* call __rte_pktmbuf_read */
> + emit_call(ctx, tmp1, __rte_pktmbuf_read);
> + /* check return value of __rte_pktmbuf_read */
> + emit_cbnz(ctx, 1, A64_R(0), 3);
> + emit_mov_imm(ctx, 1, r0, 0);
> + jump_to_epilogue = (ctx->program_start + ctx->program_sz) - ctx->idx;
> + emit_b(ctx, jump_to_epilogue);
Could we call emit_return_zero_if_src_zero here instead?
> +
> + /* r0 points to the data, load 1/2/4 bytes */
> + emit_ldr(ctx, opsz, A64_R(0), A64_R(0), A64_ZR);
> + if (sz != sizeof(uint8_t))
> + emit_be(ctx, A64_R(0), sz * CHAR_BIT);
> + emit_mov_64(ctx, r0, A64_R(0));
> +}
> +
I would also pass final verdict on ARM code to ARM folks. To my untrained eye
it looks correct apart from the signed immediate issue. Optimizations are
possible, but since we're only implementing slow path for now maybe not worth
the effort.
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 11:59 ` Marat Khalili
@ 2026-03-18 12:54 ` Morten Brørup
2026-03-18 13:07 ` Marat Khalili
2026-03-18 23:13 ` Stephen Hemminger
0 siblings, 2 replies; 19+ messages in thread
From: Morten Brørup @ 2026-03-18 12:54 UTC (permalink / raw)
To: Marat Khalili, Christophe Fontaine, dev
Cc: Konstantin Ananyev, Wathsala Vithanage
> > +static void
> > +emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1,
> uint8_t tmp2,
> > + uint8_t src, uint32_t imm)
>
> Handling immediate as unsigned is questionable, especially in the
> BPF_IND case
> it may produce incorrect results.
In Classic BPF (cBPF), when the immediate "k" is negative (when cast to signed integer), it is used for getting packet metadata (e.g. SKF_AD_VLAN_TAG gets the VLAN ID); otherwise it is considered unsigned.
>
> To make things worse, `__rte_pktmbuf_read` is also buggy when passed
> very large
> lengths (again, technically not ARM eBPF fault).
Are you referring to the potential integer wraparound in the off+len > rte_pktmbuf_pkt_len(m) comparison?
[BZ1724]
Or some other bug in __rte_pktmbuf_read()?
[BZ1724]: https://bugs.dpdk.org/show_bug.cgi?id=1724
>
> > +{
> > + uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
> > + uint8_t r6 = ebpf_to_a64_reg(ctx, EBPF_REG_6);
> > + uint32_t mode = BPF_MODE(op);
> > + uint32_t opsz = BPF_SIZE(op);
> > + uint32_t sz = bpf_size(opsz);
> > + int16_t jump_to_epilogue;
> > +
> > + /* r0 = mbuf (R6) */
> > + emit_mov_64(ctx, A64_R(0), r6);
> > +
> > + /* r1 = off: for ABS use imm, for IND use src + imm */
> > + if (mode == BPF_ABS) {
> > + emit_mov_imm(ctx, 1, A64_R(1), imm);
> > + } else {
> > + emit_mov_imm(ctx, 1, tmp2, imm);
> > + emit_add(ctx, 1, tmp2, src);
> > + emit_mov_64(ctx, A64_R(1), tmp2);
> > + }
> > +
> > + /* r2 = len */
> > + emit_mov_imm(ctx, 1, A64_R(2), sz);
> > +
> > + /* r3 = buf (SP) */
> > + emit_mov_64(ctx, A64_R(3), A64_SP);
> > +
> > + /* call __rte_pktmbuf_read */
> > + emit_call(ctx, tmp1, __rte_pktmbuf_read);
> > + /* check return value of __rte_pktmbuf_read */
> > + emit_cbnz(ctx, 1, A64_R(0), 3);
> > + emit_mov_imm(ctx, 1, r0, 0);
> > + jump_to_epilogue = (ctx->program_start + ctx->program_sz) - ctx-
> >idx;
> > + emit_b(ctx, jump_to_epilogue);
>
> Could we call emit_return_zero_if_src_zero here instead?
>
> > +
> > + /* r0 points to the data, load 1/2/4 bytes */
> > + emit_ldr(ctx, opsz, A64_R(0), A64_R(0), A64_ZR);
> > + if (sz != sizeof(uint8_t))
> > + emit_be(ctx, A64_R(0), sz * CHAR_BIT);
> > + emit_mov_64(ctx, r0, A64_R(0));
> > +}
> > +
>
> I would also pass final verdict on ARM code to ARM folks. To my
> untrained eye
> it looks correct apart from the signed immediate issue. Optimizations
> are
> possible, but since we're only implementing slow path for now maybe not
> worth
> the effort.
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 12:54 ` Morten Brørup
@ 2026-03-18 13:07 ` Marat Khalili
2026-03-18 13:39 ` Morten Brørup
2026-03-18 23:13 ` Stephen Hemminger
1 sibling, 1 reply; 19+ messages in thread
From: Marat Khalili @ 2026-03-18 13:07 UTC (permalink / raw)
To: Morten Brørup, Christophe Fontaine, dev@dpdk.org
Cc: Konstantin Ananyev, Wathsala Vithanage
> > Handling immediate as unsigned is questionable, especially in the
> > BPF_IND case
> > it may produce incorrect results.
>
> In Classic BPF (cBPF), when the immediate "k" is negative (when cast to signed integer), it is used
> for getting packet metadata (e.g. SKF_AD_VLAN_TAG gets the VLAN ID); otherwise it is considered
> unsigned.
Yes. Since we don't support it, we should probably consider these offsets invalid.
And in the BPF_IND case one might be tempted to load end of some packet area in
the register and use negative offsets, we probably should handle it correctly.
> > To make things worse, `__rte_pktmbuf_read` is also buggy when passed
> > very large
> > lengths (again, technically not ARM eBPF fault).
>
> Are you referring to the potential integer wraparound in the off+len > rte_pktmbuf_pkt_len(m)
> comparison?
> [BZ1724]
> Or some other bug in __rte_pktmbuf_read()?
>
> [BZ1724]: https://bugs.dpdk.org/show_bug.cgi?id=1724
Technically that one manifested itself in rte_pktmbuf_read (without
underscores), but essentially the root cause is the same. In the eBPF BPF_ABS
case we could potentially rely on `__rte_pktmbuf_read` for refusing to accept
negative values converted to large integers, but due to overflows we cannot.
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 13:07 ` Marat Khalili
@ 2026-03-18 13:39 ` Morten Brørup
2026-03-18 15:34 ` Christophe Fontaine
0 siblings, 1 reply; 19+ messages in thread
From: Morten Brørup @ 2026-03-18 13:39 UTC (permalink / raw)
To: Marat Khalili, Christophe Fontaine, dev
Cc: Konstantin Ananyev, Wathsala Vithanage
> From: Marat Khalili [mailto:marat.khalili@huawei.com]
> Sent: Wednesday, 18 March 2026 14.07
>
> > > Handling immediate as unsigned is questionable, especially in the
> > > BPF_IND case
> > > it may produce incorrect results.
> >
> > In Classic BPF (cBPF), when the immediate "k" is negative (when cast
> to signed integer), it is used
> > for getting packet metadata (e.g. SKF_AD_VLAN_TAG gets the VLAN ID);
> otherwise it is considered
> > unsigned.
>
> Yes. Since we don't support it, we should probably consider these
> offsets invalid.
+1
>
> And in the BPF_IND case one might be tempted to load end of some packet
> area in
> the register and use negative offsets, we probably should handle it
> correctly.
In Classic BPF, negative "k" has special meaning for both BPF_ABS and BPF_IND.
So we should consider it invalid for both cases.
That prevents applications from using it the way you describe.
And it will allow us to add BPF library support for Linux-compatible special meanings later, without breaking the ABI.
>
> > > To make things worse, `__rte_pktmbuf_read` is also buggy when
> passed
> > > very large
> > > lengths (again, technically not ARM eBPF fault).
> >
> > Are you referring to the potential integer wraparound in the off+len
> > rte_pktmbuf_pkt_len(m)
> > comparison?
> > [BZ1724]
> > Or some other bug in __rte_pktmbuf_read()?
> >
> > [BZ1724]: https://bugs.dpdk.org/show_bug.cgi?id=1724
>
> Technically that one manifested itself in rte_pktmbuf_read (without
> underscores), but essentially the root cause is the same. In the eBPF
> BPF_ABS
> case we could potentially rely on `__rte_pktmbuf_read` for refusing to
> accept
> negative values converted to large integers, but due to overflows we
> cannot.
Agree.
The off+len wraparound in rte_pktmbuf_read() and __rte_pktmbuf_read() is clearly a bug, and not intentional.
Reading the function documentation supports the conclusion that the wraparound is a bug; the off and len parameters are unsigned, and the documentation says nothing about an ability to make them behave as signed.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 13:39 ` Morten Brørup
@ 2026-03-18 15:34 ` Christophe Fontaine
2026-03-18 16:16 ` Marat Khalili
0 siblings, 1 reply; 19+ messages in thread
From: Christophe Fontaine @ 2026-03-18 15:34 UTC (permalink / raw)
To: Morten Brørup
Cc: Marat Khalili, dev, Konstantin Ananyev, Wathsala Vithanage
On Wed, Mar 18, 2026 at 2:39 PM Morten Brørup <mb@smartsharesystems.com> wrote:
>
> > From: Marat Khalili [mailto:marat.khalili@huawei.com]
> > Sent: Wednesday, 18 March 2026 14.07
> >
> > > > Handling immediate as unsigned is questionable, especially in the
> > > > BPF_IND case
> > > > it may produce incorrect results.
> > >
> > > In Classic BPF (cBPF), when the immediate "k" is negative (when cast
> > to signed integer), it is used
> > > for getting packet metadata (e.g. SKF_AD_VLAN_TAG gets the VLAN ID);
> > otherwise it is considered
> > > unsigned.
> >
> > Yes. Since we don't support it, we should probably consider these
> > offsets invalid.
>
> +1
>
> >
> > And in the BPF_IND case one might be tempted to load end of some packet
> > area in
> > the register and use negative offsets, we probably should handle it
> > correctly.
>
> In Classic BPF, negative "k" has special meaning for both BPF_ABS and BPF_IND.
> So we should consider it invalid for both cases.
>
> That prevents applications from using it the way you describe.
> And it will allow us to add BPF library support for Linux-compatible special meanings later, without breaking the ABI.
>
Aren't these invalid offsets already taken care during the syntax
check when we validate the BPF program ?
in bpf_validate.c +1499:
/* load absolute instructions */
[(BPF_LD | BPF_ABS | BPF_B)] = {
.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = INT32_MAX},
.eval = eval_ld_mbuf,
},
IIUC, as __rte_bpf_validate fails when we call rte_bpf_load (in
bpf_load.c +113), we can't even interpret the cBPF program.
> >
> > > > To make things worse, `__rte_pktmbuf_read` is also buggy when
> > passed
> > > > very large
> > > > lengths (again, technically not ARM eBPF fault).
> > >
> > > Are you referring to the potential integer wraparound in the off+len
> > > rte_pktmbuf_pkt_len(m)
> > > comparison?
> > > [BZ1724]
> > > Or some other bug in __rte_pktmbuf_read()?
> > >
> > > [BZ1724]: https://bugs.dpdk.org/show_bug.cgi?id=1724
> >
> > Technically that one manifested itself in rte_pktmbuf_read (without
> > underscores), but essentially the root cause is the same. In the eBPF
> > BPF_ABS
> > case we could potentially rely on `__rte_pktmbuf_read` for refusing to
> > accept
> > negative values converted to large integers, but due to overflows we
> > cannot.
>
> Agree.
>
> The off+len wraparound in rte_pktmbuf_read() and __rte_pktmbuf_read() is clearly a bug, and not intentional.
> Reading the function documentation supports the conclusion that the wraparound is a bug; the off and len parameters are unsigned, and the documentation says nothing about an ability to make them behave as signed.
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 15:34 ` Christophe Fontaine
@ 2026-03-18 16:16 ` Marat Khalili
2026-03-18 16:37 ` Morten Brørup
0 siblings, 1 reply; 19+ messages in thread
From: Marat Khalili @ 2026-03-18 16:16 UTC (permalink / raw)
To: Christophe Fontaine, Morten Brørup
Cc: dev@dpdk.org, Konstantin Ananyev, Wathsala Vithanage
> > In Classic BPF, negative "k" has special meaning for both BPF_ABS and BPF_IND.
> > So we should consider it invalid for both cases.
> >
> > That prevents applications from using it the way you describe.
> > And it will allow us to add BPF library support for Linux-compatible special meanings later, without
> breaking the ABI.
> >
>
> Aren't these invalid offsets already taken care during the syntax
> check when we validate the BPF program ?
> in bpf_validate.c +1499:
> /* load absolute instructions */
> [(BPF_LD | BPF_ABS | BPF_B)] = {
> .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
> .off = { .min = 0, .max = 0},
> .imm = { .min = 0, .max = INT32_MAX},
> .eval = eval_ld_mbuf,
> },
>
> IIUC, as __rte_bpf_validate fails when we cal rte_bpf_load ( in
> bpf_load.c +113), we can't even interpret the cBPF program.
Good point, we can probably consider BPF_ABS case covered by this.
For BPF_IND however it does not seem to exclude any values, and even if it did
we don't know what's in the register. Speaking of which, I just noticed that
we're truncating it.
I suggest the following logic in pseudo-code:
static void
emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1, uint8_t tmp2,
uint8_t src, int32_t imm)
{
// ...
/* r1 = off: for ABS use imm, for IND use src + imm */
if (mode == BPF_ABS) {
assert imm >= 0, "verified by verifier"
emit MOV W1, #<imm>
} else {
/* add signed imm to the source register */
emit(s) X1 = src + #<imm>
/* verify dynamically that offset is within the domain of __rte_pktmbuf_read */
emit(s) jump_to_epilogue if X1 <s 0 || X1 > INT32_MAX
}
// ...
}
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 16:16 ` Marat Khalili
@ 2026-03-18 16:37 ` Morten Brørup
2026-03-18 16:43 ` Marat Khalili
0 siblings, 1 reply; 19+ messages in thread
From: Morten Brørup @ 2026-03-18 16:37 UTC (permalink / raw)
To: Marat Khalili, Christophe Fontaine
Cc: dev, Konstantin Ananyev, Wathsala Vithanage
> > > In Classic BPF, negative "k" has special meaning for both BPF_ABS
> and BPF_IND.
> > > So we should consider it invalid for both cases.
> > >
> > > That prevents applications from using it the way you describe.
> > > And it will allow us to add BPF library support for Linux-
> compatible special meanings later, without
> > breaking the ABI.
> > >
> >
> > Aren't these invalid offsets already taken care during the syntax
> > check when we validate the BPF program ?
> > in bpf_validate.c +1499:
> > /* load absolute instructions */
> > [(BPF_LD | BPF_ABS | BPF_B)] = {
> > .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
> > .off = { .min = 0, .max = 0},
> > .imm = { .min = 0, .max = INT32_MAX},
> > .eval = eval_ld_mbuf,
> > },
> >
> > IIUC, as __rte_bpf_validate fails when we cal rte_bpf_load ( in
> > bpf_load.c +113), we can't even interpret the cBPF program.
>
> Good point, we can probably consider BPF_ABS case covered by this.
Agree.
>
> For BPF_IND however it does not seem to exclude any values, and even if
> it did
> we don't know what's in the register. Speaking of which, I just noticed
> that
> we're truncating it.
>
> I suggest the following logic in pseudo-code:
>
> static void
> emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1,
> uint8_t tmp2,
> uint8_t src, int32_t imm)
> {
> // ...
>
> /* r1 = off: for ABS use imm, for IND use src + imm */
> if (mode == BPF_ABS) {
> assert imm >= 0, "verified by verifier"
> emit MOV W1, #<imm>
> } else {
> /* add signed imm to the source register */
> emit(s) X1 = src + #<imm>
> /* verify dynamically that offset is within the domain of
> __rte_pktmbuf_read */
> emit(s) jump_to_epilogue if X1 <s 0 || X1 > INT32_MAX
> }
>
> // ...
> }
I don't know if it's the sum of src+imm that determines special meaning, or it's the imm itself.
If it's the imm itself, a simple fix would be to update the validator's .imm.max values for BPF_IND from UINT32_MAX to INT32_MAX.
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 16:37 ` Morten Brørup
@ 2026-03-18 16:43 ` Marat Khalili
2026-03-18 18:10 ` Konstantin Ananyev
0 siblings, 1 reply; 19+ messages in thread
From: Marat Khalili @ 2026-03-18 16:43 UTC (permalink / raw)
To: Morten Brørup, Christophe Fontaine
Cc: dev@dpdk.org, Konstantin Ananyev, Wathsala Vithanage
> > I suggest the following logic in pseudo-code:
> >
> > static void
> > emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1,
> > uint8_t tmp2,
> > uint8_t src, int32_t imm)
> > {
> > // ...
> >
> > /* r1 = off: for ABS use imm, for IND use src + imm */
> > if (mode == BPF_ABS) {
> > assert imm >= 0, "verified by verifier"
> > emit MOV W1, #<imm>
> > } else {
> > /* add signed imm to the source register */
> > emit(s) X1 = src + #<imm>
> > /* verify dynamically that offset is within the domain of
> > __rte_pktmbuf_read */
> > emit(s) jump_to_epilogue if X1 <s 0 || X1 > INT32_MAX
> > }
> >
> > // ...
> > }
>
> I don't know if it's the sum of src+imm that determines special meaning, or it's the imm itself.
> If it's the imm itself, a simple fix would be to update the validator's .imm.max values for BPF_IND
> from UINT32_MAX to INT32_MAX.
>
Even if we do it (I have no particular opinion), it won't prevent sum of 64-bit
register and immediate from being negative or greater than the range of values
supported by 32-bit argument of __rte_pktmbuf_read.
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 16:43 ` Marat Khalili
@ 2026-03-18 18:10 ` Konstantin Ananyev
2026-03-19 9:20 ` Morten Brørup
0 siblings, 1 reply; 19+ messages in thread
From: Konstantin Ananyev @ 2026-03-18 18:10 UTC (permalink / raw)
To: Marat Khalili, Morten Brørup, Christophe Fontaine
Cc: dev@dpdk.org, Wathsala Vithanage
>
> > > I suggest the following logic in pseudo-code:
> > >
> > > static void
> > > emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1,
> > > uint8_t tmp2,
> > > uint8_t src, int32_t imm)
> > > {
> > > // ...
> > >
> > > /* r1 = off: for ABS use imm, for IND use src + imm */
> > > if (mode == BPF_ABS) {
> > > assert imm >= 0, "verified by verifier"
> > > emit MOV W1, #<imm>
> > > } else {
> > > /* add signed imm to the source register */
> > > emit(s) X1 = src + #<imm>
> > > /* verify dynamically that offset is within the domain of
> > > __rte_pktmbuf_read */
> > > emit(s) jump_to_epilogue if X1 <s 0 || X1 > INT32_MAX
Shouldn't it be UINT32_MAX?
> > > }
> > >
> > > // ...
> > > }
I wonder does x86 jit and VM perform that check?
If not, then it is probably not fair to demand that patch to fix these things for arm:
It probably has to be another patch that will fix that issue for all targets (vm, x86, arm).
After all - it wouldn't cause any memory corruption, right?
In the worst case (wraparound) bpf will read some valid data from the
unexpected location in the packet instead of simply returning 0.
BTW, I think we still need to add check for overflow in __rte_pktmbuf_read() -
bpf jit is not the only user of it.
Unless, of-course, we want rte_pktmbuf_read() to work with wrapped-around values.
> >
> > I don't know if it's the sum of src+imm that determines special meaning, or it's
> the imm itself.
> > If it's the imm itself, a simple fix would be to update the validator's .imm.max
> values for BPF_IND
> > from UINT32_MAX to INT32_MAX.
> >
>
> Even if we do it (I have no particular opinion), it won't prevent sum of 64-bit
> register and immediate from being negative or greater than the range of values
> supported by 32-bit argument of __rte_pktmbuf_read.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 12:54 ` Morten Brørup
2026-03-18 13:07 ` Marat Khalili
@ 2026-03-18 23:13 ` Stephen Hemminger
1 sibling, 0 replies; 19+ messages in thread
From: Stephen Hemminger @ 2026-03-18 23:13 UTC (permalink / raw)
To: Morten Brørup
Cc: Marat Khalili, Christophe Fontaine, dev, Konstantin Ananyev,
Wathsala Vithanage
[-- Attachment #1: Type: text/plain, Size: 3199 bytes --]
Dpdk doesn't yet support these negative offsets.
But it is not worth doing until upstream libpcap is fixed.
Right now, libpcap hard codes knowledge about the destination being a live
linux capture into pcap_ compile. Filed a bug on that several years ago but
no real fix came. Maybe time to send AI into libpcap
On Wed, Mar 18, 2026, 5:54 AM Morten Brørup <mb@smartsharesystems.com>
wrote:
> > > +static void
> > > +emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1,
> > uint8_t tmp2,
> > > + uint8_t src, uint32_t imm)
> >
> > Handling immediate as unsigned is questionable, especially in the
> > BPF_IND case
> > it may produce incorrect results.
>
> In Classic BPF (cBPF), when the immediate "k" is negative (when cast to
> signed integer), it is used for getting packet metadata (e.g.
> SKF_AD_VLAN_TAG gets the VLAN ID); otherwise it is considered unsigned.
>
> >
> > To make things worse, `__rte_pktmbuf_read` is also buggy when passed
> > very large
> > lengths (again, technically not ARM eBPF fault).
>
> Are you referring to the potential integer wraparound in the off+len >
> rte_pktmbuf_pkt_len(m) comparison?
> [BZ1724]
> Or some other bug in __rte_pktmbuf_read()?
>
> [BZ1724]: https://bugs.dpdk.org/show_bug.cgi?id=1724
>
> >
> > > +{
> > > + uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
> > > + uint8_t r6 = ebpf_to_a64_reg(ctx, EBPF_REG_6);
> > > + uint32_t mode = BPF_MODE(op);
> > > + uint32_t opsz = BPF_SIZE(op);
> > > + uint32_t sz = bpf_size(opsz);
> > > + int16_t jump_to_epilogue;
> > > +
> > > + /* r0 = mbuf (R6) */
> > > + emit_mov_64(ctx, A64_R(0), r6);
> > > +
> > > + /* r1 = off: for ABS use imm, for IND use src + imm */
> > > + if (mode == BPF_ABS) {
> > > + emit_mov_imm(ctx, 1, A64_R(1), imm);
> > > + } else {
> > > + emit_mov_imm(ctx, 1, tmp2, imm);
> > > + emit_add(ctx, 1, tmp2, src);
> > > + emit_mov_64(ctx, A64_R(1), tmp2);
> > > + }
> > > +
> > > + /* r2 = len */
> > > + emit_mov_imm(ctx, 1, A64_R(2), sz);
> > > +
> > > + /* r3 = buf (SP) */
> > > + emit_mov_64(ctx, A64_R(3), A64_SP);
> > > +
> > > + /* call __rte_pktmbuf_read */
> > > + emit_call(ctx, tmp1, __rte_pktmbuf_read);
> > > + /* check return value of __rte_pktmbuf_read */
> > > + emit_cbnz(ctx, 1, A64_R(0), 3);
> > > + emit_mov_imm(ctx, 1, r0, 0);
> > > + jump_to_epilogue = (ctx->program_start + ctx->program_sz) - ctx-
> > >idx;
> > > + emit_b(ctx, jump_to_epilogue);
> >
> > Could we call emit_return_zero_if_src_zero here instead?
> >
> > > +
> > > + /* r0 points to the data, load 1/2/4 bytes */
> > > + emit_ldr(ctx, opsz, A64_R(0), A64_R(0), A64_ZR);
> > > + if (sz != sizeof(uint8_t))
> > > + emit_be(ctx, A64_R(0), sz * CHAR_BIT);
> > > + emit_mov_64(ctx, r0, A64_R(0));
> > > +}
> > > +
> >
> > I would also pass final verdict on ARM code to ARM folks. To my
> > untrained eye
> > it looks correct apart from the signed immediate issue. Optimizations
> > are
> > possible, but since we're only implementing slow path for now maybe not
> > worth
> > the effort.
>
[-- Attachment #2: Type: text/html, Size: 4239 bytes --]
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] bpf/arm64: support packet data load instructions
2026-03-18 18:10 ` Konstantin Ananyev
@ 2026-03-19 9:20 ` Morten Brørup
0 siblings, 0 replies; 19+ messages in thread
From: Morten Brørup @ 2026-03-19 9:20 UTC (permalink / raw)
To: Konstantin Ananyev, Marat Khalili, Christophe Fontaine
Cc: dev, Wathsala Vithanage
> BTW, I think we still need to add check for overflow in
> __rte_pktmbuf_read() -
> bpf jit is not the only user of it.
> Unless, of-course, we want rte_pktmbuf_read() to work with wrapped-
> around values.
It makes no sense for rte_pktmbuf_read() to wrap around.
Its offset and length parameters are clearly specified and documented as unsigned, and there is no mention of wrap-around or implicit negative values when summing them.
I have submitted a fix just now:
https://inbox.dpdk.org/dev/20260319084048.652493-1-mb@smartsharesystems.com/T/#u
-Morten
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v2 0/2] bpf/arm64: support packet data load instructions
2026-03-10 12:20 [PATCH] bpf/arm64: support packet data load instructions Christophe Fontaine
2026-03-17 9:07 ` David Marchand
2026-03-18 11:59 ` Marat Khalili
@ 2026-03-19 11:44 ` Christophe Fontaine
2026-03-19 11:44 ` [PATCH v2 1/2] bpf/arm64: fix offset type to allow a negative jump Christophe Fontaine
2026-03-19 11:44 ` [PATCH v2 2/2] bpf/arm64: support packet data load instructions Christophe Fontaine
2 siblings, 2 replies; 19+ messages in thread
From: Christophe Fontaine @ 2026-03-19 11:44 UTC (permalink / raw)
To: dev; +Cc: Christophe Fontaine
v2:
- use emit_return_zero_if_src_zero instead of manual call
- add fast path (direct read from mbuf->buf_addr) if the
requested data is present in the first mbuf and fallback to
'slow path' (call to __rte_pktmbuf_read) otherwise.
- add tests for BPF_LD ABS/IND and slow path.
Christophe Fontaine (2):
bpf/arm64: fix offset type to allow a negative jump
bpf/arm64: support packet data load instructions
app/test/test_bpf.c | 121 ++++++++++++++++++++++++++++++++++++++++
lib/bpf/bpf_jit_arm64.c | 88 ++++++++++++++++++++++++++++-
2 files changed, 208 insertions(+), 1 deletion(-)
--
2.53.0
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v2 1/2] bpf/arm64: fix offset type to allow a negative jump
2026-03-19 11:44 ` [PATCH v2 0/2] " Christophe Fontaine
@ 2026-03-19 11:44 ` Christophe Fontaine
2026-03-19 11:44 ` [PATCH v2 2/2] bpf/arm64: support packet data load instructions Christophe Fontaine
1 sibling, 0 replies; 19+ messages in thread
From: Christophe Fontaine @ 2026-03-19 11:44 UTC (permalink / raw)
To: dev
Cc: Christophe Fontaine, Wathsala Vithanage, Konstantin Ananyev,
Marat Khalili
The epilogue code can be before the call to
'emit_return_zero_if_src_zero', but the code only supported a
jump forward.
Fix the type of jump_to_epilogue from unsigned to signed,
to be able to jump backward.
Signed-off-by: Christophe Fontaine <cfontain@redhat.com>
---
lib/bpf/bpf_jit_arm64.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/bpf/bpf_jit_arm64.c b/lib/bpf/bpf_jit_arm64.c
index a04ef33a9c..099822e9f1 100644
--- a/lib/bpf/bpf_jit_arm64.c
+++ b/lib/bpf/bpf_jit_arm64.c
@@ -957,7 +957,7 @@ static void
emit_return_zero_if_src_zero(struct a64_jit_ctx *ctx, bool is64, uint8_t src)
{
uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
- uint16_t jump_to_epilogue;
+ int32_t jump_to_epilogue;
emit_cbnz(ctx, is64, src, 3);
emit_mov_imm(ctx, is64, r0, 0);
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* [PATCH v2 2/2] bpf/arm64: support packet data load instructions
2026-03-19 11:44 ` [PATCH v2 0/2] " Christophe Fontaine
2026-03-19 11:44 ` [PATCH v2 1/2] bpf/arm64: fix offset type to allow a negative jump Christophe Fontaine
@ 2026-03-19 11:44 ` Christophe Fontaine
2026-03-23 8:15 ` Christophe Fontaine
1 sibling, 1 reply; 19+ messages in thread
From: Christophe Fontaine @ 2026-03-19 11:44 UTC (permalink / raw)
To: dev
Cc: Christophe Fontaine, Konstantin Ananyev, Marat Khalili,
Wathsala Vithanage
arm64 jit compiler didn't support reading from a packet.
Enable arm64 JIT to generate native code for
(BPF_ABS | <size> | BPF_LD) and (BPF_IND | <size> | BPF_LD)
instructions.
Both 'fast path' and 'slow path' are implemented, similar to
the x86_64 JIT, where we call '__rte_pktmbuf_read' if the
requested data is not in the first segment.
Added unit test which focuses only on BPF_LD | BPF_ABS/BPF_IND and
a BPF_LD | BPF_ABS slow path call.
Signed-off-by: Christophe Fontaine <cfontain@redhat.com>
---
app/test/test_bpf.c | 121 ++++++++++++++++++++++++++++++++++++++++
lib/bpf/bpf_jit_arm64.c | 86 ++++++++++++++++++++++++++++
2 files changed, 207 insertions(+)
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index dd24722450..799f9d30ca 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -2750,6 +2750,79 @@ static const struct rte_bpf_xsym test_call5_xsym[] = {
},
};
+/* load mbuf (BPF_ABS/BPF_IND) test-cases */
+static const struct ebpf_insn test_ld_mbuf0_prog[] = {
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_6,
+ .src_reg = EBPF_REG_1,
+ },
+ /* load IPv4 version and IHL */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_B),
+ .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+/* load mbuf (BPF_ABS/BPF_IND) test-cases */
+static const struct ebpf_insn test_ld_slow_mbuf0_prog[] = {
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_6,
+ .src_reg = EBPF_REG_1,
+ },
+ /* load from chained mbuf */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_B),
+ /* 201: second mbuf, built by test_ld_mbuf1_prepare */
+ .imm = 201 + 0x42,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static const struct ebpf_insn test_ld_ind_mbuf0_prog[] = {
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_6,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ /* Set return value to one. */
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0,
+ },
+ /* load IPv4 version and IHL */
+ {
+ .code = (BPF_LD | BPF_IND | BPF_B),
+ .src_reg = EBPF_REG_0,
+ .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static int
+test_ld_mbuf0_check(uint64_t rc, const void *arg)
+{
+ return cmp_res(__func__, 0x45, rc, arg, arg, 0);
+}
+
+static int
+test_ld_slow_mbuf0_check(uint64_t rc, const void *arg)
+{
+ return cmp_res(__func__, 0x42, rc, arg, arg, 0);
+}
+
/* load mbuf (BPF_ABS/BPF_IND) test-cases */
static const struct ebpf_insn test_ld_mbuf1_prog[] = {
@@ -3417,6 +3490,54 @@ static const struct bpf_test tests[] = {
/* for now don't support function calls on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
+ {
+ .name = "test_ld_abs_mbuf0",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_mbuf0_prog,
+ .nb_ins = RTE_DIM(test_ld_mbuf0_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf1_prepare,
+ .check_result = test_ld_mbuf0_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
+ {
+ .name = "test_ld_slow_mbuf0",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_slow_mbuf0_prog,
+ .nb_ins = RTE_DIM(test_ld_slow_mbuf0_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf1_prepare,
+ .check_result = test_ld_slow_mbuf0_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
+ {
+ .name = "test_ld_ind_mbuf0",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_ind_mbuf0_prog,
+ .nb_ins = RTE_DIM(test_ld_ind_mbuf0_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf1_prepare,
+ .check_result = test_ld_mbuf0_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
{
.name = "test_ld_mbuf1",
.arg_sz = sizeof(struct dummy_mbuf),
diff --git a/lib/bpf/bpf_jit_arm64.c b/lib/bpf/bpf_jit_arm64.c
index 099822e9f1..5b3b80cb86 100644
--- a/lib/bpf/bpf_jit_arm64.c
+++ b/lib/bpf/bpf_jit_arm64.c
@@ -8,6 +8,7 @@
#include <rte_common.h>
#include <rte_byteorder.h>
+#include <rte_mbuf.h>
#include "bpf_impl.h"
@@ -1123,6 +1124,75 @@ emit_branch(struct a64_jit_ctx *ctx, uint8_t op, uint32_t i, int16_t off)
emit_b_cond(ctx, ebpf_to_a64_cond(op), jump_offset_get(ctx, i, off));
}
+/*
+ * Emit code for BPF_LD | BPF_ABS/IND: load from packet.
+ * Implements both a fast path, which computes the offset and read directly
+ * and a slow path, which calls __rte_pktmbuf_read(mbuf, off, len, buf)
+ * when the data is not in the first segment.
+ */
+static void
+emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1, uint8_t tmp2,
+ uint8_t src, int32_t imm)
+{
+ uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
+ uint8_t r6 = ebpf_to_a64_reg(ctx, EBPF_REG_6);
+ uint32_t mode = BPF_MODE(op);
+ uint32_t opsz = BPF_SIZE(op);
+ uint32_t sz = bpf_size(opsz);
+
+ /* r0 = mbuf (R6) */
+ emit_mov_64(ctx, A64_R(0), r6);
+
+ /* r1 = off: for ABS use imm, for IND use src + imm */
+ if (mode == BPF_ABS) {
+ emit_mov_imm(ctx, 0, A64_R(1), imm);
+ } else {
+ emit_mov_imm(ctx, 0, tmp2, imm);
+ emit_add(ctx, 0, tmp2, src);
+ emit_mov_64(ctx, A64_R(1), tmp2);
+ }
+
+ /* r2 = len, 1/2/4 bytes */
+ emit_mov_imm32(ctx, 0, A64_R(2), sz);
+ /* r3 = buf (SP) */
+ emit_mov_64(ctx, A64_R(3), A64_SP);
+
+ /* tmp1 = mbuf->data_len */
+ emit_mov_imm(ctx, 1, tmp1, offsetof(struct rte_mbuf, data_len));
+ emit_ldr(ctx, BPF_W, tmp1, r6, tmp1);
+
+ /* tmp2 = off + sz */
+ emit_add_imm_64(ctx, tmp2, A64_R(1), sz);
+ /* if off+sz > data_len, jump to slow path */
+ emit_cmp(ctx, 1, tmp2, tmp1);
+ emit_b_cond(ctx, A64_HI, 8);
+
+ /* Fast path, read directly, pointer to the data will be in A64_R(0) */
+ /* A64_R(0) = mbuf->buf_addr */
+ emit_mov_imm(ctx, 1, tmp1, offsetof(struct rte_mbuf, buf_addr));
+ emit_ldr(ctx, EBPF_DW, A64_R(0), r6, tmp1);
+ /* tmp2 = * mbuf->data_off */
+ emit_mov_imm(ctx, 1, tmp2, offsetof(struct rte_mbuf, data_off));
+ emit_ldr(ctx, BPF_H, tmp2, r6, tmp2);
+
+ /* A64_R(0) += data_off + off */
+ emit_add(ctx, 1, A64_R(0), tmp2);
+ emit_add(ctx, 1, A64_R(0), A64_R(1));
+
+ /* End of Fast Path, skip slow path */
+ emit_b(ctx, 4);
+
+ /* slow path, call __rte_pktmbuf_read */
+ emit_call(ctx, tmp1, __rte_pktmbuf_read);
+ /* check return value of __rte_pktmbuf_read */
+ emit_return_zero_if_src_zero(ctx, 1, A64_R(0));
+
+ /* A64_R(0) points to the data, load 1/2/4 bytes into r0*/
+ emit_ldr(ctx, opsz, r0, A64_R(0), A64_ZR);
+ if (sz != sizeof(uint8_t))
+ emit_be(ctx, r0, sz * CHAR_BIT);
+}
+
static void
check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
{
@@ -1137,6 +1207,13 @@ check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
switch (op) {
/* Call imm */
case (BPF_JMP | EBPF_CALL):
+ /* BPF_LD | BPF_ABS/IND use __rte_pktmbuf_read */
+ case (BPF_LD | BPF_ABS | BPF_B):
+ case (BPF_LD | BPF_ABS | BPF_H):
+ case (BPF_LD | BPF_ABS | BPF_W):
+ case (BPF_LD | BPF_IND | BPF_B):
+ case (BPF_LD | BPF_IND | BPF_H):
+ case (BPF_LD | BPF_IND | BPF_W):
ctx->foundcall = 1;
return;
}
@@ -1338,6 +1415,15 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
emit_mov_imm(ctx, 1, dst, u64);
i++;
break;
+ /* load absolute/indirect from packet */
+ case (BPF_LD | BPF_ABS | BPF_B):
+ case (BPF_LD | BPF_ABS | BPF_H):
+ case (BPF_LD | BPF_ABS | BPF_W):
+ case (BPF_LD | BPF_IND | BPF_B):
+ case (BPF_LD | BPF_IND | BPF_H):
+ case (BPF_LD | BPF_IND | BPF_W):
+ emit_ld_mbuf(ctx, op, tmp1, tmp2, src, imm);
+ break;
/* *(size *)(dst + off) = src */
case (BPF_STX | BPF_MEM | BPF_B):
case (BPF_STX | BPF_MEM | BPF_H):
--
2.53.0
^ permalink raw reply related [flat|nested] 19+ messages in thread
* Re: [PATCH v2 2/2] bpf/arm64: support packet data load instructions
2026-03-19 11:44 ` [PATCH v2 2/2] bpf/arm64: support packet data load instructions Christophe Fontaine
@ 2026-03-23 8:15 ` Christophe Fontaine
2026-03-23 9:26 ` Marat Khalili
2026-04-09 22:11 ` Wathsala Vithanage
0 siblings, 2 replies; 19+ messages in thread
From: Christophe Fontaine @ 2026-03-23 8:15 UTC (permalink / raw)
To: dev; +Cc: Konstantin Ananyev, Marat Khalili, Wathsala Vithanage
It seems my patch broke the CI on debian/ubuntu, but works on Fedora/Centos.
I was able to reproduce that with a container, and hope to provide a
v3 this week.
Sorry for that!
Christophe
On Thu, Mar 19, 2026 at 12:45 PM Christophe Fontaine
<cfontain@redhat.com> wrote:
>
> arm64 jit compiler didn't support reading from a packet.
> Enable arm64 JIT to generate native code for
> (BPF_ABS | <size> | BPF_LD) and (BPF_IND | <size> | BPF_LD)
> instructions.
>
> Both 'fast path' and 'slow path' are implemented, similar to
> the x86_64 JIT, where we call '__rte_pktmbuf_read' if the
> requested data is not in the first segment.
>
> Added unit test which focuses only on BPF_LD | BPF_ABS/BPF_IND and
> a BPF_LD | BPF_ABS slow path call.
>
> Signed-off-by: Christophe Fontaine <cfontain@redhat.com>
> ---
> app/test/test_bpf.c | 121 ++++++++++++++++++++++++++++++++++++++++
> lib/bpf/bpf_jit_arm64.c | 86 ++++++++++++++++++++++++++++
> 2 files changed, 207 insertions(+)
>
> diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
> index dd24722450..799f9d30ca 100644
> --- a/app/test/test_bpf.c
> +++ b/app/test/test_bpf.c
> @@ -2750,6 +2750,79 @@ static const struct rte_bpf_xsym test_call5_xsym[] = {
> },
> };
>
> +/* load mbuf (BPF_ABS/BPF_IND) test-cases */
> +static const struct ebpf_insn test_ld_mbuf0_prog[] = {
> + /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
> + {
> + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
> + .dst_reg = EBPF_REG_6,
> + .src_reg = EBPF_REG_1,
> + },
> + /* load IPv4 version and IHL */
> + {
> + .code = (BPF_LD | BPF_ABS | BPF_B),
> + .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
> + },
> + {
> + .code = (BPF_JMP | EBPF_EXIT),
> + },
> +};
> +
> +/* load mbuf (BPF_ABS/BPF_IND) test-cases */
> +static const struct ebpf_insn test_ld_slow_mbuf0_prog[] = {
> + /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
> + {
> + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
> + .dst_reg = EBPF_REG_6,
> + .src_reg = EBPF_REG_1,
> + },
> + /* load from chained mbuf */
> + {
> + .code = (BPF_LD | BPF_ABS | BPF_B),
> + /* 201: second mbuf, built by test_ld_mbuf1_prepare */
> + .imm = 201 + 0x42,
> + },
> + {
> + .code = (BPF_JMP | EBPF_EXIT),
> + },
> +};
> +
> +static const struct ebpf_insn test_ld_ind_mbuf0_prog[] = {
> + /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
> + {
> + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
> + .dst_reg = EBPF_REG_6,
> + .src_reg = EBPF_REG_1,
> + },
> + {
> + /* Set return value to one. */
> + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
> + .dst_reg = EBPF_REG_0,
> + .imm = 0,
> + },
> + /* load IPv4 version and IHL */
> + {
> + .code = (BPF_LD | BPF_IND | BPF_B),
> + .src_reg = EBPF_REG_0,
> + .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
> + },
> + {
> + .code = (BPF_JMP | EBPF_EXIT),
> + },
> +};
> +
> +static int
> +test_ld_mbuf0_check(uint64_t rc, const void *arg)
> +{
> + return cmp_res(__func__, 0x45, rc, arg, arg, 0);
> +}
> +
> +static int
> +test_ld_slow_mbuf0_check(uint64_t rc, const void *arg)
> +{
> + return cmp_res(__func__, 0x42, rc, arg, arg, 0);
> +}
> +
> /* load mbuf (BPF_ABS/BPF_IND) test-cases */
> static const struct ebpf_insn test_ld_mbuf1_prog[] = {
>
> @@ -3417,6 +3490,54 @@ static const struct bpf_test tests[] = {
> /* for now don't support function calls on 32 bit platform */
> .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
> },
> + {
> + .name = "test_ld_abs_mbuf0",
> + .arg_sz = sizeof(struct dummy_mbuf),
> + .prm = {
> + .ins = test_ld_mbuf0_prog,
> + .nb_ins = RTE_DIM(test_ld_mbuf0_prog),
> + .prog_arg = {
> + .type = RTE_BPF_ARG_PTR_MBUF,
> + .buf_size = sizeof(struct dummy_mbuf),
> + },
> + },
> + .prepare = test_ld_mbuf1_prepare,
> + .check_result = test_ld_mbuf0_check,
> + /* mbuf as input argument is not supported on 32 bit platform */
> + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
> + },
> + {
> + .name = "test_ld_slow_mbuf0",
> + .arg_sz = sizeof(struct dummy_mbuf),
> + .prm = {
> + .ins = test_ld_slow_mbuf0_prog,
> + .nb_ins = RTE_DIM(test_ld_slow_mbuf0_prog),
> + .prog_arg = {
> + .type = RTE_BPF_ARG_PTR_MBUF,
> + .buf_size = sizeof(struct dummy_mbuf),
> + },
> + },
> + .prepare = test_ld_mbuf1_prepare,
> + .check_result = test_ld_slow_mbuf0_check,
> + /* mbuf as input argument is not supported on 32 bit platform */
> + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
> + },
> + {
> + .name = "test_ld_ind_mbuf0",
> + .arg_sz = sizeof(struct dummy_mbuf),
> + .prm = {
> + .ins = test_ld_ind_mbuf0_prog,
> + .nb_ins = RTE_DIM(test_ld_ind_mbuf0_prog),
> + .prog_arg = {
> + .type = RTE_BPF_ARG_PTR_MBUF,
> + .buf_size = sizeof(struct dummy_mbuf),
> + },
> + },
> + .prepare = test_ld_mbuf1_prepare,
> + .check_result = test_ld_mbuf0_check,
> + /* mbuf as input argument is not supported on 32 bit platform */
> + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
> + },
> {
> .name = "test_ld_mbuf1",
> .arg_sz = sizeof(struct dummy_mbuf),
> diff --git a/lib/bpf/bpf_jit_arm64.c b/lib/bpf/bpf_jit_arm64.c
> index 099822e9f1..5b3b80cb86 100644
> --- a/lib/bpf/bpf_jit_arm64.c
> +++ b/lib/bpf/bpf_jit_arm64.c
> @@ -8,6 +8,7 @@
>
> #include <rte_common.h>
> #include <rte_byteorder.h>
> +#include <rte_mbuf.h>
>
> #include "bpf_impl.h"
>
> @@ -1123,6 +1124,75 @@ emit_branch(struct a64_jit_ctx *ctx, uint8_t op, uint32_t i, int16_t off)
> emit_b_cond(ctx, ebpf_to_a64_cond(op), jump_offset_get(ctx, i, off));
> }
>
> +/*
> + * Emit code for BPF_LD | BPF_ABS/IND: load from packet.
> + * Implements both a fast path, which computes the offset and read directly
> + * and a slow path, which calls __rte_pktmbuf_read(mbuf, off, len, buf)
> + * when the data is not in the first segment.
> + */
> +static void
> +emit_ld_mbuf(struct a64_jit_ctx *ctx, uint32_t op, uint8_t tmp1, uint8_t tmp2,
> + uint8_t src, int32_t imm)
> +{
> + uint8_t r0 = ebpf_to_a64_reg(ctx, EBPF_REG_0);
> + uint8_t r6 = ebpf_to_a64_reg(ctx, EBPF_REG_6);
> + uint32_t mode = BPF_MODE(op);
> + uint32_t opsz = BPF_SIZE(op);
> + uint32_t sz = bpf_size(opsz);
> +
> + /* r0 = mbuf (R6) */
> + emit_mov_64(ctx, A64_R(0), r6);
> +
> + /* r1 = off: for ABS use imm, for IND use src + imm */
> + if (mode == BPF_ABS) {
> + emit_mov_imm(ctx, 0, A64_R(1), imm);
> + } else {
> + emit_mov_imm(ctx, 0, tmp2, imm);
> + emit_add(ctx, 0, tmp2, src);
> + emit_mov_64(ctx, A64_R(1), tmp2);
> + }
> +
> + /* r2 = len, 1/2/4 bytes */
> + emit_mov_imm32(ctx, 0, A64_R(2), sz);
> + /* r3 = buf (SP) */
> + emit_mov_64(ctx, A64_R(3), A64_SP);
> +
> + /* tmp1 = mbuf->data_len */
> + emit_mov_imm(ctx, 1, tmp1, offsetof(struct rte_mbuf, data_len));
> + emit_ldr(ctx, BPF_W, tmp1, r6, tmp1);
> +
> + /* tmp2 = off + sz */
> + emit_add_imm_64(ctx, tmp2, A64_R(1), sz);
> + /* if off+sz > data_len, jump to slow path */
> + emit_cmp(ctx, 1, tmp2, tmp1);
> + emit_b_cond(ctx, A64_HI, 8);
> +
> + /* Fast path, read directly, pointer to the data will be in A64_R(0) */
> + /* A64_R(0) = mbuf->buf_addr */
> + emit_mov_imm(ctx, 1, tmp1, offsetof(struct rte_mbuf, buf_addr));
> + emit_ldr(ctx, EBPF_DW, A64_R(0), r6, tmp1);
> + /* tmp2 = * mbuf->data_off */
> + emit_mov_imm(ctx, 1, tmp2, offsetof(struct rte_mbuf, data_off));
> + emit_ldr(ctx, BPF_H, tmp2, r6, tmp2);
> +
> + /* A64_R(0) += data_off + off */
> + emit_add(ctx, 1, A64_R(0), tmp2);
> + emit_add(ctx, 1, A64_R(0), A64_R(1));
> +
> + /* End of Fast Path, skip slow path */
> + emit_b(ctx, 4);
> +
> + /* slow path, call __rte_pktmbuf_read */
> + emit_call(ctx, tmp1, __rte_pktmbuf_read);
> + /* check return value of __rte_pktmbuf_read */
> + emit_return_zero_if_src_zero(ctx, 1, A64_R(0));
> +
> + /* A64_R(0) points to the data, load 1/2/4 bytes into r0*/
> + emit_ldr(ctx, opsz, r0, A64_R(0), A64_ZR);
> + if (sz != sizeof(uint8_t))
> + emit_be(ctx, r0, sz * CHAR_BIT);
> +}
> +
> static void
> check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
> {
> @@ -1137,6 +1207,13 @@ check_program_has_call(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
> switch (op) {
> /* Call imm */
> case (BPF_JMP | EBPF_CALL):
> + /* BPF_LD | BPF_ABS/IND use __rte_pktmbuf_read */
> + case (BPF_LD | BPF_ABS | BPF_B):
> + case (BPF_LD | BPF_ABS | BPF_H):
> + case (BPF_LD | BPF_ABS | BPF_W):
> + case (BPF_LD | BPF_IND | BPF_B):
> + case (BPF_LD | BPF_IND | BPF_H):
> + case (BPF_LD | BPF_IND | BPF_W):
> ctx->foundcall = 1;
> return;
> }
> @@ -1338,6 +1415,15 @@ emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
> emit_mov_imm(ctx, 1, dst, u64);
> i++;
> break;
> + /* load absolute/indirect from packet */
> + case (BPF_LD | BPF_ABS | BPF_B):
> + case (BPF_LD | BPF_ABS | BPF_H):
> + case (BPF_LD | BPF_ABS | BPF_W):
> + case (BPF_LD | BPF_IND | BPF_B):
> + case (BPF_LD | BPF_IND | BPF_H):
> + case (BPF_LD | BPF_IND | BPF_W):
> + emit_ld_mbuf(ctx, op, tmp1, tmp2, src, imm);
> + break;
> /* *(size *)(dst + off) = src */
> case (BPF_STX | BPF_MEM | BPF_B):
> case (BPF_STX | BPF_MEM | BPF_H):
> --
> 2.53.0
>
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH v2 2/2] bpf/arm64: support packet data load instructions
2026-03-23 8:15 ` Christophe Fontaine
@ 2026-03-23 9:26 ` Marat Khalili
2026-04-09 22:11 ` Wathsala Vithanage
1 sibling, 0 replies; 19+ messages in thread
From: Marat Khalili @ 2026-03-23 9:26 UTC (permalink / raw)
To: Christophe Fontaine; +Cc: Konstantin Ananyev, Wathsala Vithanage, dev@dpdk.org
> It seems my patch broke the CI on debian/ubuntu, but works on Fedora/Centos.
> I was able to reproduce that with a container, and hope to provide a
> v3 this week.
Sorry, did not have time to look at it, I guess I'll wait for v3 now.
I wonder what was the problem though. Looks like some UB/compiler-dependent thing.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH v2 2/2] bpf/arm64: support packet data load instructions
2026-03-23 8:15 ` Christophe Fontaine
2026-03-23 9:26 ` Marat Khalili
@ 2026-04-09 22:11 ` Wathsala Vithanage
1 sibling, 0 replies; 19+ messages in thread
From: Wathsala Vithanage @ 2026-04-09 22:11 UTC (permalink / raw)
To: Christophe Fontaine, dev; +Cc: Konstantin Ananyev, Marat Khalili
Hi Christophe,
Thanks for doing this work. I saw that you intend to submit a v3 due to
failure in Ubuntu. Please, see my comment regarding emit_b below.
>> + /* A64_R(0) = mbuf->buf_addr */
>> + emit_mov_imm(ctx, 1, tmp1, offsetof(struct rte_mbuf, buf_addr));
>> + emit_ldr(ctx, EBPF_DW, A64_R(0), r6, tmp1);
>> + /* tmp2 = * mbuf->data_off */
>> + emit_mov_imm(ctx, 1, tmp2, offsetof(struct rte_mbuf, data_off));
>> + emit_ldr(ctx, BPF_H, tmp2, r6, tmp2);
>> +
>> + /* A64_R(0) += data_off + off */
>> + emit_add(ctx, 1, A64_R(0), tmp2);
>> + emit_add(ctx, 1, A64_R(0), A64_R(1));
>> +
>> + /* End of Fast Path, skip slow path */
>> + emit_b(ctx, 4);
>> +
Followed by emit_b() above there is an emit_call(). It invokes
emit_mov_imm() which could possibly generate different number of movk
instructions depending on the address of the __rte_pktmbuf_read. So,
emit_b(ctx, 4), may not necessarily land at the intended location
because there could be more than 4 instructions emitted by emit_call.
>> + /* slow path, call __rte_pktmbuf_read */
>> + emit_call(ctx, tmp1, __rte_pktmbuf_read);
>> + /* check return value of __rte_pktmbuf_read */
>> + emit_return_zero_if_src_zero(ctx, 1, A64_R(0));
>> +
>> + /* A64_R(0) points to the data, load 1/2/4 bytes into r0*/
>> + emit_ldr(ctx, opsz, r0, A64_R(0), A64_ZR);
>> + if (sz != sizeof(uint8_t))
>> + emit_be(ctx, r0, sz * CHAR_BIT);
>> +}
>> +
>>
--wathsala
^ permalink raw reply [flat|nested] 19+ messages in thread
end of thread, other threads:[~2026-04-09 22:11 UTC | newest]
Thread overview: 19+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-10 12:20 [PATCH] bpf/arm64: support packet data load instructions Christophe Fontaine
2026-03-17 9:07 ` David Marchand
2026-03-18 11:59 ` Marat Khalili
2026-03-18 12:54 ` Morten Brørup
2026-03-18 13:07 ` Marat Khalili
2026-03-18 13:39 ` Morten Brørup
2026-03-18 15:34 ` Christophe Fontaine
2026-03-18 16:16 ` Marat Khalili
2026-03-18 16:37 ` Morten Brørup
2026-03-18 16:43 ` Marat Khalili
2026-03-18 18:10 ` Konstantin Ananyev
2026-03-19 9:20 ` Morten Brørup
2026-03-18 23:13 ` Stephen Hemminger
2026-03-19 11:44 ` [PATCH v2 0/2] " Christophe Fontaine
2026-03-19 11:44 ` [PATCH v2 1/2] bpf/arm64: fix offset type to allow a negative jump Christophe Fontaine
2026-03-19 11:44 ` [PATCH v2 2/2] bpf/arm64: support packet data load instructions Christophe Fontaine
2026-03-23 8:15 ` Christophe Fontaine
2026-03-23 9:26 ` Marat Khalili
2026-04-09 22:11 ` Wathsala Vithanage
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox