From: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
To: bpf@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
martin.lau@linux.dev, eddyz87@gmail.com, song@kernel.org,
yonghong.song@linux.dev, john.fastabend@gmail.com,
kpsingh@kernel.org, sdf@fomichev.me, haoluo@google.com,
jolsa@kernel.org, djwillia@vt.edu, miloc@vt.edu, ericts@vt.edu,
rahult@vt.edu, doniaghazy@vt.edu, quanzhif@vt.edu,
jinghao7@illinois.edu, sidchintamaneni@gmail.com,
memxor@gmail.com, egor@vt.edu, sairoop10@gmail.com,
rjsu26@gmail.com
Subject: [PATCH 1/4] bpf: Introduce new structs and struct fields for fast path termination
Date: Sun, 7 Sep 2025 23:04:12 +0000 [thread overview]
Message-ID: <20250907230415.289327-2-sidchintamaneni@gmail.com> (raw)
In-Reply-To: <20250907230415.289327-1-sidchintamaneni@gmail.com>
Introduce the definition of struct bpf_term_aux_states
required to support fast-path termination of BPF programs.
Add the memory allocation and free logic for the newly added
term_states field in struct bpf_prog.
Signed-off-by: Raj Sahu <rjsu26@gmail.com>
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@gmail.com>
---
include/linux/bpf.h | 75 +++++++++++++++++++++++++++++----------------
kernel/bpf/core.c | 31 +++++++++++++++++++
2 files changed, 79 insertions(+), 27 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8f6e87f0f3a8..caaee33744fc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1584,6 +1584,25 @@ struct bpf_stream_stage {
int len;
};
+struct call_aux_states {
+ int call_bpf_insn_idx;
+ int jit_call_idx;
+ u8 is_helper_kfunc;
+ u8 is_bpf_loop;
+ u8 is_bpf_loop_cb_inline;
+};
+
+struct bpf_term_patch_call_sites {
+ u32 call_sites_cnt;
+ struct call_aux_states *call_states;
+};
+
+struct bpf_term_aux_states {
+ struct bpf_prog *prog;
+ struct work_struct work;
+ struct bpf_term_patch_call_sites *patch_call_sites;
+};
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
@@ -1618,6 +1637,7 @@ struct bpf_prog_aux {
bool tail_call_reachable;
bool xdp_has_frags;
bool exception_cb;
+ bool is_bpf_loop_cb_non_inline;
bool exception_boundary;
bool is_extended; /* true if extended by freplace program */
bool jits_use_priv_stack;
@@ -1696,33 +1716,34 @@ struct bpf_prog_aux {
};
struct bpf_prog {
- u16 pages; /* Number of allocated pages */
- u16 jited:1, /* Is our filter JIT'ed? */
- jit_requested:1,/* archs need to JIT the prog */
- gpl_compatible:1, /* Is filter GPL compatible? */
- cb_access:1, /* Is control block accessed? */
- dst_needed:1, /* Do we need dst entry? */
- blinding_requested:1, /* needs constant blinding */
- blinded:1, /* Was blinded */
- is_func:1, /* program is a bpf function */
- kprobe_override:1, /* Do we override a kprobe? */
- has_callchain_buf:1, /* callchain buffer allocated? */
- enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
- call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
- call_get_func_ip:1, /* Do we call get_func_ip() */
- tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
- sleepable:1; /* BPF program is sleepable */
- enum bpf_prog_type type; /* Type of BPF program */
- enum bpf_attach_type expected_attach_type; /* For some prog types */
- u32 len; /* Number of filter blocks */
- u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_stats __percpu *stats;
- int __percpu *active;
- unsigned int (*bpf_func)(const void *ctx,
- const struct bpf_insn *insn);
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ u16 pages; /* Number of allocated pages */
+ u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinding_requested:1, /* needs constant blinding */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1, /* callchain buffer allocated? */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1, /* Do we call get_func_ip() */
+ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
+ sleepable:1; /* BPF program is sleepable */
+ enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
+ u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
+ u8 tag[BPF_TAG_SIZE];
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ struct bpf_term_aux_states *term_states;
/* Instructions for interpreter */
union {
DECLARE_FLEX_ARRAY(struct sock_filter, insns);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ef01cc644a96..740b5a3a6b55 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -100,6 +100,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
struct bpf_prog_aux *aux;
struct bpf_prog *fp;
+ struct bpf_term_aux_states *term_states = NULL;
+ struct bpf_term_patch_call_sites *patch_call_sites = NULL;
size = round_up(size, __PAGE_SIZE);
fp = __vmalloc(size, gfp_flags);
@@ -118,11 +120,24 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
return NULL;
}
+ term_states = kzalloc(sizeof(*term_states), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
+ if (!term_states)
+ goto free_alloc_percpu;
+
+ patch_call_sites = kzalloc(sizeof(*patch_call_sites), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
+ if (!patch_call_sites)
+ goto free_bpf_term_states;
+
fp->pages = size / PAGE_SIZE;
fp->aux = aux;
fp->aux->prog = fp;
fp->jit_requested = ebpf_jit_enabled();
fp->blinding_requested = bpf_jit_blinding_enabled(fp);
+ fp->term_states = term_states;
+ fp->term_states->patch_call_sites = patch_call_sites;
+ fp->term_states->patch_call_sites->call_sites_cnt = 0;
+ fp->term_states->prog = fp;
+
#ifdef CONFIG_CGROUP_BPF
aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif
@@ -140,6 +155,15 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
#endif
return fp;
+
+free_bpf_term_states:
+ kfree(term_states);
+free_alloc_percpu:
+ free_percpu(fp->active);
+ kfree(aux);
+ vfree(fp);
+
+ return NULL;
}
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
@@ -266,6 +290,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
fp->pages = pages;
fp->aux->prog = fp;
+ fp->term_states->prog = fp;
/* We keep fp->aux from fp_old around in the new
* reallocated structure.
@@ -273,6 +298,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
fp_old->aux = NULL;
fp_old->stats = NULL;
fp_old->active = NULL;
+ fp_old->term_states = NULL;
__bpf_prog_free(fp_old);
}
@@ -287,6 +313,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
kfree(fp->aux->poke_tab);
kfree(fp->aux);
}
+ if (fp->term_states) {
+ if (fp->term_states->patch_call_sites)
+ kfree(fp->term_states->patch_call_sites);
+ kfree(fp->term_states);
+ }
free_percpu(fp->stats);
free_percpu(fp->active);
vfree(fp);
--
2.43.0
next prev parent reply other threads:[~2025-09-07 23:04 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-07 23:04 [PATCH 0/4] bpf: Fast-Path approach for BPF program termination Siddharth Chintamaneni
2025-09-07 23:04 ` Siddharth Chintamaneni [this message]
2025-09-17 2:11 ` [PATCH 1/4] bpf: Introduce new structs and struct fields for fast path termination Kumar Kartikeya Dwivedi
2025-09-17 3:38 ` Siddharth Chintamaneni
2025-09-07 23:04 ` [PATCH 2/4] bpf: Creating call sites table to stub instructions during runtime Siddharth Chintamaneni
2025-09-07 23:04 ` [PATCH 3/4] bpf: runtime part of fast-path termination approach Siddharth Chintamaneni
2025-09-08 6:01 ` kernel test robot
2025-09-08 7:14 ` kernel test robot
2025-09-17 2:11 ` Kumar Kartikeya Dwivedi
2025-09-17 4:01 ` Siddharth Chintamaneni
2025-09-07 23:04 ` [PATCH 4/4] selftests/bpf: Adds selftests to check termination of long running nested bpf loops Siddharth Chintamaneni
2025-09-17 2:13 ` [PATCH 0/4] bpf: Fast-Path approach for BPF program termination Kumar Kartikeya Dwivedi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250907230415.289327-2-sidchintamaneni@gmail.com \
--to=sidchintamaneni@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=djwillia@vt.edu \
--cc=doniaghazy@vt.edu \
--cc=eddyz87@gmail.com \
--cc=egor@vt.edu \
--cc=ericts@vt.edu \
--cc=haoluo@google.com \
--cc=jinghao7@illinois.edu \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=martin.lau@linux.dev \
--cc=memxor@gmail.com \
--cc=miloc@vt.edu \
--cc=quanzhif@vt.edu \
--cc=rahult@vt.edu \
--cc=rjsu26@gmail.com \
--cc=sairoop10@gmail.com \
--cc=sdf@fomichev.me \
--cc=song@kernel.org \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox