From: Stanislav Fomichev <sdf@google.com>
To: netdev@vger.kernel.org, bpf@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
Stanislav Fomichev <sdf@google.com>
Subject: [PATCH bpf-next v7 04/11] bpf: minimize number of allocated lsm slots per program
Date: Wed, 18 May 2022 15:55:24 -0700 [thread overview]
Message-ID: <20220518225531.558008-5-sdf@google.com> (raw)
In-Reply-To: <20220518225531.558008-1-sdf@google.com>
The previous patch added a 1:1 mapping between all 211 LSM hooks
and the bpf_cgroup program array. Instead of reserving a slot per
possible hook, reserve 10 slots per cgroup for LSM programs.
Those slots are dynamically allocated on demand and reclaimed.
struct cgroup_bpf {
struct bpf_prog_array * effective[33]; /* 0 264 */
/* --- cacheline 4 boundary (256 bytes) was 8 bytes ago --- */
struct hlist_head progs[33]; /* 264 264 */
/* --- cacheline 8 boundary (512 bytes) was 16 bytes ago --- */
u8 flags[33]; /* 528 33 */
/* XXX 7 bytes hole, try to pack */
struct list_head storages; /* 568 16 */
/* --- cacheline 9 boundary (576 bytes) was 8 bytes ago --- */
struct bpf_prog_array * inactive; /* 584 8 */
struct percpu_ref refcnt; /* 592 16 */
struct work_struct release_work; /* 608 72 */
/* size: 680, cachelines: 11, members: 7 */
/* sum members: 673, holes: 1, sum holes: 7 */
/* last cacheline: 40 bytes */
};
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
include/linux/bpf-cgroup-defs.h | 3 +-
include/linux/bpf_lsm.h | 6 --
kernel/bpf/bpf_lsm.c | 5 --
kernel/bpf/cgroup.c | 135 +++++++++++++++++++++++++++++---
4 files changed, 125 insertions(+), 24 deletions(-)
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
index d5a70a35dace..359d3f16abea 100644
--- a/include/linux/bpf-cgroup-defs.h
+++ b/include/linux/bpf-cgroup-defs.h
@@ -10,7 +10,8 @@
struct bpf_prog_array;
-#define CGROUP_LSM_NUM 211 /* will be addressed in the next patch */
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
enum cgroup_bpf_attach_type {
CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index 7f0e59f5f9be..613de44aa429 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -43,7 +43,6 @@ extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
void bpf_inode_storage_free(struct inode *inode);
int bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
-int bpf_lsm_hook_idx(u32 btf_id);
#else /* !CONFIG_BPF_LSM */
@@ -74,11 +73,6 @@ static inline int bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
return -ENOENT;
}
-static inline int bpf_lsm_hook_idx(u32 btf_id)
-{
- return -EINVAL;
-}
-
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 654c23577ad3..96503c3e7a71 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -71,11 +71,6 @@ int bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
return 0;
}
-int bpf_lsm_hook_idx(u32 btf_id)
-{
- return btf_id_set_index(&bpf_lsm_hooks, btf_id);
-}
-
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 2c356a38f4cf..a959cdd22870 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -132,15 +132,110 @@ unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
}
#ifdef CONFIG_BPF_LSM
+struct list_head unused_bpf_lsm_atypes;
+struct list_head used_bpf_lsm_atypes;
+
+struct bpf_lsm_attach_type {
+ int index;
+ u32 btf_id;
+ int usecnt;
+ struct list_head atypes;
+ struct rcu_head rcu_head;
+};
+
+static int __init bpf_lsm_attach_type_init(void)
+{
+ struct bpf_lsm_attach_type *atype;
+ int i;
+
+ INIT_LIST_HEAD_RCU(&unused_bpf_lsm_atypes);
+ INIT_LIST_HEAD_RCU(&used_bpf_lsm_atypes);
+
+ for (i = 0; i < CGROUP_LSM_NUM; i++) {
+ atype = kzalloc(sizeof(*atype), GFP_KERNEL);
+ if (!atype)
+ continue;
+
+ atype->index = i;
+ list_add_tail_rcu(&atype->atypes, &unused_bpf_lsm_atypes);
+ }
+
+ return 0;
+}
+late_initcall(bpf_lsm_attach_type_init);
+
static enum cgroup_bpf_attach_type bpf_lsm_attach_type_get(u32 attach_btf_id)
{
- return CGROUP_LSM_START + bpf_lsm_hook_idx(attach_btf_id);
+ struct bpf_lsm_attach_type *atype;
+
+ lockdep_assert_held(&cgroup_mutex);
+
+ list_for_each_entry_rcu(atype, &used_bpf_lsm_atypes, atypes) {
+ if (atype->btf_id != attach_btf_id)
+ continue;
+
+ atype->usecnt++;
+ return CGROUP_LSM_START + atype->index;
+ }
+
+ atype = list_first_or_null_rcu(&unused_bpf_lsm_atypes, struct bpf_lsm_attach_type, atypes);
+ if (!atype)
+ return -E2BIG;
+
+ list_del_rcu(&atype->atypes);
+ atype->btf_id = attach_btf_id;
+ atype->usecnt = 1;
+ list_add_tail_rcu(&atype->atypes, &used_bpf_lsm_atypes);
+
+ return CGROUP_LSM_START + atype->index;
+}
+
+static void bpf_lsm_attach_type_reclaim(struct rcu_head *head)
+{
+ struct bpf_lsm_attach_type *atype =
+ container_of(head, struct bpf_lsm_attach_type, rcu_head);
+
+ atype->btf_id = 0;
+ atype->usecnt = 0;
+ list_add_tail_rcu(&atype->atypes, &unused_bpf_lsm_atypes);
+}
+
+static void bpf_lsm_attach_type_put(u32 attach_btf_id)
+{
+ struct bpf_lsm_attach_type *atype;
+
+ lockdep_assert_held(&cgroup_mutex);
+
+ list_for_each_entry_rcu(atype, &used_bpf_lsm_atypes, atypes) {
+ if (atype->btf_id != attach_btf_id)
+ continue;
+
+ if (--atype->usecnt <= 0) {
+ list_del_rcu(&atype->atypes);
+ WARN_ON_ONCE(atype->usecnt < 0);
+
+ /* call_rcu here prevents atype reuse within
+ * the same rcu grace period.
+ * shim programs use __bpf_prog_enter_lsm_cgroup
+ * which starts RCU read section.
+ */
+ call_rcu(&atype->rcu_head, bpf_lsm_attach_type_reclaim);
+ }
+
+ return;
+ }
+
+ WARN_ON_ONCE(1);
}
#else
static enum cgroup_bpf_attach_type bpf_lsm_attach_type_get(u32 attach_btf_id)
{
return -EOPNOTSUPP;
}
+
+static void bpf_lsm_attach_type_put(u32 attach_btf_id)
+{
+}
#endif /* CONFIG_BPF_LSM */
void cgroup_bpf_offline(struct cgroup *cgrp)
@@ -224,6 +319,7 @@ static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
static void bpf_cgroup_lsm_shim_release(struct bpf_prog *prog)
{
bpf_trampoline_unlink_cgroup_shim(prog);
+ bpf_lsm_attach_type_put(prog->aux->attach_btf_id);
}
/**
@@ -619,27 +715,37 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
progs = &cgrp->bpf.progs[atype];
- if (!hierarchy_allows_attach(cgrp, atype))
- return -EPERM;
+ if (!hierarchy_allows_attach(cgrp, atype)) {
+ err = -EPERM;
+ goto cleanup_attach_type;
+ }
- if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
+ if (!hlist_empty(progs) && cgrp->bpf.flags[atype] != saved_flags) {
/* Disallow attaching non-overridable on top
* of existing overridable in this cgroup.
* Disallow attaching multi-prog if overridable or none
*/
- return -EPERM;
+ err = -EPERM;
+ goto cleanup_attach_type;
+ }
- if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
- return -E2BIG;
+ if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) {
+ err = -E2BIG;
+ goto cleanup_attach_type;
+ }
pl = find_attach_entry(progs, prog, link, replace_prog,
flags & BPF_F_ALLOW_MULTI);
- if (IS_ERR(pl))
- return PTR_ERR(pl);
+ if (IS_ERR(pl)) {
+ err = PTR_ERR(pl);
+ goto cleanup_attach_type;
+ }
if (bpf_cgroup_storages_alloc(storage, new_storage, type,
- prog ? : link->link.prog, cgrp))
- return -ENOMEM;
+ prog ? : link->link.prog, cgrp)) {
+ err = -ENOMEM;
+ goto cleanup_attach_type;
+ }
if (pl) {
old_prog = pl->prog;
@@ -649,7 +755,8 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
pl = kmalloc(sizeof(*pl), GFP_KERNEL);
if (!pl) {
bpf_cgroup_storages_free(new_storage);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto cleanup_attach_type;
}
if (hlist_empty(progs))
hlist_add_head(&pl->node, progs);
@@ -698,6 +805,10 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
hlist_del(&pl->node);
kfree(pl);
}
+
+cleanup_attach_type:
+ if (type == BPF_LSM_CGROUP)
+ bpf_lsm_attach_type_put(new_prog->aux->attach_btf_id);
return err;
}
--
2.36.1.124.g0e6072fb45-goog
next prev parent reply other threads:[~2022-05-18 22:56 UTC|newest]
Thread overview: 54+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-05-18 22:55 [PATCH bpf-next v7 00/11] bpf: cgroup_sock lsm flavor Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 01/11] bpf: add bpf_func_t and trampoline helpers Stanislav Fomichev
2022-05-20 0:45 ` Yonghong Song
2022-05-21 0:03 ` Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 02/11] bpf: convert cgroup_bpf.progs to hlist Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 03/11] bpf: per-cgroup lsm flavor Stanislav Fomichev
2022-05-20 1:00 ` Yonghong Song
2022-05-21 0:03 ` Stanislav Fomichev
2022-05-23 15:41 ` Yonghong Song
2022-05-21 0:53 ` Martin KaFai Lau
2022-05-24 2:15 ` Stanislav Fomichev
2022-05-24 5:40 ` Martin KaFai Lau
2022-05-24 15:56 ` Stanislav Fomichev
2022-05-24 5:57 ` Martin KaFai Lau
2022-05-18 22:55 ` Stanislav Fomichev [this message]
2022-05-21 6:56 ` [PATCH bpf-next v7 04/11] bpf: minimize number of allocated lsm slots per program Martin KaFai Lau
2022-05-24 2:14 ` Stanislav Fomichev
2022-05-24 5:53 ` Martin KaFai Lau
2022-05-18 22:55 ` [PATCH bpf-next v7 05/11] bpf: implement BPF_PROG_QUERY for BPF_LSM_CGROUP Stanislav Fomichev
2022-05-19 2:31 ` kernel test robot
2022-05-19 14:57 ` kernel test robot
2022-05-23 23:23 ` Andrii Nakryiko
2022-05-24 2:15 ` Stanislav Fomichev
2022-05-24 3:48 ` Martin KaFai Lau
2022-05-24 15:55 ` Stanislav Fomichev
2022-05-24 17:50 ` Martin KaFai Lau
2022-05-24 23:45 ` Andrii Nakryiko
2022-05-25 4:03 ` Stanislav Fomichev
2022-05-25 4:39 ` Andrii Nakryiko
2022-05-25 16:01 ` Stanislav Fomichev
2022-05-25 17:02 ` Stanislav Fomichev
2022-05-25 20:39 ` Martin KaFai Lau
2022-05-25 21:25 ` sdf
2022-05-26 0:03 ` Martin KaFai Lau
2022-05-26 1:23 ` Martin KaFai Lau
2022-05-26 2:50 ` Stanislav Fomichev
2022-05-31 23:08 ` Andrii Nakryiko
2022-05-18 22:55 ` [PATCH bpf-next v7 06/11] bpf: allow writing to a subset of sock fields from lsm progtype Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 07/11] libbpf: implement bpf_prog_query_opts Stanislav Fomichev
2022-05-23 23:22 ` Andrii Nakryiko
2022-05-24 2:15 ` Stanislav Fomichev
2022-05-24 3:45 ` Andrii Nakryiko
2022-05-24 4:01 ` Martin KaFai Lau
2022-05-18 22:55 ` [PATCH bpf-next v7 08/11] libbpf: add lsm_cgroup_sock type Stanislav Fomichev
2022-05-23 23:26 ` Andrii Nakryiko
2022-05-24 2:15 ` Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 09/11] bpftool: implement cgroup tree for BPF_LSM_CGROUP Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 10/11] selftests/bpf: lsm_cgroup functional test Stanislav Fomichev
2022-05-18 22:55 ` [PATCH bpf-next v7 11/11] selftests/bpf: verify lsm_cgroup struct sock access Stanislav Fomichev
2022-05-23 23:33 ` Andrii Nakryiko
2022-05-24 2:15 ` Stanislav Fomichev
2022-05-24 3:46 ` Andrii Nakryiko
2022-05-19 23:34 ` [PATCH bpf-next v7 00/11] bpf: cgroup_sock lsm flavor Yonghong Song
2022-05-19 23:39 ` Stanislav Fomichev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220518225531.558008-5-sdf@google.com \
--to=sdf@google.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox