* [PATCH bpf-next] bpf: simplify bpf_prog_pack_[size|mask]
@ 2022-07-13 20:49 Song Liu
2022-07-13 23:17 ` sdf
2022-07-22 20:10 ` patchwork-bot+netdevbpf
0 siblings, 2 replies; 3+ messages in thread
From: Song Liu @ 2022-07-13 20:49 UTC (permalink / raw)
To: bpf; +Cc: daniel, kernel-team, ast, andrii, Song Liu
Simplify the logic that selects bpf_prog_pack_size, and always use
(PMD_SIZE * num_possible_nodes()). This is a good tradeoff, as most of the
performance benefit observed is from less direct map fragmentation [1].
Also, module_alloc(4MB) may not allocate 4MB-aligned memory. Therefore, we
cannot use (ptr & bpf_prog_pack_mask) to find the correct address of
bpf_prog_pack. Fix this by checking that the header address falls within the
range [pack->ptr, pack->ptr + bpf_prog_pack_size).
[1] https://lore.kernel.org/bpf/20220707223546.4124919-1-song@kernel.org/
Signed-off-by: Song Liu <song@kernel.org>
---
kernel/bpf/core.c | 71 ++++++++++++-----------------------------------
1 file changed, 17 insertions(+), 54 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index cfb8a50a9f12..72d0721318e1 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -825,15 +825,6 @@ struct bpf_prog_pack {
#define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
-static size_t bpf_prog_pack_size = -1;
-static size_t bpf_prog_pack_mask = -1;
-
-static int bpf_prog_chunk_count(void)
-{
- WARN_ON_ONCE(bpf_prog_pack_size == -1);
- return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
-}
-
static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);
@@ -841,55 +832,33 @@ static LIST_HEAD(pack_list);
* CONFIG_MMU=n. Use PAGE_SIZE in these cases.
*/
#ifdef PMD_SIZE
-#define BPF_HPAGE_SIZE PMD_SIZE
-#define BPF_HPAGE_MASK PMD_MASK
+#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
#else
-#define BPF_HPAGE_SIZE PAGE_SIZE
-#define BPF_HPAGE_MASK PAGE_MASK
+#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif
-static size_t select_bpf_prog_pack_size(void)
-{
- size_t size;
- void *ptr;
-
- size = BPF_HPAGE_SIZE * num_online_nodes();
- ptr = module_alloc(size);
-
- /* Test whether we can get huge pages. If not just use PAGE_SIZE
- * packs.
- */
- if (!ptr || !is_vm_area_hugepages(ptr)) {
- size = PAGE_SIZE;
- bpf_prog_pack_mask = PAGE_MASK;
- } else {
- bpf_prog_pack_mask = BPF_HPAGE_MASK;
- }
-
- vfree(ptr);
- return size;
-}
+#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_prog_pack *pack;
- pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
+ pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
GFP_KERNEL);
if (!pack)
return NULL;
- pack->ptr = module_alloc(bpf_prog_pack_size);
+ pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
if (!pack->ptr) {
kfree(pack);
return NULL;
}
- bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
- bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
+ bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
+ bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
list_add_tail(&pack->list, &pack_list);
set_vm_flush_reset_perms(pack->ptr);
- set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
- set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
+ set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+ set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
return pack;
}
@@ -901,10 +870,7 @@ static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insn
void *ptr = NULL;
mutex_lock(&pack_mutex);
- if (bpf_prog_pack_size == -1)
- bpf_prog_pack_size = select_bpf_prog_pack_size();
-
- if (size > bpf_prog_pack_size) {
+ if (size > BPF_PROG_PACK_SIZE) {
size = round_up(size, PAGE_SIZE);
ptr = module_alloc(size);
if (ptr) {
@@ -916,9 +882,9 @@ static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insn
goto out;
}
list_for_each_entry(pack, &pack_list, list) {
- pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
+ pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
nbits, 0);
- if (pos < bpf_prog_chunk_count())
+ if (pos < BPF_PROG_CHUNK_COUNT)
goto found_free_area;
}
@@ -942,18 +908,15 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
struct bpf_prog_pack *pack = NULL, *tmp;
unsigned int nbits;
unsigned long pos;
- void *pack_ptr;
mutex_lock(&pack_mutex);
- if (hdr->size > bpf_prog_pack_size) {
+ if (hdr->size > BPF_PROG_PACK_SIZE) {
module_memfree(hdr);
goto out;
}
- pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask);
-
list_for_each_entry(tmp, &pack_list, list) {
- if (tmp->ptr == pack_ptr) {
+ if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
pack = tmp;
break;
}
@@ -963,14 +926,14 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
goto out;
nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
- pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
+ pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
"bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
bitmap_clear(pack->bitmap, pos, nbits);
- if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
- bpf_prog_chunk_count(), 0) == 0) {
+ if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
+ BPF_PROG_CHUNK_COUNT, 0) == 0) {
list_del(&pack->list);
module_memfree(pack->ptr);
kfree(pack);
--
2.30.2
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH bpf-next] bpf: simplify bpf_prog_pack_[size|mask]
2022-07-13 20:49 [PATCH bpf-next] bpf: simplify bpf_prog_pack_[size|mask] Song Liu
@ 2022-07-13 23:17 ` sdf
2022-07-22 20:10 ` patchwork-bot+netdevbpf
1 sibling, 0 replies; 3+ messages in thread
From: sdf @ 2022-07-13 23:17 UTC (permalink / raw)
To: Song Liu; +Cc: bpf, daniel, kernel-team, ast, andrii
On 07/13, Song Liu wrote:
> Simplify the logic that selects bpf_prog_pack_size, and always use
> (PMD_SIZE * num_possible_nodes()). This is a good tradeoff, as most of the
> performance benefit observed is from less direct map fragmentation [1].
> Also, module_alloc(4MB) may not allocate 4MB aligned memory. Therefore, we
> cannot use (ptr & bpf_prog_pack_mask) to find the correct address of
> bpf_prog_pack. Fix this by checking the header address falls in the range
> of pack->ptr and (pack->ptr + bpf_prog_pack_size).
> [1] https://lore.kernel.org/bpf/20220707223546.4124919-1-song@kernel.org/
> Signed-off-by: Song Liu <song@kernel.org>
Reviewed-by: Stanislav Fomichev <sdf@google.com>
(purely from comparing before/after, ignoring larger context)
> ---
> kernel/bpf/core.c | 71 ++++++++++++-----------------------------------
> 1 file changed, 17 insertions(+), 54 deletions(-)
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index cfb8a50a9f12..72d0721318e1 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -825,15 +825,6 @@ struct bpf_prog_pack {
> #define BPF_PROG_SIZE_TO_NBITS(size) (round_up(size,
> BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
> -static size_t bpf_prog_pack_size = -1;
> -static size_t bpf_prog_pack_mask = -1;
> -
> -static int bpf_prog_chunk_count(void)
> -{
> - WARN_ON_ONCE(bpf_prog_pack_size == -1);
> - return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
> -}
> -
> static DEFINE_MUTEX(pack_mutex);
> static LIST_HEAD(pack_list);
> @@ -841,55 +832,33 @@ static LIST_HEAD(pack_list);
> * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
> */
> #ifdef PMD_SIZE
> -#define BPF_HPAGE_SIZE PMD_SIZE
> -#define BPF_HPAGE_MASK PMD_MASK
> +#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
> #else
> -#define BPF_HPAGE_SIZE PAGE_SIZE
> -#define BPF_HPAGE_MASK PAGE_MASK
> +#define BPF_PROG_PACK_SIZE PAGE_SIZE
> #endif
> -static size_t select_bpf_prog_pack_size(void)
> -{
> - size_t size;
> - void *ptr;
> -
> - size = BPF_HPAGE_SIZE * num_online_nodes();
> - ptr = module_alloc(size);
> -
> - /* Test whether we can get huge pages. If not just use PAGE_SIZE
> - * packs.
> - */
> - if (!ptr || !is_vm_area_hugepages(ptr)) {
> - size = PAGE_SIZE;
> - bpf_prog_pack_mask = PAGE_MASK;
> - } else {
> - bpf_prog_pack_mask = BPF_HPAGE_MASK;
> - }
> -
> - vfree(ptr);
> - return size;
> -}
> +#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
> static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t
> bpf_fill_ill_insns)
> {
> struct bpf_prog_pack *pack;
> - pack = kzalloc(struct_size(pack, bitmap,
> BITS_TO_LONGS(bpf_prog_chunk_count())),
> + pack = kzalloc(struct_size(pack, bitmap,
> BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
> GFP_KERNEL);
> if (!pack)
> return NULL;
> - pack->ptr = module_alloc(bpf_prog_pack_size);
> + pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
> if (!pack->ptr) {
> kfree(pack);
> return NULL;
> }
> - bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
> - bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
> + bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
> + bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
> list_add_tail(&pack->list, &pack_list);
> set_vm_flush_reset_perms(pack->ptr);
> - set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
> - set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
> + set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> + set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> return pack;
> }
> @@ -901,10 +870,7 @@ static void *bpf_prog_pack_alloc(u32 size,
> bpf_jit_fill_hole_t bpf_fill_ill_insn
> void *ptr = NULL;
> mutex_lock(&pack_mutex);
> - if (bpf_prog_pack_size == -1)
> - bpf_prog_pack_size = select_bpf_prog_pack_size();
> -
> - if (size > bpf_prog_pack_size) {
> + if (size > BPF_PROG_PACK_SIZE) {
> size = round_up(size, PAGE_SIZE);
> ptr = module_alloc(size);
> if (ptr) {
> @@ -916,9 +882,9 @@ static void *bpf_prog_pack_alloc(u32 size,
> bpf_jit_fill_hole_t bpf_fill_ill_insn
> goto out;
> }
> list_for_each_entry(pack, &pack_list, list) {
> - pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(),
> 0,
> + pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
> nbits, 0);
> - if (pos < bpf_prog_chunk_count())
> + if (pos < BPF_PROG_CHUNK_COUNT)
> goto found_free_area;
> }
> @@ -942,18 +908,15 @@ static void bpf_prog_pack_free(struct
> bpf_binary_header *hdr)
> struct bpf_prog_pack *pack = NULL, *tmp;
> unsigned int nbits;
> unsigned long pos;
> - void *pack_ptr;
> mutex_lock(&pack_mutex);
> - if (hdr->size > bpf_prog_pack_size) {
> + if (hdr->size > BPF_PROG_PACK_SIZE) {
> module_memfree(hdr);
> goto out;
> }
> - pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask);
> -
> list_for_each_entry(tmp, &pack_list, list) {
> - if (tmp->ptr == pack_ptr) {
> + if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void
> *)hdr) {
> pack = tmp;
> break;
> }
> @@ -963,14 +926,14 @@ static void bpf_prog_pack_free(struct
> bpf_binary_header *hdr)
> goto out;
> nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
> - pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >>
> BPF_PROG_CHUNK_SHIFT;
> + pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >>
> BPF_PROG_CHUNK_SHIFT;
> WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
> "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
> bitmap_clear(pack->bitmap, pos, nbits);
> - if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
> - bpf_prog_chunk_count(), 0) == 0) {
> + if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
> + BPF_PROG_CHUNK_COUNT, 0) == 0) {
> list_del(&pack->list);
> module_memfree(pack->ptr);
> kfree(pack);
> --
> 2.30.2
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH bpf-next] bpf: simplify bpf_prog_pack_[size|mask]
2022-07-13 20:49 [PATCH bpf-next] bpf: simplify bpf_prog_pack_[size|mask] Song Liu
2022-07-13 23:17 ` sdf
@ 2022-07-22 20:10 ` patchwork-bot+netdevbpf
1 sibling, 0 replies; 3+ messages in thread
From: patchwork-bot+netdevbpf @ 2022-07-22 20:10 UTC (permalink / raw)
To: Song Liu; +Cc: bpf, daniel, kernel-team, ast, andrii
Hello:
This patch was applied to bpf/bpf-next.git (master)
by Daniel Borkmann <daniel@iogearbox.net>:
On Wed, 13 Jul 2022 13:49:50 -0700 you wrote:
> Simplify the logic that selects bpf_prog_pack_size, and always use
> (PMD_SIZE * num_possible_nodes()). This is a good tradeoff, as most of the
> performance benefit observed is from less direct map fragmentation [1].
>
> Also, module_alloc(4MB) may not allocate 4MB aligned memory. Therefore, we
> cannot use (ptr & bpf_prog_pack_mask) to find the correct address of
> bpf_prog_pack. Fix this by checking the header address falls in the range
> of pack->ptr and (pack->ptr + bpf_prog_pack_size).
>
> [...]
Here is the summary with links:
- [bpf-next] bpf: simplify bpf_prog_pack_[size|mask]
https://git.kernel.org/bpf/bpf-next/c/ea2babac63d4
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2022-07-22 20:10 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-07-13 20:49 [PATCH bpf-next] bpf: simplify bpf_prog_pack_[size|mask] Song Liu
2022-07-13 23:17 ` sdf
2022-07-22 20:10 ` patchwork-bot+netdevbpf
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox