From: Menglong Dong <menglong8.dong@gmail.com>
To: peterz@infradead.org, rostedt@goodmis.org, mark.rutland@arm.com,
alexei.starovoitov@gmail.com
Cc: catalin.marinas@arm.com, will@kernel.org, mhiramat@kernel.org,
tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
dave.hansen@linux.intel.com, x86@kernel.org, hpa@zytor.com,
ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
martin.lau@linux.dev, eddyz87@gmail.com, yonghong.song@linux.dev,
john.fastabend@gmail.com, kpsingh@kernel.org, sdf@fomichev.me,
jolsa@kernel.org, davem@davemloft.net, dsahern@kernel.org,
mathieu.desnoyers@efficios.com, nathan@kernel.org,
nick.desaulniers+lkml@gmail.com, morbo@google.com,
samitolvanen@google.com, kees@kernel.org,
dongml2@chinatelecom.cn, akpm@linux-foundation.org,
riel@surriel.com, rppt@kernel.org,
linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, linux-trace-kernel@vger.kernel.org,
bpf@vger.kernel.org, netdev@vger.kernel.org,
llvm@lists.linux.dev
Subject: [PATCH bpf-next v3 4/4] arm64: implement per-function metadata storage for arm64
Date: Mon, 3 Mar 2025 14:53:45 +0800 [thread overview]
Message-ID: <20250303065345.229298-5-dongml2@chinatelecom.cn> (raw)
In-Reply-To: <20250303065345.229298-1-dongml2@chinatelecom.cn>
The per-function metadata storage is already used by ftrace if
CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS is enabled, and it stores the pointer
of the callback directly in the function padding, which consumes 8 bytes,
as introduced in commit
baaf553d3bc3 ("arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS").
So we can store the index directly in the function padding too, without
prepending anything. With CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS enabled, the
function is 8-byte aligned, and we will compile the kernel with an extra
8 bytes (2 NOPs) of padding space. Otherwise, the function is 4-byte
aligned, and only an extra 4 bytes (1 NOP) is needed.
However, we have the same problem that Mark described in the commit above:
we can't use the function padding together with CFI_CLANG, as that can make
clang compute a wrong offset to the pre-function type hash. He said that he
was working with others on this problem 2 years ago. Hi Mark, is there any
progress on this problem?
Signed-off-by: Menglong Dong <dongml2@chinatelecom.cn>
---
arch/arm64/Kconfig | 15 +++++++++++++++
arch/arm64/Makefile | 23 ++++++++++++++++++++--
arch/arm64/include/asm/ftrace.h | 34 +++++++++++++++++++++++++++++++++
arch/arm64/kernel/ftrace.c | 13 +++++++++++--
4 files changed, 81 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 940343beb3d4..7ed80f5eb267 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1536,6 +1536,21 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
+config FUNCTION_METADATA
+ bool "Per-function metadata storage support"
+ default y
+ select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE if !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+ depends on !CFI_CLANG
+ help
+ Support per-function metadata storage for kernel functions, and
+ get the metadata of the function by its address with almost no
+ overhead.
+
+ The index of the metadata will be stored in the function padding,
+ which will consume 4-bytes. If FUNCTION_ALIGNMENT_8B is enabled,
+ extra 8-bytes function padding will be reserved during compiling.
+ Otherwise, only extra 4-bytes function padding is needed.
+
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2b25d671365f..2df2b0f4dd90 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -144,12 +144,31 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_FUNCTION_METADATA),y)
+ ifeq ($(CONFIG_FUNCTION_ALIGNMENT_8B),y)
+ __padding_nops := 2
+ else
+ __padding_nops := 1
+ endif
+else
+ __padding_nops := 0
+endif
+
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
+ __padding_nops := $(shell echo $(__padding_nops) + 2 | bc)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
- CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(shell echo $(__padding_nops) + 2 | bc),$(__padding_nops)
else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(shell echo $(__padding_nops) + 2 | bc),$(__padding_nops)
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
- CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+else ifeq ($(CONFIG_FUNCTION_METADATA),y)
+ CC_FLAGS_FTRACE += -fpatchable-function-entry=$(__padding_nops),$(__padding_nops)
+ ifneq ($(CONFIG_FUNCTION_TRACER),y)
+ KBUILD_CFLAGS += $(CC_FLAGS_FTRACE)
+ # some file need to remove this cflag when CONFIG_FUNCTION_TRACER
+ # is not enabled, so we need to export it here
+ export CC_FLAGS_FTRACE
+ endif
endif
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index bfe3ce9df197..aa3eaa91bf82 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -24,6 +24,16 @@
#define FTRACE_PLT_IDX 0
#define NR_FTRACE_PLTS 1
+#ifdef CONFIG_FUNCTION_METADATA
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+#define KFUNC_MD_DATA_OFFSET (AARCH64_INSN_SIZE * 3)
+#else
+#define KFUNC_MD_DATA_OFFSET AARCH64_INSN_SIZE
+#endif
+#define KFUNC_MD_INSN_SIZE AARCH64_INSN_SIZE
+#define KFUNC_MD_INSN_OFFSET KFUNC_MD_DATA_OFFSET
+#endif
+
/*
* Currently, gcc tends to save the link register after the local variables
* on the stack. This causes the max stack tracer to report the function
@@ -216,6 +226,30 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
*/
return !strcmp(sym + 8, name);
}
+
+#ifdef CONFIG_FUNCTION_METADATA
+#include <asm/text-patching.h>
+
+static inline bool kfunc_md_arch_exist(void *ip)
+{
+ return !aarch64_insn_is_nop(*(u32 *)(ip - KFUNC_MD_INSN_OFFSET));
+}
+
+static inline void kfunc_md_arch_pretend(u8 *insn, u32 index)
+{
+ *(u32 *)insn = index;
+}
+
+static inline void kfunc_md_arch_nops(u8 *insn)
+{
+ *(u32 *)insn = aarch64_insn_gen_nop();
+}
+
+static inline int kfunc_md_arch_poke(void *ip, u8 *insn)
+{
+ return aarch64_insn_patch_text_nosync(ip, *(u32 *)insn);
+}
+#endif
#endif /* ifndef __ASSEMBLY__ */
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index d7c0d023dfe5..4191ff0037f5 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -88,8 +88,10 @@ unsigned long ftrace_call_adjust(unsigned long addr)
* to `BL <caller>`, which is at `addr + 4` bytes in either case.
*
*/
- if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
- return addr + AARCH64_INSN_SIZE;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) {
+ addr += AARCH64_INSN_SIZE;
+ goto out;
+ }
/*
* When using patchable-function-entry with pre-function NOPs, addr is
@@ -139,6 +141,13 @@ unsigned long ftrace_call_adjust(unsigned long addr)
/* Skip the first NOP after function entry */
addr += AARCH64_INSN_SIZE;
+out:
+ if (IS_ENABLED(CONFIG_FUNCTION_METADATA)) {
+ if (IS_ENABLED(CONFIG_FUNCTION_ALIGNMENT_8B))
+ addr += 2 * AARCH64_INSN_SIZE;
+ else
+ addr += AARCH64_INSN_SIZE;
+ }
return addr;
}
--
2.39.5
prev parent reply other threads:[~2025-03-03 7:03 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-03-03 6:53 [PATCH bpf-next v3 0/4] per-function storage support Menglong Dong
2025-03-03 6:53 ` [PATCH bpf-next v3 1/4] x86/ibt: factor out cfi and fineibt offset Menglong Dong
2025-03-03 9:18 ` Peter Zijlstra
2025-03-03 10:51 ` Menglong Dong
2025-03-03 11:24 ` Peter Zijlstra
2025-03-03 6:53 ` [PATCH bpf-next v3 2/4] add per-function metadata storage support Menglong Dong
2025-03-03 6:53 ` [PATCH bpf-next v3 3/4] x86: implement per-function metadata storage for x86 Menglong Dong
2025-03-03 16:05 ` Steven Rostedt
2025-03-04 2:07 ` Menglong Dong
2025-03-04 3:06 ` Menglong Dong
2025-03-03 6:53 ` Menglong Dong [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250303065345.229298-5-dongml2@chinatelecom.cn \
--to=menglong8.dong@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=alexei.starovoitov@gmail.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bp@alien8.de \
--cc=bpf@vger.kernel.org \
--cc=catalin.marinas@arm.com \
--cc=daniel@iogearbox.net \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=dongml2@chinatelecom.cn \
--cc=dsahern@kernel.org \
--cc=eddyz87@gmail.com \
--cc=hpa@zytor.com \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kees@kernel.org \
--cc=kpsingh@kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-trace-kernel@vger.kernel.org \
--cc=llvm@lists.linux.dev \
--cc=mark.rutland@arm.com \
--cc=martin.lau@linux.dev \
--cc=mathieu.desnoyers@efficios.com \
--cc=mhiramat@kernel.org \
--cc=mingo@redhat.com \
--cc=morbo@google.com \
--cc=nathan@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nick.desaulniers+lkml@gmail.com \
--cc=peterz@infradead.org \
--cc=riel@surriel.com \
--cc=rostedt@goodmis.org \
--cc=rppt@kernel.org \
--cc=samitolvanen@google.com \
--cc=sdf@fomichev.me \
--cc=tglx@linutronix.de \
--cc=will@kernel.org \
--cc=x86@kernel.org \
--cc=yonghong.song@linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox