From: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
To: Ingo Molnar <mingo@kernel.org>
Cc: linux-arch@vger.kernel.org,
	Ananth N Mavinakayanahalli <ananth@in.ibm.com>,
	Sandeepa Prabhu <sandeepa.prabhu@linaro.org>,
	x86@kernel.org, lkml <linux-kernel@vger.kernel.org>,
	Steven Rostedt <rostedt@goodmis.org>,
	virtualization@lists.linux-foundation.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Ingo Molnar <mingo@redhat.com>,
	systemtap@sourceware.org, "H. Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	"David S. Miller" <davem@davemloft.net>
Subject: [PATCH -tip v3 06/23] kprobes/x86: Allow probe on some kprobe preparation functions
Date: Wed, 20 Nov 2013 04:22:04 +0000
Message-ID: <20131120042203.15296.7073.stgit@kbuild-fedora.novalocal>
In-Reply-To: <20131120042148.15296.88360.stgit@kbuild-fedora.novalocal>

There is no need to prohibit probing on the functions used in the
preparation phase. They can be probed safely because they are never
invoked from the breakpoint/fault/debug handlers, so there is no
chance of causing recursive exceptions.
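
For reference, __kprobes works purely by section placement: it moves
a function into the .kprobes.text section, and the kprobes core
rejects any probe whose address falls inside that section. Below is a
minimal sketch of that mechanism (simplified, not the verbatim kernel
code; addr_in_kprobes_text is an illustrative name for the range
check performed in the kprobes core):

  /* __kprobes is just a section attribute (include/linux/compiler.h). */
  #define __kprobes __attribute__((__section__(".kprobes.text")))

  /* Section bounds provided by the linker script (KPROBES_TEXT). */
  extern char __kprobes_text_start[], __kprobes_text_end[];

  /* Roughly what the blacklist test does with those bounds. */
  static int addr_in_kprobes_text(unsigned long addr)
  {
          return addr >= (unsigned long)__kprobes_text_start &&
                 addr <  (unsigned long)__kprobes_text_end;
  }

Dropping __kprobes from the preparation functions thus moves them back
into regular .text, so kprobes no longer refuses to probe them.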

The following functions are now removed from the kprobes blacklist:
 can_boost
 can_probe
 can_optimize
 is_IF_modifier
 __copy_instruction
 copy_optimized_instructions
 arch_copy_kprobe
 arch_prepare_kprobe
 arch_arm_kprobe
 arch_disarm_kprobe
 arch_remove_kprobe
 arch_trampoline_kprobe
 arch_prepare_kprobe_ftrace
 arch_prepare_optimized_kprobe
 arch_check_optimized_kprobe
 arch_within_optimized_kprobe
 __arch_remove_optimized_kprobe
 arch_remove_optimized_kprobe
 arch_optimize_kprobes
 arch_unoptimize_kprobe

I tested the safety via the kprobe tracer as below:

 # cd /sys/kernel/debug/tracing
 # cat above-converted-symbols-list | while read s; do
   echo "p $s"; done > kprobe_events
 (Note: some symbols are not found because they are inlined)
 # echo 1 > events/kprobes/enable
 # echo p:foo vfs_symlink >> kprobe_events
 # echo p:bar vfs_symlink+5 >> kprobe_events
 # echo p vfs_symlink+5 >> kprobe_events
 # echo 1 > events/kprobes/foo/enable
 # ln -sf /tmp/foo /tmp/bar
 # echo 0 > events/kprobes/foo/enable
 # echo -:foo >> kprobe_events
 # head -n 20 trace
 # echo 0 > events/kprobes/enable
 # echo > kprobe_events
 # echo > trace

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
---
 arch/x86/kernel/kprobes/core.c   |   20 ++++++++++----------
 arch/x86/kernel/kprobes/ftrace.c |    2 +-
 arch/x86/kernel/kprobes/opt.c    |   24 ++++++++++++------------
 3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 349112e..c2f7b1f 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -159,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int __kprobes can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
@@ -260,7 +260,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
 }
 
 /* Check if paddr is at an instruction boundary */
-static int __kprobes can_probe(unsigned long paddr)
+static int can_probe(unsigned long paddr)
 {
 	unsigned long addr, __addr, offset = 0;
 	struct insn insn;
@@ -299,7 +299,7 @@ static int __kprobes can_probe(unsigned long paddr)
 /*
  * Returns non-zero if opcode modifies the interrupt flag.
  */
-static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
+static int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	/* Skip prefixes */
 	insn = skip_prefixes(insn);
@@ -322,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
  * If not, return null.
  * Only applicable to 64-bit x86.
  */
-int __kprobes __copy_instruction(u8 *dest, u8 *src)
+int __copy_instruction(u8 *dest, u8 *src)
 {
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
@@ -365,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }
 
-static int __kprobes arch_copy_kprobe(struct kprobe *p)
+static int arch_copy_kprobe(struct kprobe *p)
 {
 	int ret;
 
@@ -392,7 +392,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
 	return 0;
 }
 
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
 {
 	if (alternatives_text_reserved(p->addr, p->addr))
 		return -EINVAL;
@@ -407,17 +407,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return arch_copy_kprobe(p);
 }
 
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
 	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
 }
 
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
 {
 	text_poke(p->addr, &p->opcode, 1);
 }
 
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
 {
 	if (p->ainsn.insn) {
 		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
@@ -1079,7 +1079,7 @@ int __init arch_init_kprobes(void)
 	return 0;
 }
 
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
 {
 	return 0;
 }
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 23ef5c5..dcaa131 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -85,7 +85,7 @@ end:
 	local_irq_restore(flags);
 }
 
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
 	p->ainsn.insn = NULL;
 	p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 898160b..fba7fb0 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -77,7 +77,7 @@ found:
 }
 
 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
-static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 {
 #ifdef CONFIG_X86_64
 	*addr++ = 0x48;
@@ -169,7 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_
 	local_irq_restore(flags);
 }
 
-static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+static int copy_optimized_instructions(u8 *dest, u8 *src)
 {
 	int len = 0, ret;
 
@@ -189,7 +189,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
 }
 
 /* Check whether insn is indirect jump */
-static int __kprobes insn_is_indirect_jump(struct insn *insn)
+static int insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -224,7 +224,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 }
 
 /* Decode whole function to ensure any instructions don't jump into target */
-static int __kprobes can_optimize(unsigned long paddr)
+static int can_optimize(unsigned long paddr)
 {
 	unsigned long addr, size = 0, offset = 0;
 	struct insn insn;
@@ -275,7 +275,7 @@ static int __kprobes can_optimize(unsigned long paddr)
 }
 
 /* Check optimized_kprobe can actually be optimized. */
-int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 {
 	int i;
 	struct kprobe *p;
@@ -290,15 +290,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
 }
 
 /* Check the addr is within the optimized instructions. */
-int __kprobes
-arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+				 unsigned long addr)
 {
 	return ((unsigned long)op->kp.addr <= addr &&
 		(unsigned long)op->kp.addr + op->optinsn.size > addr);
 }
 
 /* Free optimized instruction slot */
-static __kprobes
+static
 void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
 {
 	if (op->optinsn.insn) {
@@ -308,7 +308,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
 	}
 }
 
-void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 {
 	__arch_remove_optimized_kprobe(op, 1);
 }
@@ -318,7 +318,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
  * Target instructions MUST be relocatable (checked inside)
  * This is called when new aggr(opt)probe is allocated or reused.
  */
-int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 {
 	u8 *buf;
 	int ret;
@@ -372,7 +372,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
  * Replace breakpoints (int3) with relative jumps.
  * Caller must call with locking kprobe_mutex and text_mutex.
  */
-void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+void arch_optimize_kprobes(struct list_head *oplist)
 {
 	struct optimized_kprobe *op, *tmp;
 	u8 insn_buf[RELATIVEJUMP_SIZE];
@@ -398,7 +398,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
 }
 
 /* Replace a relative jump with a breakpoint (int3).  */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
 	u8 insn_buf[RELATIVEJUMP_SIZE];
 
