From: wangnan0@huawei.com (Wang Nan)
To: linux-arm-kernel@lists.infradead.org
Subject: [RFC PATCH v2 24/26] early kprobes: core logic to support early kprobe on ftrace.
Date: Thu, 12 Feb 2015 20:21:25 +0800 [thread overview]
Message-ID: <1423743685-13072-1-git-send-email-wangnan0@huawei.com> (raw)
In-Reply-To: <1423743476-11927-1-git-send-email-wangnan0@huawei.com>
Utilize the previously introduced ftrace update notifier chain to support
early kprobes on ftrace.
Signed-off-by: Wang Nan <wangnan0@huawei.com>
---
include/linux/kprobes.h | 1 +
kernel/kprobes.c | 213 ++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 197 insertions(+), 17 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 92aafa7..1c211e8 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -131,6 +131,7 @@ struct kprobe {
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
#define KPROBE_FLAG_EARLY 16 /* early kprobe */
+#define KPROBE_FLAG_RESTORED 32 /* temporarily restored to its original insn */
/* Has this kprobe gone ? */
static inline int kprobe_gone(struct kprobe *p)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 0bbb510..c9cd46f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,6 +48,7 @@
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>
+#include <linux/stop_machine.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -2540,11 +2541,127 @@ EXPORT_SYMBOL_GPL(jprobe_return);
void __weak arch_fix_ftrace_early_kprobe(struct optimized_kprobe *p)
{
}
+
+static int restore_optimized_kprobe(struct optimized_kprobe *op)
+{
+ /* If it already restored, pass it to other. */
+ if (op->kp.flags & KPROBE_FLAG_RESTORED)
+ return NOTIFY_DONE;
+
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+ arch_restore_optimized_kprobe(op);
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+
+ op->kp.flags |= KPROBE_FLAG_RESTORED;
+ return NOTIFY_STOP;
+}
+
+static int ftrace_notifier_call(struct notifier_block *nb,
+ unsigned long val, void *param)
+{
+ struct ftrace_update_notifier_info *info = param;
+ struct optimized_kprobe *op;
+ struct dyn_ftrace *rec;
+ struct kprobe *kp;
+ int enable;
+ void *addr;
+ int ret = NOTIFY_DONE;
+
+ if (!info || !info->rec || !info->rec->ip)
+ return NOTIFY_DONE;
+
+ rec = info->rec;
+ enable = info->enable;
+ addr = (void *)rec->ip;
+
+ mutex_lock(&kprobe_mutex);
+ kp = get_kprobe(addr);
+ mutex_unlock(&kprobe_mutex);
+
+ if (!kp || !kprobe_aggrprobe(kp))
+ return NOTIFY_DONE;
+
+ op = container_of(kp, struct optimized_kprobe, kp);
+ /*
+ * Ftrace is trying to convert ftrace entries to nop
+ * instruction. This conversion should have already been done
+ * at register_early_kprobe(). x86 needs fixing here.
+ */
+ if (!(rec->flags & FTRACE_FL_ENABLED) && (!enable)) {
+ arch_fix_ftrace_early_kprobe(op);
+ return NOTIFY_STOP;
+ }
+
+ /*
+ * Ftrace is trying to enable a trace entry. We temporary
+ * restore the probed instruction.
+ * We can continue using this kprobe as a ftrace-based kprobe,
+ * but event between this restoring and early kprobe conversion
+ * will get lost.
+ */
+ if (!(rec->flags & FTRACE_FL_ENABLED) && enable) {
+ ret = restore_optimized_kprobe(op);
+
+ /* Let ftrace retry if restore is successful. */
+ if (ret == NOTIFY_STOP)
+ info->retry = true;
+ return ret;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ftrace_notifier_block = {
+ .notifier_call = ftrace_notifier_call,
+};
+static bool ftrace_notifier_registred = false;
+
+static int enable_early_kprobe_on_ftrace(struct kprobe *p)
+{
+ int err;
+
+ if (!ftrace_notifier_registred) {
+ err = register_ftrace_update_notifier(&ftrace_notifier_block);
+ if (err) {
+ pr_err("Failed to register ftrace update notifier\n");
+ return err;
+ }
+ ftrace_notifier_registred = true;
+ }
+
+ err = ftrace_process_loc_early((unsigned long)p->addr);
+ if (err)
+ pr_err("Failed to process ftrace entry at %p\n", p->addr);
+ return err;
+}
+
+/* Caller must ensure kprobe_aggrprobe(kp). */
+static void convert_early_ftrace_kprobe_top(struct optimized_kprobe *op)
+{
+ restore_optimized_kprobe(op);
+ arm_kprobe_ftrace(&op->kp);
+}
+
+#else
+static inline int enable_early_kprobe_on_ftrace(struct kprobe *__unused)
+{ return 0; }
+
+/*
+ * If CONFIG_KPROBES_ON_FTRACE is off this function should never get called,
+ * so let it trigger a warning.
+ */
+static inline void convert_early_ftrace_kprobe_top(struct optimized_kprobe *__unused)
+{
+ WARN_ON(1);
+}
#endif
static int register_early_kprobe(struct kprobe *p)
{
struct early_kprobe_slot *slot;
+ struct module *probed_mod;
int err;
if (p->break_handler || p->post_handler)
@@ -2552,13 +2669,25 @@ static int register_early_kprobe(struct kprobe *p)
if (p->flags & KPROBE_FLAG_DISABLED)
return -EINVAL;
+ err = check_kprobe_address_safe(p, &probed_mod);
+ if (err)
+ return err;
+
+ BUG_ON(probed_mod);
+
+ if (kprobe_ftrace(p)) {
+ err = enable_early_kprobe_on_ftrace(p);
+ if (err)
+ return err;
+ }
+
slot = ek_alloc_early_kprobe();
if (!slot) {
pr_err("No enough early kprobe slots.\n");
return -ENOMEM;
}
- p->flags &= KPROBE_FLAG_DISABLED;
+ p->flags &= KPROBE_FLAG_DISABLED | KPROBE_FLAG_FTRACE;
p->flags |= KPROBE_FLAG_EARLY;
p->nmissed = 0;
@@ -2599,43 +2728,93 @@ free_slot:
}
static void
-convert_early_kprobe(struct kprobe *kp)
+convert_early_kprobe_top(struct kprobe *kp)
{
struct module *probed_mod;
+ struct optimized_kprobe *op;
int err;
BUG_ON(!kprobe_aggrprobe(kp));
+ op = container_of(kp, struct optimized_kprobe, kp);
err = check_kprobe_address_safe(kp, &probed_mod);
if (err)
panic("Insert kprobe at %p is not safe!", kp->addr);
+ BUG_ON(probed_mod);
- /*
- * FIXME:
- * convert kprobe to ftrace if CONFIG_KPROBES_ON_FTRACE is on
- * and kp is on ftrace location.
- */
+ if (kprobe_ftrace(kp))
+ convert_early_ftrace_kprobe_top(op);
+}
- mutex_lock(&kprobe_mutex);
- hlist_del_rcu(&kp->hlist);
+static void
+convert_early_kprobes_top(void)
+{
+ struct kprobe *p;
+
+ hlist_for_each_entry(p, &early_kprobe_hlist, hlist)
+ convert_early_kprobe_top(p);
+}
+
+static LIST_HEAD(early_freeing_list);
+
+static void
+convert_early_kprobe_stop_machine(struct kprobe *kp)
+{
+ struct optimized_kprobe *op;
+
+ BUG_ON(!kprobe_aggrprobe(kp));
+ op = container_of(kp, struct optimized_kprobe, kp);
+
+ if ((kprobe_ftrace(kp)) && (list_is_singular(&op->kp.list))) {
+ /* Update kp */
+ kp = list_entry(op->kp.list.next, struct kprobe, list);
+
+ hlist_replace_rcu(&op->kp.hlist, &kp->hlist);
+ list_del_init(&kp->list);
+
+ op->kp.flags |= KPROBE_FLAG_DISABLED;
+ list_add(&op->list, &early_freeing_list);
+ }
+ hlist_del_rcu(&kp->hlist);
INIT_HLIST_NODE(&kp->hlist);
hlist_add_head_rcu(&kp->hlist,
- &kprobe_table[hash_ptr(kp->addr, KPROBE_HASH_BITS)]);
- mutex_unlock(&kprobe_mutex);
-
- if (probed_mod)
- module_put(probed_mod);
+ &kprobe_table[hash_ptr(kp->addr, KPROBE_HASH_BITS)]);
}
-static void
-convert_early_kprobes(void)
+static int
+convert_early_kprobes_stop_machine(void *__unused)
{
struct kprobe *p;
struct hlist_node *tmp;
hlist_for_each_entry_safe(p, tmp, &early_kprobe_hlist, hlist)
- convert_early_kprobe(p);
+ convert_early_kprobe_stop_machine(p);
+ return 0;
+}
+
+static void
+convert_early_kprobes(void)
+{
+ struct optimized_kprobe *op, *tmp;
+
+ mutex_lock(&kprobe_mutex);
+
+ convert_early_kprobes_top();
+
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+
+ stop_machine(convert_early_kprobes_stop_machine, NULL, NULL);
+
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+ mutex_unlock(&kprobe_mutex);
+
+ list_for_each_entry_safe(op, tmp, &early_freeing_list, list) {
+ list_del_init(&op->list);
+ free_aggr_kprobe(&op->kp);
+ }
};
#else
static int register_early_kprobe(struct kprobe *p) { return -ENOSYS; }
--
1.8.4
next prev parent reply other threads:[~2015-02-12 12:21 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-02-12 12:17 [RFC PATCH v2 00/26] Early kprobe: enable kprobes at very early booting stage Wang Nan
2015-02-12 12:19 ` [RFC PATCH v2 01/26] kprobes: set kprobes_all_disarmed earlier to enable re-optimization Wang Nan
2015-02-12 12:19 ` [RFC PATCH v2 02/26] kprobes: makes kprobes/enabled works correctly for optimized kprobes Wang Nan
2015-02-12 12:19 ` [RFC PATCH v2 03/26] kprobes: x86: mark 2 bytes NOP as boostable Wang Nan
2015-02-12 12:19 ` [RFC PATCH v2 04/26] ftrace: don't update record flags if code modification fail Wang Nan
2015-02-12 12:19 ` [RFC PATCH v2 05/26] ftrace/x86: Ensure rec->flags no change when failure occures Wang Nan
2015-02-12 12:19 ` [RFC PATCH v2 06/26] ftrace: sort ftrace entries earlier Wang Nan
2015-02-12 17:35 ` Steven Rostedt
2015-02-12 12:19 ` [RFC PATCH v2 07/26] ftrace: allow search ftrace addr before ftrace fully inited Wang Nan
2015-02-12 17:38 ` Steven Rostedt
2015-02-12 12:19 ` [RFC PATCH v2 08/26] ftrace: enable other subsystems make ftrace nop before ftrace_init() Wang Nan
2015-02-12 17:39 ` Steven Rostedt
2015-02-13 1:29 ` Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 10/26] ftrace: x86: try to fix ftrace when ftrace_replace_code Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 11/26] early kprobes: introduce kprobe_is_early for futher early kprobe use Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 12/26] early kprobes: Add an KPROBE_FLAG_EARLY for early kprobe Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 13/26] early kprobes: ARM: directly modify code Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 14/26] early kprobes: ARM: introduce early kprobes related code area Wang Nan
2015-02-13 17:32 ` Russell King - ARM Linux
2015-02-15 8:26 ` [RFC PATCH 0/3] early kprobes: rearrange vmlinux.lds related code Wang Nan
2015-02-15 8:27 ` [RFC PATCH 1/3] early kprobes: ARM: add definition for vmlinux.lds use Wang Nan
2015-02-15 8:27 ` [RFC PATCH 2/3] early kprobes: x86: " Wang Nan
2015-02-15 8:27 ` [RFC PATCH 3/3] early kprobes: introduce early kprobes related code area Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 15/26] early kprobes: x86: directly modify code Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 16/26] early kprobes: x86: introduce early kprobes related code area Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 17/26] early kprobes: introduces macros for allocing early kprobe resources Wang Nan
2015-02-12 12:20 ` [RFC PATCH v2 18/26] early kprobes: allows __alloc_insn_slot() from early kprobes slots Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 19/26] early kprobes: perhibit probing at early kprobe reserved area Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 20/26] early kprobes: core logic of eraly kprobes Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 21/26] early kprobes: add CONFIG_EARLY_KPROBES option Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 22/26] early kprobes: introduce arch_fix_ftrace_early_kprobe() Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 23/26] early kprobes: x86: arch_restore_optimized_kprobe() Wang Nan
2015-02-12 12:21 ` Wang Nan [this message]
2015-02-12 12:21 ` [RFC PATCH v2 25/26] early kprobes: introduce kconfig option to support early kprobe on ftrace Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 26/26] kprobes: enable 'ekprobe=' cmdline option for early kprobes Wang Nan
2015-02-12 12:21 ` [RFC PATCH v2 09/26] ftrace: callchain and ftrace_bug_tryfix Wang Nan
2015-02-13 5:38 ` [RFC PATCH v3 00/26] Early kprobe: enable kprobes at very early booting stage Wang Nan
2015-02-13 17:15 ` Steven Rostedt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1423743685-13072-1-git-send-email-wangnan0@huawei.com \
--to=wangnan0@huawei.com \
--cc=linux-arm-kernel@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).