From: wangnan0@huawei.com (Wang Nan)
To: linux-arm-kernel@lists.infradead.org
Subject: [RFC PATCH 09/11] kprobes: core logic of early kprobes.
Date: Wed, 7 Jan 2015 15:36:00 +0800 [thread overview]
Message-ID: <1420616160-43110-1-git-send-email-wangnan0@huawei.com> (raw)
In-Reply-To: <1420616086-42692-1-git-send-email-wangnan0@huawei.com>
This patch is the main logic of early kprobe.
If register_kprobe() is called before kprobes_initialized, an early
kprobe is allocated. Try to utilize the existing OPTPROBE mechanism to
replace the target instruction with a branch instead of a breakpoint,
because interrupt handlers may not have been initialized yet.
All resources required by early kprobes are allocated statically.
CONFIG_NR_EARLY_KPROBES_SLOTS is used to control the number of possible
early kprobes.
Signed-off-by: Wang Nan <wangnan0@huawei.com>
---
include/linux/kprobes.h | 4 ++
kernel/kprobes.c | 151 ++++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 149 insertions(+), 6 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 27a27ed..a54947d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -434,6 +434,10 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
size_t *length, loff_t *ppos);
#endif
+struct early_kprobe_slot {
+ struct optimized_kprobe op;
+};
+
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1882bfa..9c3ea9b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -71,6 +71,10 @@ int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
+#ifdef CONFIG_EARLY_KPROBES
+static HLIST_HEAD(early_kprobe_hlist);
+#endif
+
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;
@@ -320,7 +324,12 @@ struct kprobe *get_kprobe(void *addr)
struct hlist_head *head;
struct kprobe *p;
- head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
+#ifdef CONFIG_EARLY_KPROBES
+ if (unlikely(!kprobes_initialized))
+ head = &early_kprobe_hlist;
+ else
+#endif
+ head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
hlist_for_each_entry_rcu(p, head, hlist) {
if (p->addr == addr)
return p;
@@ -377,14 +386,18 @@ void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
NOKPROBE_SYMBOL(opt_pre_handler);
/* Free optimized instructions and optimized_kprobe */
+static int ek_free_early_kprobe(struct early_kprobe_slot *slot);
static void free_aggr_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
+ struct early_kprobe_slot *ep;
op = container_of(p, struct optimized_kprobe, kp);
arch_remove_optimized_kprobe(op);
arch_remove_kprobe(p);
- kfree(op);
+ ep = container_of(op, struct early_kprobe_slot, op);
+ if (likely(!ek_free_early_kprobe(ep)))
+ kfree(op);
}
/* Return true(!0) if the kprobe is ready for optimization. */
@@ -601,9 +614,15 @@ static void optimize_kprobe(struct kprobe *p)
struct optimized_kprobe *op;
/* Check if the kprobe is disabled or not ready for optimization. */
- if (!kprobe_optready(p) || !kprobes_allow_optimization ||
- (kprobe_disabled(p) || kprobes_all_disarmed))
- return;
+ if (unlikely(!kprobes_initialized)) {
+ BUG_ON(!(p->flags & KPROBE_FLAG_EARLY));
+ if (!kprobe_optready(p) || kprobe_disabled(p))
+ return;
+ } else {
+ if (!kprobe_optready(p) || !kprobes_allow_optimization ||
+ (kprobe_disabled(p) || kprobes_all_disarmed))
+ return;
+ }
/* Both of break_handler and post_handler are not supported. */
if (p->break_handler || p->post_handler)
@@ -625,7 +644,10 @@ static void optimize_kprobe(struct kprobe *p)
list_del_init(&op->list);
else {
list_add(&op->list, &optimizing_list);
- kick_kprobe_optimizer();
+ if (unlikely(!kprobes_initialized))
+ arch_optimize_kprobes(&optimizing_list);
+ else
+ kick_kprobe_optimizer();
}
}
@@ -1491,6 +1513,8 @@ out:
return ret;
}
+static int register_early_kprobe(struct kprobe *p);
+
int register_kprobe(struct kprobe *p)
{
int ret;
@@ -1504,6 +1528,14 @@ int register_kprobe(struct kprobe *p)
return PTR_ERR(addr);
p->addr = addr;
+ if (unlikely(!kprobes_initialized)) {
+ p->flags |= KPROBE_FLAG_EARLY;
+ return register_early_kprobe(p);
+ }
+
+ WARN(p->flags & KPROBE_FLAG_EARLY,
+ "register early kprobe after kprobes initialized\n");
+
ret = check_kprobe_rereg(p);
if (ret)
return ret;
@@ -2136,6 +2168,8 @@ static struct notifier_block kprobe_module_nb = {
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];
+static void convert_early_kprobes(void);
+
static int __init init_kprobes(void)
{
int i, err = 0;
@@ -2184,6 +2218,7 @@ static int __init init_kprobes(void)
if (!err)
err = register_module_notifier(&kprobe_module_nb);
+ convert_early_kprobes();
kprobes_initialized = (err == 0);
if (!err)
@@ -2477,3 +2512,107 @@ module_init(init_kprobes);
/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
+
+#ifdef CONFIG_EARLY_KPROBES
+DEFINE_EKPROBE_ALLOC_OPS(struct early_kprobe_slot, early_kprobe, static);
+
+static int register_early_kprobe(struct kprobe *p)
+{
+	struct early_kprobe_slot *slot;
+	int err;
+
+	if (p->break_handler || p->post_handler)
+		return -EINVAL;
+	if (p->flags & KPROBE_FLAG_DISABLED)
+		return -EINVAL;
+
+	slot = ek_alloc_early_kprobe();
+	if (!slot) {
+		pr_err("Not enough early kprobe slots.\n");
+		return -ENOMEM;
+	}
+
+	p->flags &= ~KPROBE_FLAG_DISABLED;
+	p->flags |= KPROBE_FLAG_EARLY;
+	p->nmissed = 0;
+
+	err = arch_prepare_kprobe(p);
+	if (err) {
+		pr_err("arch_prepare_kprobe failed\n");
+		goto free_slot;
+	}
+
+	INIT_LIST_HEAD(&p->list);
+	INIT_HLIST_NODE(&p->hlist);
+	INIT_LIST_HEAD(&slot->op.list);
+	slot->op.kp.addr = p->addr;
+	slot->op.kp.flags = p->flags | KPROBE_FLAG_EARLY;
+
+	err = arch_prepare_optimized_kprobe(&slot->op, p);
+	if (err) {
+		pr_err("Failed to prepare optimized kprobe.\n");
+		goto remove_optimized;
+	}
+
+	if (!arch_prepared_optinsn(&slot->op.optinsn)) {
+		pr_err("Failed to prepare optinsn.\n");
+		err = -ENOMEM;
+		goto remove_optimized;
+	}
+
+	hlist_add_head_rcu(&p->hlist, &early_kprobe_hlist);
+	init_aggr_kprobe(&slot->op.kp, p);
+	optimize_kprobe(&slot->op.kp);
+	return 0;
+
+remove_optimized:
+	arch_remove_optimized_kprobe(&slot->op);
+free_slot:
+	ek_free_early_kprobe(slot);
+	return err;
+}
+
+static void
+convert_early_kprobe(struct kprobe *kp)
+{
+ struct module *probed_mod;
+ int err;
+
+ BUG_ON(!kprobe_aggrprobe(kp));
+
+ err = check_kprobe_address_safe(kp, &probed_mod);
+ if (err)
+ panic("Insert kprobe at %p is not safe!", kp->addr);
+
+ /*
+ * FIXME:
+ * convert kprobe to ftrace if CONFIG_KPROBES_ON_FTRACE is on
+ * and kp is on ftrace location.
+ */
+
+ mutex_lock(&kprobe_mutex);
+ hlist_del_rcu(&kp->hlist);
+
+ INIT_HLIST_NODE(&kp->hlist);
+ hlist_add_head_rcu(&kp->hlist,
+ &kprobe_table[hash_ptr(kp->addr, KPROBE_HASH_BITS)]);
+ mutex_unlock(&kprobe_mutex);
+
+ if (probed_mod)
+ module_put(probed_mod);
+}
+
+static void
+convert_early_kprobes(void)
+{
+	struct kprobe *p;
+	struct hlist_node *tmp;
+
+	hlist_for_each_entry_safe(p, tmp, &early_kprobe_hlist, hlist)
+		convert_early_kprobe(p);
+}
+#else
+static int register_early_kprobe(struct kprobe *p) { return -ENOSYS; }
+static int ek_free_early_kprobe(struct early_kprobe_slot *slot) { return 0; }
+static void convert_early_kprobes(void) {}
+#endif
--
1.8.4
next prev parent reply other threads:[~2015-01-07 7:36 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-01-07 7:34 [RFC PATCH 00/11] Early kprobe: enable kprobes at very early Wang Nan
2015-01-07 7:35 ` [RFC PATCH 01/11] ARM: kprobes: directly modify code if kprobe is not initialized Wang Nan
2015-01-07 17:15 ` Christopher Covington
2015-01-13 15:34 ` Masami Hiramatsu
2015-01-07 7:35 ` [RFC PATCH 02/11] ARM: kprobes: introduce early kprobes related code area Wang Nan
2015-01-07 7:35 ` [RFC PATCH 03/11] x86: kprobes: directly modify code if kprobe is not initialized Wang Nan
2015-01-07 7:35 ` [RFC PATCH 04/11] x86: kprobes: introduce early kprobes related code area Wang Nan
2015-01-07 7:35 ` [RFC PATCH 05/11] kprobes: Add an KPROBE_FLAG_EARLY for early kprobe Wang Nan
2015-01-07 7:35 ` [RFC PATCH 06/11] kprobes: makes kprobes_initialized globally visable Wang Nan
2015-01-07 7:35 ` [RFC PATCH 07/11] kprobes: introduces macros for allocing early kprobe resources Wang Nan
2015-01-07 10:45 ` Wang Nan
2015-01-07 7:35 ` [RFC PATCH 08/11] kprobes: allows __alloc_insn_slot() from early kprobes slots Wang Nan
2015-01-07 7:36 ` Wang Nan [this message]
2015-01-07 7:36 ` [RFC PATCH 10/11] kprobes: enable 'ekprobe=' cmdline option for early kprobes Wang Nan
2015-01-07 7:36 ` [RFC PATCH 11/11] kprobes: add CONFIG_EARLY_KPROBES option Wang Nan
2015-01-07 7:42 ` Wang Nan
2015-01-13 15:58 ` [RFC PATCH 00/11] Early kprobe: enable kprobes at very early Masami Hiramatsu
2015-02-06 10:30 ` [RFC PATCH] x86: kprobes: enable optmize relative call insn Wang Nan
2015-02-07 10:08 ` Masami Hiramatsu
2015-02-07 10:42 ` Wang Nan
2015-01-16 17:48 ` [RFC PATCH 00/11] Early kprobe: enable kprobes at very early Steven Rostedt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1420616160-43110-1-git-send-email-wangnan0@huawei.com \
--to=wangnan0@huawei.com \
--cc=linux-arm-kernel@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).