From: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
To: Chen Gang <gang.chen.5i5j@gmail.com>
Cc: ananth@in.ibm.com, anil.s.keshavamurthy@intel.com,
"Håvard Skinnemoen" <hskinnemoen@gmail.com>,
"David Miller" <davem@davemloft.net>,
"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
"Hans-Christian Egtvedt" <egtvedt@samfundet.no>,
"yrl.pp-manager.tt@hitachi.com" <yrl.pp-manager.tt@hitachi.com>,
"Ingo Molnar" <mingo@elte.hu>
Subject: Re: [PATCH] kernel/kprobes.c: move kretprobe implementation to CONFIG_KRETPROBES area
Date: Wed, 05 Feb 2014 14:00:41 +0900
Message-ID: <52F1C579.1080907@hitachi.com>
In-Reply-To: <52F1B1CE.2040204@gmail.com>
(2014/02/05 12:36), Chen Gang wrote:
> When CONFIG_KRETPROBES is disabled, the kretprobe implementation is
> useless, so move it into the CONFIG_KRETPROBES area.
>
> - Move all kretprobe* code into the CONFIG_KRETPROBES area, with dummy
>   stubs outside it.
> - Define kretprobe_flush_task() so that kprobe_flush_task() can call it.
> - Define init_kretprobes() so that init_kprobes() can call it.
>
>
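For anyone skimming the patch: the shape of the change is the usual
config-gating pattern -- the real implementations live under #ifdef
CONFIG_KRETPROBES, and the #else branch provides no-op fallbacks so that
callers such as kprobe_flush_task() need no #ifdef of their own. A
minimal standalone sketch of that pattern (an illustration only, not the
kernel code; plain C so it can be compiled and toggled by hand):

    #include <stdio.h>

    /* Uncomment to compare the two builds. */
    /* #define CONFIG_KRETPROBES 1 */

    #ifdef CONFIG_KRETPROBES
    static void kretprobe_flush_task(void *tk)
    {
            printf("flushing kretprobe instances for task %p\n", tk);
    }
    #else
    /* Dummy stub so the caller needs no #ifdef of its own. */
    #define kretprobe_flush_task(tk) do { (void)(tk); } while (0)
    #endif

    void kprobe_flush_task(void *tk)
    {
            kretprobe_flush_task(tk);  /* compiles away when disabled */
    }

    int main(void)
    {
            int task;

            kprobe_flush_task(&task);
            return 0;
    }
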
Looks good to me ;)
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
> ---
> kernel/kprobes.c | 323 +++++++++++++++++++++++++++++++------------------------
> 1 file changed, 181 insertions(+), 142 deletions(-)
>
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index ceeadfc..0619536 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -69,7 +69,6 @@
>
> static int kprobes_initialized;
> static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
> -static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
>
> /* NOTE: change this value only with kprobe_mutex held */
> static bool kprobes_all_disarmed;
> @@ -77,14 +76,6 @@ static bool kprobes_all_disarmed;
> /* This protects kprobe_table and optimizing_list */
> static DEFINE_MUTEX(kprobe_mutex);
> static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
> -static struct {
> - raw_spinlock_t lock ____cacheline_aligned_in_smp;
> -} kretprobe_table_locks[KPROBE_TABLE_SIZE];
> -
> -static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
> -{
> - return &(kretprobe_table_locks[hash].lock);
> -}
>
> /*
> * Normally, functions that we'd want to prohibit kprobes in, are marked
> @@ -1079,125 +1070,6 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
> return;
> }
>
> -void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
> - struct hlist_head *head)
> -{
> - struct kretprobe *rp = ri->rp;
> -
> - /* remove rp inst off the rprobe_inst_table */
> - hlist_del(&ri->hlist);
> - INIT_HLIST_NODE(&ri->hlist);
> - if (likely(rp)) {
> - raw_spin_lock(&rp->lock);
> - hlist_add_head(&ri->hlist, &rp->free_instances);
> - raw_spin_unlock(&rp->lock);
> - } else
> - /* Unregistering */
> - hlist_add_head(&ri->hlist, head);
> -}
> -
> -void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
> - struct hlist_head **head, unsigned long *flags)
> -__acquires(hlist_lock)
> -{
> - unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
> - raw_spinlock_t *hlist_lock;
> -
> - *head = &kretprobe_inst_table[hash];
> - hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_lock_irqsave(hlist_lock, *flags);
> -}
> -
> -static void __kprobes kretprobe_table_lock(unsigned long hash,
> - unsigned long *flags)
> -__acquires(hlist_lock)
> -{
> - raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_lock_irqsave(hlist_lock, *flags);
> -}
> -
> -void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
> - unsigned long *flags)
> -__releases(hlist_lock)
> -{
> - unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
> - raw_spinlock_t *hlist_lock;
> -
> - hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_unlock_irqrestore(hlist_lock, *flags);
> -}
> -
> -static void __kprobes kretprobe_table_unlock(unsigned long hash,
> - unsigned long *flags)
> -__releases(hlist_lock)
> -{
> - raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
> - raw_spin_unlock_irqrestore(hlist_lock, *flags);
> -}
> -
> -/*
> - * This function is called from finish_task_switch when task tk becomes dead,
> - * so that we can recycle any function-return probe instances associated
> - * with this task. These left over instances represent probed functions
> - * that have been called but will never return.
> - */
> -void __kprobes kprobe_flush_task(struct task_struct *tk)
> -{
> - struct kretprobe_instance *ri;
> - struct hlist_head *head, empty_rp;
> - struct hlist_node *tmp;
> - unsigned long hash, flags = 0;
> -
> - if (unlikely(!kprobes_initialized))
> - /* Early boot. kretprobe_table_locks not yet initialized. */
> - return;
> -
> - INIT_HLIST_HEAD(&empty_rp);
> - hash = hash_ptr(tk, KPROBE_HASH_BITS);
> - head = &kretprobe_inst_table[hash];
> - kretprobe_table_lock(hash, &flags);
> - hlist_for_each_entry_safe(ri, tmp, head, hlist) {
> - if (ri->task == tk)
> - recycle_rp_inst(ri, &empty_rp);
> - }
> - kretprobe_table_unlock(hash, &flags);
> - hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
> - hlist_del(&ri->hlist);
> - kfree(ri);
> - }
> -}
> -
> -static inline void free_rp_inst(struct kretprobe *rp)
> -{
> - struct kretprobe_instance *ri;
> - struct hlist_node *next;
> -
> - hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
> - hlist_del(&ri->hlist);
> - kfree(ri);
> - }
> -}
> -
> -static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
> -{
> - unsigned long flags, hash;
> - struct kretprobe_instance *ri;
> - struct hlist_node *next;
> - struct hlist_head *head;
> -
> - /* No race here */
> - for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
> - kretprobe_table_lock(hash, &flags);
> - head = &kretprobe_inst_table[hash];
> - hlist_for_each_entry_safe(ri, next, head, hlist) {
> - if (ri->rp == rp)
> - ri->rp = NULL;
> - }
> - kretprobe_table_unlock(hash, &flags);
> - }
> - free_rp_inst(rp);
> -}
> -
> /*
> * Add the new probe to ap->list. Fail if this is the
> * second jprobe at the address - two jprobes can't coexist
> @@ -1764,6 +1636,55 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num)
> EXPORT_SYMBOL_GPL(unregister_jprobes);
>
> #ifdef CONFIG_KRETPROBES
> +static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
> +static struct {
> + raw_spinlock_t lock ____cacheline_aligned_in_smp;
> +} kretprobe_table_locks[KPROBE_TABLE_SIZE];
> +
> +static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
> +{
> + return &(kretprobe_table_locks[hash].lock);
> +}
> +
> +void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
> + struct hlist_head **head, unsigned long *flags)
> +__acquires(hlist_lock)
> +{
> + unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
> + raw_spinlock_t *hlist_lock;
> +
> + *head = &kretprobe_inst_table[hash];
> + hlist_lock = kretprobe_table_lock_ptr(hash);
> + raw_spin_lock_irqsave(hlist_lock, *flags);
> +}
> +
> +void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
> + unsigned long *flags)
> +__releases(hlist_lock)
> +{
> + unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
> + raw_spinlock_t *hlist_lock;
> +
> + hlist_lock = kretprobe_table_lock_ptr(hash);
> + raw_spin_unlock_irqrestore(hlist_lock, *flags);
> +}
> +
> +static void __kprobes kretprobe_table_lock(unsigned long hash,
> + unsigned long *flags)
> +__acquires(hlist_lock)
> +{
> + raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
> + raw_spin_lock_irqsave(hlist_lock, *flags);
> +}
> +
> +static void __kprobes kretprobe_table_unlock(unsigned long hash,
> + unsigned long *flags)
> +__releases(hlist_lock)
> +{
> + raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
> + raw_spin_unlock_irqrestore(hlist_lock, *flags);
> +}
> +
> /*
> * This kprobe pre_handler is registered with every kretprobe. When probe
> * hits it will set up the return probe.
> @@ -1808,6 +1729,17 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
> return 0;
> }
>
> +static inline void free_rp_inst(struct kretprobe *rp)
> +{
> + struct kretprobe_instance *ri;
> + struct hlist_node *next;
> +
> + hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
> + hlist_del(&ri->hlist);
> + kfree(ri);
> + }
> +}
> +
> int __kprobes register_kretprobe(struct kretprobe *rp)
> {
> int ret = 0;
> @@ -1885,6 +1817,26 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
> }
> EXPORT_SYMBOL_GPL(unregister_kretprobe);
>
> +static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
> +{
> + unsigned long flags, hash;
> + struct kretprobe_instance *ri;
> + struct hlist_node *next;
> + struct hlist_head *head;
> +
> + /* No race here */
> + for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
> + kretprobe_table_lock(hash, &flags);
> + head = &kretprobe_inst_table[hash];
> + hlist_for_each_entry_safe(ri, next, head, hlist) {
> + if (ri->rp == rp)
> + ri->rp = NULL;
> + }
> + kretprobe_table_unlock(hash, &flags);
> + }
> + free_rp_inst(rp);
> +}
> +
> void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
> {
> int i;
> @@ -1907,7 +1859,78 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
> }
> EXPORT_SYMBOL_GPL(unregister_kretprobes);
>
> +void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
> + struct hlist_head *head)
> +{
> + struct kretprobe *rp = ri->rp;
> +
> + /* remove rp inst off the rprobe_inst_table */
> + hlist_del(&ri->hlist);
> + INIT_HLIST_NODE(&ri->hlist);
> + if (likely(rp)) {
> + raw_spin_lock(&rp->lock);
> + hlist_add_head(&ri->hlist, &rp->free_instances);
> + raw_spin_unlock(&rp->lock);
> + } else
> + /* Unregistering */
> + hlist_add_head(&ri->hlist, head);
> +}
> +
> +static void __kprobes kretprobe_flush_task(struct task_struct *tk)
> +{
> + struct kretprobe_instance *ri;
> + struct hlist_head *head, empty_rp;
> + struct hlist_node *tmp;
> + unsigned long hash, flags = 0;
> +
> + if (unlikely(!kprobes_initialized))
> + /* Early boot. kretprobe_table_locks not yet initialized. */
> + return;
> +
> + INIT_HLIST_HEAD(&empty_rp);
> + hash = hash_ptr(tk, KPROBE_HASH_BITS);
> + head = &kretprobe_inst_table[hash];
> + kretprobe_table_lock(hash, &flags);
> + hlist_for_each_entry_safe(ri, tmp, head, hlist) {
> + if (ri->task == tk)
> + recycle_rp_inst(ri, &empty_rp);
> + }
> + kretprobe_table_unlock(hash, &flags);
> + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
> + hlist_del(&ri->hlist);
> + kfree(ri);
> + }
> +}
> +
> +static void __init init_kretprobes(void)
> +{
> + int i;
> +
> + /* FIXME allocate the probe table, currently defined statically */
> + /* initialize all list heads */
> + for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> + INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
> + raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
> + }
> +
> + if (kretprobe_blacklist_size) {
> + /* lookup the function address from its name */
> + for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> + kprobe_lookup_name(kretprobe_blacklist[i].name,
> + kretprobe_blacklist[i].addr);
> + if (!kretprobe_blacklist[i].addr)
> + printk(KERN_WARNING
> + "kretprobe: lookup failed: %s\n",
> + kretprobe_blacklist[i].name);
> + }
> + }
> +}
> +
> #else /* CONFIG_KRETPROBES */
> +
> +#define kretprobe_flush_task(p) do {} while (0)
> +#define init_kretprobes() do {} while (0)
> +
> int __kprobes register_kretprobe(struct kretprobe *rp)
> {
> return -ENOSYS;
> @@ -1936,8 +1959,35 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
> return 0;
> }
>
> +void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
> + struct hlist_head *head)
> +{
> +}
> +
> +void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
> + struct hlist_head **head, unsigned long *flags)
> +__acquires(hlist_lock)
> +{
> +}
> +
> +void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
> + unsigned long *flags)
> +__releases(hlist_lock)
> +{
> +}
> #endif /* CONFIG_KRETPROBES */
>
> +/*
> + * This function is called from finish_task_switch when task tk becomes dead,
> + * so that we can recycle any function-return probe instances associated
> + * with this task. These left over instances represent probed functions
> + * that have been called but will never return.
> + */
> +void __kprobes kprobe_flush_task(struct task_struct *tk)
> +{
> + kretprobe_flush_task(tk);
> +}
> +
> /* Set the kprobe gone and remove its instruction buffer. */
> static void __kprobes kill_kprobe(struct kprobe *p)
> {
> @@ -2073,11 +2123,8 @@ static int __init init_kprobes(void)
>
> /* FIXME allocate the probe table, currently defined statically */
> /* initialize all list heads */
> - for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
> + for (i = 0; i < KPROBE_TABLE_SIZE; i++)
> INIT_HLIST_HEAD(&kprobe_table[i]);
> - INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
> - raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
> - }
>
> /*
> * Lookup and populate the kprobe_blacklist.
> @@ -2101,16 +2148,8 @@ static int __init init_kprobes(void)
> kb->range = size;
> }
>
> - if (kretprobe_blacklist_size) {
> - /* lookup the function address from its name */
> - for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
> - kprobe_lookup_name(kretprobe_blacklist[i].name,
> - kretprobe_blacklist[i].addr);
> - if (!kretprobe_blacklist[i].addr)
> - printk("kretprobe: lookup failed: %s\n",
> - kretprobe_blacklist[i].name);
> - }
> - }
> + /* Initialize kretprobes */
> + init_kretprobes();
>
> #if defined(CONFIG_OPTPROBES)
> #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
>
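One stylistic note, orthogonal to the ack above: in the #else branch the
patch mixes do-nothing macros (kretprobe_flush_task, init_kretprobes)
with empty functions (recycle_rp_inst, kretprobe_hash_lock/unlock). A
common alternative for the macros is static inline stubs, which generate
the same (empty) code but keep the compiler type-checking the argument
even when CONFIG_KRETPROBES is off. A standalone sketch of the contrast
(illustration only; struct task_struct is stubbed as an opaque type, and
the _macro/_inline names are invented for the comparison):

    /* Opaque, as callers see it -- only pointers are passed around. */
    struct task_struct;

    /* Style used by the patch: the call vanishes textually, and the
     * argument is never looked at, so a bogus type would still compile. */
    #define kretprobe_flush_task_macro(tk) do {} while (0)

    /* Alternative: same generated code, but a wrong argument type is
     * still caught at compile time. */
    static inline void kretprobe_flush_task_inline(struct task_struct *tk)
    {
            (void)tk;
    }

    int main(void)
    {
            struct task_struct *tk = 0;

            kretprobe_flush_task_macro(tk);
            kretprobe_flush_task_inline(tk);
            return 0;
    }

Either style works here, since both call sites are in kprobes.c itself;
the inline form just fails louder if the stub and the real prototype
ever drift apart.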
--
Masami HIRAMATSU
IT Management Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: masami.hiramatsu.pt@hitachi.com