* [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe()
2011-11-08 17:03 [PATCH 0/4] MIPS Kprobes Maneesh Soni
@ 2011-11-08 17:04 ` Maneesh Soni
2011-11-08 19:57 ` David Daney
2011-11-17 23:17 ` Ralf Baechle
2011-11-08 17:05 ` [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions Maneesh Soni
` (2 subsequent siblings)
3 siblings, 2 replies; 15+ messages in thread
From: Maneesh Soni @ 2011-11-08 17:04 UTC (permalink / raw)
To: Ralf Baechle; +Cc: David Daney, ananth, kamensky, linux-kernel, linux-mips
From: Maneesh Soni <manesoni@cisco.com>
Fix OOPS in arch_prepare_kprobe() for MIPS
This patch fixes the arch_prepare_kprobe() on MIPS when it tries to find the
instruction at the previous address to the probed address. The oops happens
when the probed address is the first address in a kernel module and there is
no previous address. The patch uses probe_kernel_read() to safely read the
previous instruction.
CPU 3 Unable to handle kernel paging request at virtual address ffffffffc0211ffc, epc == ffffffff81113204, ra == ffffffff8111511c
Oops[#1]:
Cpu 3
$ 0 : 0000000000000000 0000000000000001 ffffffffc0212000 0000000000000000
$ 4 : ffffffffc0220030 0000000000000000 0000000000000adf ffffffff81a3f898
$ 8 : ffffffffc0220030 ffffffffffffffff 000000000000ffff 0000000000004821
$12 : 000000000000000a ffffffff81105ddc ffffffff812927d0 0000000000000000
$16 : ffffffff81a40000 ffffffffc0220030 ffffffffc0220030 ffffffffc0212660
$20 : 0000000000000000 0000000000000008 efffffffffffffff ffffffffc0220000
$24 : 0000000000000002 ffffffff8139f5b0
$28 : a800000072adc000 a800000072adfca0 ffffffffc0220000 ffffffff8111511c
Hi : 0000000000000000
Lo : 0000000000000000
epc : ffffffff81113204 arch_prepare_kprobe+0x1c/0xe8
Tainted: P
ra : ffffffff8111511c register_kprobe+0x33c/0x730
Status: 10008ce3 KX SX UX KERNEL EXL IE
Cause : 00800008
BadVA : ffffffffc0211ffc
PrId : 000d9008 (Cavium Octeon II)
Modules linked in: bpa_mem crashinfo pds tun cpumem ipv6 exportfs nfsd OOBnd(P) OOBhal(P) cvmx_mdio cvmx_gpio aipcmod(P) mtsmod procfs(P) utaker_mod dplr_pci hello atomicm_foo [last unloaded: sysmgr_hb]
Process stapio (pid: 5603, threadinfo=a800000072adc000, task=a8000000722e0438, tls=000000002b4bcda0)
Stack : ffffffff81a40000 ffffffff81a40000 ffffffffc0220030 ffffffff8111511c
ffffffffc0218008 0000000000000001 ffffffffc0218008 0000000000000001
ffffffffc0220000 ffffffffc021efe8 1000000000000000 0000000000000008
efffffffffffffff ffffffffc0220000 ffffffffc0220000 ffffffffc021d500
0000000000000022 0000000000000002 1111000072be02b8 0000000000000000
00000000000015e6 00000000000015e6 00000000007d0f00 a800000072be02b8
0000000000000000 ffffffff811d16c8 a80000000382e3b0 ffffffff811d5ba0
ffffffff81b0a270 ffffffff81b0a270 ffffffffc0212000 0000000000000013
ffffffffc0220030 ffffffffc021ed00 a800000089114c80 000000007f90d590
a800000072adfe38 a800000089114c80 0000000010020000 0000000010020000
...
Call Trace:
[<ffffffff81113204>] arch_prepare_kprobe+0x1c/0xe8
[<ffffffff8111511c>] register_kprobe+0x33c/0x730
[<ffffffffc021d500>] _stp_ctl_write_cmd+0x8e8/0xa88 [atomicm_foo]
[<ffffffff812925cc>] vfs_write+0xb4/0x178
[<ffffffff81292828>] SyS_write+0x58/0x148
[<ffffffff81103844>] handle_sysn32+0x44/0x84
Code: ffb20010 ffb00000 dc820028 <8c44fffc> 8c500000 0c4449e0 0004203c 14400029 3c048199
Signed-off-by: Maneesh Soni <manesoni@cisco.com>
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
---
---
arch/mips/kernel/kprobes.c | 15 ++++++++++++---
1 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index ee28683..9fb1876 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -25,6 +25,7 @@
#include <linux/kprobes.h>
#include <linux/preempt.h>
+#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
@@ -118,11 +119,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
union mips_instruction prev_insn;
int ret = 0;
- prev_insn = p->addr[-1];
insn = p->addr[0];
- if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
- pr_notice("Kprobes for branch and jump instructions are not supported\n");
+ if (insn_has_delayslot(insn)) {
+ pr_notice("Kprobes for branch and jump instructions are not"
+ "supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((probe_kernel_read(&prev_insn, p->addr - 1,
+ sizeof(mips_instruction)) == 0) &&
+ insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch delayslot are not supported\n");
ret = -EINVAL;
goto out;
}
--
1.7.1
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe()
2011-11-08 17:04 ` [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe() Maneesh Soni
@ 2011-11-08 19:57 ` David Daney
2011-11-09 5:26 ` Maneesh Soni
2011-11-17 23:17 ` Ralf Baechle
1 sibling, 1 reply; 15+ messages in thread
From: David Daney @ 2011-11-08 19:57 UTC (permalink / raw)
To: manesoni@cisco.com
Cc: Ralf Baechle, ananth@in.ibm.com, kamensky@cisco.com,
linux-kernel@vger.kernel.org, linux-mips@linux-mips.org
On 11/08/2011 09:04 AM, Maneesh Soni wrote:
[...]
>
> diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
> index ee28683..9fb1876 100644
> --- a/arch/mips/kernel/kprobes.c
> +++ b/arch/mips/kernel/kprobes.c
> @@ -25,6 +25,7 @@
>
> #include<linux/kprobes.h>
> #include<linux/preempt.h>
> +#include<linux/uaccess.h>
> #include<linux/kdebug.h>
> #include<linux/slab.h>
>
> @@ -118,11 +119,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
> union mips_instruction prev_insn;
> int ret = 0;
>
> - prev_insn = p->addr[-1];
> insn = p->addr[0];
>
> - if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
> - pr_notice("Kprobes for branch and jump instructions are not supported\n");
> + if (insn_has_delayslot(insn)) {
> + pr_notice("Kprobes for branch and jump instructions are not"
> + "supported\n");
Don't wrap these strings.
It is better to go a little bit over 80 columns, than have this.
David Daney
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe()
2011-11-08 19:57 ` David Daney
@ 2011-11-09 5:26 ` Maneesh Soni
0 siblings, 0 replies; 15+ messages in thread
From: Maneesh Soni @ 2011-11-09 5:26 UTC (permalink / raw)
To: David Daney
Cc: Ralf Baechle, ananth@in.ibm.com, kamensky@cisco.com,
linux-kernel@vger.kernel.org, linux-mips@linux-mips.org
On Tue, Nov 08, 2011 at 11:57:45AM -0800, David Daney wrote:
> On 11/08/2011 09:04 AM, Maneesh Soni wrote:
> [...]
> >
> >diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
> >index ee28683..9fb1876 100644
> >--- a/arch/mips/kernel/kprobes.c
> >+++ b/arch/mips/kernel/kprobes.c
> >@@ -25,6 +25,7 @@
> >
> > #include<linux/kprobes.h>
> > #include<linux/preempt.h>
> >+#include<linux/uaccess.h>
> > #include<linux/kdebug.h>
> > #include<linux/slab.h>
> >
> >@@ -118,11 +119,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
> > union mips_instruction prev_insn;
> > int ret = 0;
> >
> >- prev_insn = p->addr[-1];
> > insn = p->addr[0];
> >
> >- if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
> >- pr_notice("Kprobes for branch and jump instructions are not supported\n");
> >+ if (insn_has_delayslot(insn)) {
> >+ pr_notice("Kprobes for branch and jump instructions are not"
> >+ "supported\n");
>
> Don't wrap these strings.
>
> It is better to go a little bit over 80 columns, than have this.
>
> David Daney
Ok.. will keep that in mind for future patches. This line actually
goes away in patch 4/4.
Thanks
Maneesh
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe()
2011-11-08 17:04 ` [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe() Maneesh Soni
2011-11-08 19:57 ` David Daney
@ 2011-11-17 23:17 ` Ralf Baechle
1 sibling, 0 replies; 15+ messages in thread
From: Ralf Baechle @ 2011-11-17 23:17 UTC (permalink / raw)
To: Maneesh Soni; +Cc: David Daney, ananth, kamensky, linux-kernel, linux-mips
Queued for 3.3. Let's see how well it's holding up :-)
Thanks,
Ralf
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions
2011-11-08 17:03 [PATCH 0/4] MIPS Kprobes Maneesh Soni
2011-11-08 17:04 ` [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe() Maneesh Soni
@ 2011-11-08 17:05 ` Maneesh Soni
2011-11-08 20:01 ` David Daney
2011-11-17 23:18 ` Ralf Baechle
2011-11-08 17:07 ` [PATCH 3/4] MIPS Kprobes: Refactoring Branch emulation Maneesh Soni
2011-11-08 17:08 ` [PATCH 4/4] MIPS Kprobes: Support branch instructions probing - v2 Maneesh Soni
3 siblings, 2 replies; 15+ messages in thread
From: Maneesh Soni @ 2011-11-08 17:05 UTC (permalink / raw)
To: Ralf Baechle; +Cc: David Daney, ananth, kamensky, linux-kernel, linux-mips
From: Maneesh Soni <manesoni@cisco.com>
Deny probes on ll/sc instructions for MIPS kprobes
As ll/sc instruction are for atomic read-modify-write operations, allowing
probes on top of these insturctions is a bad idea.
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
Signed-off-by: Maneesh Soni <manesoni@cisco.com>
---
arch/mips/kernel/kprobes.c | 31 +++++++++++++++++++++++++++++++
1 files changed, 31 insertions(+), 0 deletions(-)
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 9fb1876..0ab1a5f 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -113,6 +113,30 @@ insn_ok:
return 0;
}
+/*
+ * insn_has_ll_or_sc function checks whether instruction is ll or sc
+ * one; putting breakpoint on top of atomic ll/sc pair is bad idea;
+ * so we need to prevent it and refuse kprobes insertion for such
+ * instructions; cannot do much about breakpoint in the middle of
+ * ll/sc pair; it is upto user to avoid those places
+ */
+static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+{
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
union mips_instruction insn;
@@ -121,6 +145,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
insn = p->addr[0];
+ if (insn_has_ll_or_sc(insn)) {
+ pr_notice("Kprobes for ll and sc instructions are not"
+ "supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
if (insn_has_delayslot(insn)) {
pr_notice("Kprobes for branch and jump instructions are not"
"supported\n");
--
1.7.1
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions
2011-11-08 17:05 ` [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions Maneesh Soni
@ 2011-11-08 20:01 ` David Daney
2011-11-08 23:26 ` Victor Kamensky
2011-11-17 23:18 ` Ralf Baechle
1 sibling, 1 reply; 15+ messages in thread
From: David Daney @ 2011-11-08 20:01 UTC (permalink / raw)
To: manesoni@cisco.com
Cc: Ralf Baechle, ananth@in.ibm.com, kamensky@cisco.com,
linux-kernel@vger.kernel.org, linux-mips@linux-mips.org
On 11/08/2011 09:05 AM, Maneesh Soni wrote:
>
> From: Maneesh Soni<manesoni@cisco.com>
>
> Deny probes on ll/sc instructions for MIPS kprobes
>
> As ll/sc instruction are for atomic read-modify-write operations, allowing
> probes on top of these insturctions is a bad idea.
>
s/insturctions/instructions/
Not only is it a bad idea, it will probably make them fail 100% of the time.
It is also an equally bad idea to place a probe between any LL and SC
instructions. How do you prevent that?
If you cannot prevent probes between LL and SC, why bother with this at all?
David Daney
> Signed-off-by: Victor Kamensky<kamensky@cisco.com>
> Signed-off-by: Maneesh Soni<manesoni@cisco.com>
> ---
> arch/mips/kernel/kprobes.c | 31 +++++++++++++++++++++++++++++++
> 1 files changed, 31 insertions(+), 0 deletions(-)
>
> diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
> index 9fb1876..0ab1a5f 100644
> --- a/arch/mips/kernel/kprobes.c
> +++ b/arch/mips/kernel/kprobes.c
> @@ -113,6 +113,30 @@ insn_ok:
> return 0;
> }
>
> +/*
> + * insn_has_ll_or_sc function checks whether instruction is ll or sc
> + * one; putting breakpoint on top of atomic ll/sc pair is bad idea;
> + * so we need to prevent it and refuse kprobes insertion for such
> + * instructions; cannot do much about breakpoint in the middle of
> + * ll/sc pair; it is upto user to avoid those places
> + */
> +static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
> +{
> + int ret = 0;
> +
> + switch (insn.i_format.opcode) {
> + case ll_op:
> + case lld_op:
> + case sc_op:
> + case scd_op:
> + ret = 1;
> + break;
> + default:
> + break;
> + }
> + return ret;
> +}
> +
> int __kprobes arch_prepare_kprobe(struct kprobe *p)
> {
> union mips_instruction insn;
> @@ -121,6 +145,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
>
> insn = p->addr[0];
>
> + if (insn_has_ll_or_sc(insn)) {
> + pr_notice("Kprobes for ll and sc instructions are not"
> + "supported\n");
> + ret = -EINVAL;
> + goto out;
> + }
> +
> if (insn_has_delayslot(insn)) {
> pr_notice("Kprobes for branch and jump instructions are not"
> "supported\n");
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions
2011-11-08 20:01 ` David Daney
@ 2011-11-08 23:26 ` Victor Kamensky
2011-11-16 12:12 ` Ralf Baechle
0 siblings, 1 reply; 15+ messages in thread
From: Victor Kamensky @ 2011-11-08 23:26 UTC (permalink / raw)
To: David Daney
Cc: manesoni@cisco.com, Ralf Baechle, ananth@in.ibm.com,
linux-kernel@vger.kernel.org, linux-mips@linux-mips.org
[-- Attachment #1: Type: TEXT/PLAIN, Size: 3993 bytes --]
Hi David,
Thank you for your feedback! Please see response inline.
On Tue, 8 Nov 2011, David Daney wrote:
> On 11/08/2011 09:05 AM, Maneesh Soni wrote:
> >
> > From: Maneesh Soni<manesoni@cisco.com>
> >
> > Deny probes on ll/sc instructions for MIPS kprobes
> >
> > As ll/sc instruction are for atomic read-modify-write operations, allowing
> > probes on top of these insturctions is a bad idea.
> >
>
> s/insturctions/instructions/
>
> Not only is it a bad idea, it will probably make them fail 100% of the time.
>
> It is also an equally bad idea to place a probe between any LL and SC
> instructions. How do you prevent that?
As per below code comment we don't prevent that. There is no way to do
that.
> If you cannot prevent probes between LL and SC, why bother with this at all?
We just trying to be a bit practical here. It is better than nothing,
right? Breakpoint on top of ll/sc simply won't work and that is the fact.
Breakpoint between related pair of ll/sc won't work too, but nothing we
can do about that.
We run into this situation with SystemTap function wildcard based tracing,
as per attached unit test note. Basically SystemTap wildcard probe picked
inline assembler function which had first 'll' instruction and as result
it was spinning there till SystemTap module reached threshold and shut
itself off so code proceeded after that. Note attached unit test presents
simplified version of real issue we run into, so it may look a bit
artificial. Note it is highly unlikely that SystemTap wildcard tracing
would pick up anything between related pair of ll/sc. In order to have
breakpoint between ll/sc user had use 'statement' SystemTap directive and
it would be specifically targeting given address and therefore could be
removed easily. In case of wildcard tracing there is no easy workaround
for user to drop functions that start with 'll'.
Ideally we would want to push this check into SystemTap compile time,
along with check for branch delay slot instruction, but currently in
SystemTap there is no infrastructure that would check instruction opcode
at compile time. I believe that disallowed instruction check in SystemTap
compiler is missing for any CPU. Adding it would be small feature that we
did not have time to pursue.
Thanks,
Victor
> David Daney
>
> > Signed-off-by: Victor Kamensky<kamensky@cisco.com>
> > Signed-off-by: Maneesh Soni<manesoni@cisco.com>
> > ---
> > arch/mips/kernel/kprobes.c | 31 +++++++++++++++++++++++++++++++
> > 1 files changed, 31 insertions(+), 0 deletions(-)
> >
> > diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
> > index 9fb1876..0ab1a5f 100644
> > --- a/arch/mips/kernel/kprobes.c
> > +++ b/arch/mips/kernel/kprobes.c
> > @@ -113,6 +113,30 @@ insn_ok:
> > return 0;
> > }
> >
> > +/*
> > + * insn_has_ll_or_sc function checks whether instruction is ll or sc
> > + * one; putting breakpoint on top of atomic ll/sc pair is bad idea;
> > + * so we need to prevent it and refuse kprobes insertion for such
> > + * instructions; cannot do much about breakpoint in the middle of
> > + * ll/sc pair; it is upto user to avoid those places
> > + */
> > +static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
> > +{
> > + int ret = 0;
> > +
> > + switch (insn.i_format.opcode) {
> > + case ll_op:
> > + case lld_op:
> > + case sc_op:
> > + case scd_op:
> > + ret = 1;
> > + break;
> > + default:
> > + break;
> > + }
> > + return ret;
> > +}
> > +
> > int __kprobes arch_prepare_kprobe(struct kprobe *p)
> > {
> > union mips_instruction insn;
> > @@ -121,6 +145,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
> >
> > insn = p->addr[0];
> >
> > + if (insn_has_ll_or_sc(insn)) {
> > + pr_notice("Kprobes for ll and sc instructions are not"
> > + "supported\n");
> > + ret = -EINVAL;
> > + goto out;
> > + }
> > +
> > if (insn_has_delayslot(insn)) {
> > pr_notice("Kprobes for branch and jump instructions are not"
> > "supported\n");
>
>
[-- Attachment #2: Type: TEXT/PLAIN, Size: 5985 bytes --]
Kernel module source
--------------------
sjc-lds-154$ cat Makefile
obj-m := hellom.o
hellom-objs := hello.o hello1.o
sjc-lds-154$ cat hello.c
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kthread.h>
MODULE_DESCRIPTION("simple hello world module");
MODULE_LICENSE("GPL");
static struct task_struct *my_kthread;
int count_down = 100;
void print_value (int i)
{
printk("print_value received %d\n", i);
}
unsigned int bar;
void do_atomic_things (unsigned int i, unsigned int *p);
void foo (int mask)
{
do_atomic_things(1, &bar);
if (mask & 0x1) {
print_value(5);
} else if (mask & 0x2) {
print_value(4);
} else if (mask & 0x4) {
print_value(3);
} else if (mask & (0x8 | 0x10)) {
print_value(2);
}
}
int
hellom_start_func1 (int p)
{
printk("hellom: %s called, p = %d\n", __FUNCTION__, p);
foo(p);
return p * p;
}
int
hellom_start_func2 (int p)
{
printk("hellom: %s called, p = %d\n", __FUNCTION__, p);
return hellom_start_func1(p);
}
int
hellom_start_func3 (int p)
{
printk("hellom: %s called, p = %d\n", __FUNCTION__, p);
return hellom_start_func2(p);
}
static int
mythread(void *arg)
{
while (!kthread_should_stop()) {
printk("mythread wakeup: count_doun = %d\n", count_down);
schedule_timeout_interruptible(3 * HZ);
count_down--;
hellom_start_func3(count_down);
}
return 0;
}
void
create_mythread (void)
{
my_kthread = kthread_run(mythread, NULL, "hello");
}
static int __init init_hello(void)
{
printk("hello module loaded\n");
create_mythread();
hellom_start_func3(5);
return 0;
}
int
hellom_end_func1 (int p)
{
printk("hellom: %s called, p = %d\n", __FUNCTION__, p);
return p * p;
}
int
hellom_end_func2 (int p)
{
printk("hellom: %s called, p = %d\n", __FUNCTION__, p);
return hellom_end_func1(p);
}
int
hellom_end_func3 (int p)
{
printk("hellom: %s called, p = %d\n", __FUNCTION__, p);
return hellom_end_func2(p);
}
static void __exit exit_hello(void)
{
hellom_end_func3(6);
printk("hello module removed\n");
}
module_init(init_hello);
module_exit(exit_hello);
sjc-lds-154$ cat hello1.c
static __inline__
void my_atomic_sub(unsigned int *p, unsigned int v)
{
unsigned int temp;
__asm__ __volatile__ (
".set push\n\t"
".set noreorder\n\t"
"1:\tll %0, %3\n\t" /* load old value */
"subu %0, %0, %2\n\t" /* calculate new value */
"sc %0, %1\n\t" /* attempt to store */
"beqz %0, 1b\n\t" /* spin if failed */
"nop\n\t"
".set pop\n\t"
: "=&r" (temp), "=m" (*p)
: "r" (v), "m" (*p)
: "memory");
}
int k1;
int k2;
int l1;
int l2;
void do_atomic_things (unsigned int i, unsigned int *p)
{
k1 += 1;
k2 += 2;
my_atomic_sub(p, i);
l2 -= 2;
l1 -= 3;
}
Tracing Script
--------------
sjc-lds-154$ cat atomicm_foo.stp
probe module("hellom").function("*").call {
printf ("%s -> %s\n", thread_indent(1), probefunc())
}
probe module("hellom").function("*").return {
printf ("%s <- %s\n", thread_indent(-1), probefunc())
}
probe module("hellom").function("*").inline {
printf ("%s => %s\n", thread_indent(1), probefunc())
thread_indent(-1)
}
Run logs
--------
When script activated without fixes it keeps printing my_atomic_sub
forever:
[my6300:~]$ staprun atomicm_foo.ko
0 hello(3807): -> hellom_start_func3
17 hello(3807): -> hellom_start_func2
25 hello(3807): -> hellom_start_func1
33 hello(3807): -> foo
39 hello(3807): -> do_atomic_things
44 hello(3807): => my_atomic_sub
51 hello(3807): => my_atomic_sub
58 hello(3807): => my_atomic_sub
65 hello(3807): => my_atomic_sub
72 hello(3807): => my_atomic_sub
79 hello(3807): => my_atomic_sub
86 hello(3807): => my_atomic_sub
93 hello(3807): => my_atomic_sub
100 hello(3807): => my_atomic_sub
107 hello(3807): => my_atomic_sub
114 hello(3807): => my_atomic_sub
121 hello(3807): => my_atomic_sub
128 hello(3807): => my_atomic_sub
...
After the fix:
[my6300:~]$ staprun atomicm_foo.ko
WARNING: probe module("hellom").function("my_atomic_sub@/ws/kamensky-sjc/nova/atomicm/hello1.c:3").inline (address 0xffffffffc0212408) registration error (rc -22)
0 hello(5605): -> hellom_start_func3
24 hello(5605): -> hellom_start_func2
33 hello(5605): -> hellom_start_func1
42 hello(5605): -> foo
48 hello(5605): -> do_atomic_things
54 hello(5605): <- do_atomic_things
60 hello(5605): => print_value
68 hello(5605): => print_value
77 hello(5605): <- foo
82 hello(5605): <- hellom_start_func1
86 hello(5605): <- hellom_start_func2
90 hello(5605): <- hellom_start_func3
0 hello(5605): -> hellom_start_func3
23 hello(5605): -> hellom_start_func2
32 hello(5605): -> hellom_start_func1
41 hello(5605): -> foo
46 hello(5605): -> do_atomic_things
53 hello(5605): <- do_atomic_things
59 hello(5605): => print_value
66 hello(5605): => print_value
75 hello(5605): <- foo
80 hello(5605): <- hellom_start_func1
84 hello(5605): <- hellom_start_func2
89 hello(5605): <- hellom_start_func3
from dmesg
atomicm_foo: systemtap: 1.4/0.152, base: ffffffffc0218000, memory: 23data/28text/58ctx/13net/262alloc kb, probes: 27
Kprobes for ll and sc instructions are not supported
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions
2011-11-08 23:26 ` Victor Kamensky
@ 2011-11-16 12:12 ` Ralf Baechle
2011-11-16 17:39 ` Victor Kamensky
0 siblings, 1 reply; 15+ messages in thread
From: Ralf Baechle @ 2011-11-16 12:12 UTC (permalink / raw)
To: Victor Kamensky
Cc: David Daney, manesoni@cisco.com, ananth@in.ibm.com,
linux-kernel@vger.kernel.org, linux-mips@linux-mips.org
On Tue, Nov 08, 2011 at 03:26:42PM -0800, Victor Kamensky wrote:
> > s/insturctions/instructions/
> >
> > Not only is it a bad idea, it will probably make them fail 100% of the time.
> >
> > It is also an equally bad idea to place a probe between any LL and SC
> > instructions. How do you prevent that?
>
> As per below code comment we don't prevent that. There is no way to do
> that.
Similar to the way that the addresses of loads and stores from userspace
are recorded in a special section we could build a list of forbidden
address range.
Is it worth it?
Ralf
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions
2011-11-16 12:12 ` Ralf Baechle
@ 2011-11-16 17:39 ` Victor Kamensky
0 siblings, 0 replies; 15+ messages in thread
From: Victor Kamensky @ 2011-11-16 17:39 UTC (permalink / raw)
To: Ralf Baechle
Cc: David Daney, manesoni@cisco.com, ananth@in.ibm.com,
linux-kernel@vger.kernel.org, linux-mips@linux-mips.org
Hi Ralf,
Please see inline
On Wed, 16 Nov 2011, Ralf Baechle wrote:
> On Tue, Nov 08, 2011 at 03:26:42PM -0800, Victor Kamensky wrote:
>
> > > s/insturctions/instructions/
> > >
> > > Not only is it a bad idea, it will probably make them fail 100% of the time.
> > >
> > > It is also an equally bad idea to place a probe between any LL and SC
> > > instructions. How do you prevent that?
> >
> > As per below code comment we don't prevent that. There is no way to do
> > that.
>
> Similar to the way that the addresses of loads and stores from userspace
> are recorded in a special section we could build a list of forbidden
> address range.
>
> Is it worth it?
Yes, probably it could be done this way. It would require changing all the
places where ll/sc are used. Infrastructure to look at those tables in the kernel
and in all loaded modules would be required. The cost of the check in the kprobes
layer would go up as well, but probably to an acceptable level.
In my personal opinion the benefits it would bring will not be worth the effort.
Couple more, hopefully relevant, notes:
- on kprobes CPU independent layer there is already __kprobes marker
(attribute section) that could be used to mark function where kprobes
insertion is not allowed. So one may use it, albeit on function level
granularity, not code range.
- in my personal opinion all these checks have just practical meaning and
practical limitations. For example current mips kprobes check whether
instruction is in delay slot looks at previous 4 bytes to match jump or
branch instruction pattern. It works and really helps in 99.99% of the
cases but it will break in some exotic case where the instruction
follows data (jump table for example) or padding that happens to match
jump or branch instruction pattern. Or even if instruction follows jump
and branch instruction, it could be jumped directly on it (i.e serve as
branch delay slot in one case and regular instruction in another case).
In all such obscure cases current delay slot instruction check would
produce false positive. And it is perfectly fine, given practical
consideration. I just bring this up to illustrate my point that in this
sort of situation, where we are trying to prevent the API caller from shooting
himself/herself in the foot, we don't need to push for absolute
solutions, just practical ones.
Thanks,
Victor
> Ralf
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions
2011-11-08 17:05 ` [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions Maneesh Soni
2011-11-08 20:01 ` David Daney
@ 2011-11-17 23:18 ` Ralf Baechle
1 sibling, 0 replies; 15+ messages in thread
From: Ralf Baechle @ 2011-11-17 23:18 UTC (permalink / raw)
To: Maneesh Soni; +Cc: David Daney, ananth, kamensky, linux-kernel, linux-mips
Queued for 3.3. Thanks,
Ralf
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH 3/4] MIPS Kprobes: Refactoring Branch emulation
2011-11-08 17:03 [PATCH 0/4] MIPS Kprobes Maneesh Soni
2011-11-08 17:04 ` [PATCH 1/4] MIPS Kprobes: Fix OOPS in arch_prepare_kprobe() Maneesh Soni
2011-11-08 17:05 ` [PATCH 2/4] MIPS Kprobes: Deny probes on ll/sc instructions Maneesh Soni
@ 2011-11-08 17:07 ` Maneesh Soni
2011-11-17 23:18 ` Ralf Baechle
2011-11-08 17:08 ` [PATCH 4/4] MIPS Kprobes: Support branch instructions probing - v2 Maneesh Soni
3 siblings, 1 reply; 15+ messages in thread
From: Maneesh Soni @ 2011-11-08 17:07 UTC (permalink / raw)
To: Ralf Baechle; +Cc: David Daney, ananth, kamensky, linux-kernel, linux-mips
From: Maneesh Soni <manesoni@cisco.com>
MIPS Refactoring Branch emulation
This patch refactors MIPS branch emulation code so as to allow skipping delay
slot instruction in case of branch likely instructions when branch is not
taken. This is useful for keeping the code common for use cases like kprobes
where one would like to handle the branch instructions keeping the delay slot
instuction also in picture for branch likely instructions. Also allow
emulation when instruction to be decoded is not at pt_regs->cp0_epc as in
case of kprobes where pt_regs->cp0_epc points to the breakpoint instruction.
The patch also exports the function for modules.
Signed-off-by: Maneesh Soni <manesoni@cisco.com>
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
---
arch/mips/include/asm/branch.h | 5 ++
arch/mips/kernel/branch.c | 128 ++++++++++++++++++++++++++--------------
arch/mips/math-emu/cp1emu.c | 2 +-
3 files changed, 90 insertions(+), 45 deletions(-)
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index 37c6857..888766a 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -9,6 +9,7 @@
#define _ASM_BRANCH_H
#include <asm/ptrace.h>
+#include <asm/inst.h>
static inline int delay_slot(struct pt_regs *regs)
{
@@ -23,7 +24,11 @@ static inline unsigned long exception_epc(struct pt_regs *regs)
return regs->cp0_epc + 4;
}
+#define BRANCH_LIKELY_TAKEN 0x0001
+
extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn);
static inline int compute_return_epc(struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 32103cc..4d735d0 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/module.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -17,28 +18,22 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-/*
- * Compute the return address and do emulate branch simulation, if required.
+/**
+ * __compute_return_epc_for_insn - Computes the return address and do emulate
+ * branch simulation, if required.
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+ * @returns: -EFAULT on error and forces SIGBUS, and on success
+ * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
+ * evaluating the branch.
*/
-int __compute_return_epc(struct pt_regs *regs)
+int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn)
{
- unsigned int __user *addr;
unsigned int bit, fcr31, dspcontrol;
- long epc;
- union mips_instruction insn;
-
- epc = regs->cp0_epc;
- if (epc & 3)
- goto unaligned;
-
- /*
- * Read the instruction
- */
- addr = (unsigned int __user *) epc;
- if (__get_user(insn.word, addr)) {
- force_sig(SIGSEGV, current);
- return -EFAULT;
- }
+ long epc = regs->cp0_epc;
+ int ret = 0;
switch (insn.i_format.opcode) {
/*
@@ -64,18 +59,22 @@ int __compute_return_epc(struct pt_regs *regs)
switch (insn.i_format.rt) {
case bltz_op:
case bltzl_op:
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
case bgez_op:
case bgezl_op:
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -83,9 +82,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bltzal_op:
case bltzall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -93,12 +94,15 @@ int __compute_return_epc(struct pt_regs *regs)
case bgezal_op:
case bgezall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
+
case bposge32_op:
if (!cpu_has_dsp)
goto sigill;
@@ -133,9 +137,11 @@ int __compute_return_epc(struct pt_regs *regs)
case beq_op:
case beql_op:
if (regs->regs[insn.i_format.rs] ==
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == beql_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -143,9 +149,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bne_op:
case bnel_op:
if (regs->regs[insn.i_format.rs] !=
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -153,9 +161,11 @@ int __compute_return_epc(struct pt_regs *regs)
case blez_op: /* not really i_format */
case blezl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] <= 0)
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -163,9 +173,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bgtz_op:
case bgtzl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] > 0)
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -187,18 +199,22 @@ int __compute_return_epc(struct pt_regs *regs)
switch (insn.i_format.rt & 3) {
case 0: /* bc1f */
case 2: /* bc1fl */
- if (~fcr31 & (1 << bit))
+ if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == 2)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
case 1: /* bc1t */
case 3: /* bc1tl */
- if (fcr31 & (1 << bit))
+ if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == 3)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -239,15 +255,39 @@ int __compute_return_epc(struct pt_regs *regs)
#endif
}
- return 0;
+ return ret;
-unaligned:
- printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+sigill:
+ printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
-sigill:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+int __compute_return_epc(struct pt_regs *regs)
+{
+ unsigned int __user *addr;
+ long epc;
+ union mips_instruction insn;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ addr = (unsigned int __user *) epc;
+ if (__get_user(insn.word, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+
+ return __compute_return_epc_for_insn(regs, insn);
+
+unaligned:
+ printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+
}
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index dbf2f93..a03bf00 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -245,7 +245,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
*/
emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */
- if (__compute_return_epc(xcp)) {
+ if (__compute_return_epc(xcp) < 0) {
#ifdef CP1DBG
printk("failed to emulate branch at %p\n",
(void *) (xcp->cp0_epc));
--
1.7.1
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH 4/4] MIPS Kprobes: Support branch instructions probing - v2
2011-11-08 17:03 [PATCH 0/4] MIPS Kprobes Maneesh Soni
` (2 preceding siblings ...)
2011-11-08 17:07 ` [PATCH 3/4] MIPS Kprobes: Refactoring Branch emulation Maneesh Soni
@ 2011-11-08 17:08 ` Maneesh Soni
2011-11-17 23:18 ` Ralf Baechle
3 siblings, 1 reply; 15+ messages in thread
From: Maneesh Soni @ 2011-11-08 17:08 UTC (permalink / raw)
To: Ralf Baechle; +Cc: David Daney, ananth, kamensky, linux-kernel, linux-mips
From: Maneesh Soni <manesoni@cisco.com>
MIPS Kprobes: Support branch instructions probing - v2
This patch provides support for kprobes on branch instructions. The branch
instruction at the probed address is actually emulated and not executed
out-of-line like other normal instructions. Instead the delay-slot instruction
is copied and single stepped out of line.
At the time of probe hit, the original branch instruction is evaluated
and the target cp0_epc is computed similarly to compute_return_epc(). It
is also checked if the delay slot instruction can be skipped, which is
true if there is a NOP in delay slot or branch is taken in case of
branch likely instructions. Once the delay slot instruction has been
single stepped, normal execution resumes with cp0_epc updated to the
earlier computed target cp0_epc, as per the branch instruction.
o Changes from v1
- added missing preempt_enable_no_resched()
- using refactored __compute_return_epc() to avoid missing instructions
Signed-off-by: Maneesh Soni <manesoni@cisco.com>
Signed-off-by: Victor Kamensky <kamensky@cisco.com>
---
arch/mips/include/asm/kprobes.h | 5 ++
arch/mips/kernel/kprobes.c | 145 ++++++++++++++++++++++++++++++---------
2 files changed, 117 insertions(+), 33 deletions(-)
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index e6ea4d4..1fbbca0 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -74,6 +74,8 @@ struct prev_kprobe {
: MAX_JPROBES_STACK_SIZE)
+#define SKIP_DELAYSLOT 0x0001
+
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned long kprobe_status;
@@ -82,6 +84,9 @@ struct kprobe_ctlblk {
unsigned long kprobe_saved_epc;
unsigned long jprobe_saved_sp;
struct pt_regs jprobe_saved_regs;
+ /* Per-thread fields, used while emulating branches */
+ unsigned long flags;
+ unsigned long target_epc;
u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 0ab1a5f..158467d 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <asm/ptrace.h>
+#include <asm/branch.h>
#include <asm/break.h>
#include <asm/inst.h>
@@ -152,13 +153,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
goto out;
}
- if (insn_has_delayslot(insn)) {
- pr_notice("Kprobes for branch and jump instructions are not"
- "supported\n");
- ret = -EINVAL;
- goto out;
- }
-
if ((probe_kernel_read(&prev_insn, p->addr - 1,
sizeof(mips_instruction)) == 0) &&
insn_has_delayslot(prev_insn)) {
@@ -178,9 +172,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
* In the kprobe->ainsn.insn[] array we store the original
* instruction at index zero and a break trap instruction at
* index one.
+ *
+ * On MIPS arch if the instruction at probed address is a
+ * branch instruction, we need to execute the instruction at
+ * Branch Delayslot (BD) at the time of probe hit. As MIPS also
+ * doesn't have single stepping support, the BD instruction can
+ * not be executed in-line and it would be executed on SSOL slot
+ * using a normal breakpoint instruction in the next slot.
+ * So, read the instruction and save it for later execution.
*/
+ if (insn_has_delayslot(insn))
+ memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
+ else
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
- memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
p->ainsn.insn[1] = breakpoint2_insn;
p->opcode = *p->addr;
@@ -231,16 +236,96 @@ static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_saved_epc = regs->cp0_epc;
}
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+/**
+ * evaluate_branch_instrucion -
+ *
+ * Evaluate the branch instruction at probed address during probe hit. The
+ * result of evaluation would be the updated epc. The insturction in delayslot
+ * would actually be single stepped using a normal breakpoint) on SSOL slot.
+ *
+ * The result is also saved in the kprobe control block for later use,
+ * in case we need to execute the delayslot instruction. The latter will be
+ * false for NOP instruction in dealyslot and the branch-likely instructions
+ * when the branch is taken. And for those cases we set a flag as
+ * SKIP_DELAYSLOT in the kprobe control block
+ */
+static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
+ union mips_instruction insn = p->opcode;
+ long epc;
+ int ret = 0;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ if (p->ainsn.insn->word == 0)
+ kcb->flags |= SKIP_DELAYSLOT;
+ else
+ kcb->flags &= ~SKIP_DELAYSLOT;
+
+ ret = __compute_return_epc_for_insn(regs, insn);
+ if (ret < 0)
+ return ret;
+
+ if (ret == BRANCH_LIKELY_TAKEN)
+ kcb->flags |= SKIP_DELAYSLOT;
+
+ kcb->target_epc = regs->cp0_epc;
+
+ return 0;
+
+unaligned:
+ pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS, current);
+ return -EFAULT;
+
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ int ret = 0;
+
regs->cp0_status &= ~ST0_IE;
/* single step inline if the instruction is a break */
if (p->opcode.word == breakpoint_insn.word ||
p->opcode.word == breakpoint2_insn.word)
regs->cp0_epc = (unsigned long)p->addr;
- else
- regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+ else if (insn_has_delayslot(p->opcode)) {
+ ret = evaluate_branch_instruction(p, regs, kcb);
+ if (ret < 0) {
+ pr_notice("Kprobes: Error in evaluating branch\n");
+ return;
+ }
+ }
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap. In case of branch instructions, the target
+ * epc to be restored.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (insn_has_delayslot(p->opcode))
+ regs->cp0_epc = kcb->target_epc;
+ else {
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+ }
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
@@ -279,8 +364,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
+ prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_REENTER;
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ resume_execution(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ }
return 1;
} else {
if (addr->word != breakpoint_insn.word) {
@@ -324,8 +414,16 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
}
ss_probe:
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
+ prepare_singlestep(p, regs, kcb);
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ resume_execution(p, regs, kcb);
+ preempt_enable_no_resched();
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+
return 1;
no_kprobe:
@@ -334,25 +432,6 @@ no_kprobe:
}
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction whose first byte has been replaced by the "break 0"
- * instruction. To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * breakpoint trap.
- */
-static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- unsigned long orig_epc = kcb->kprobe_saved_epc;
- regs->cp0_epc = orig_epc + 4;
-}
-
static inline int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
--
1.7.1
^ permalink raw reply related [flat|nested] 15+ messages in thread