* [RFC Patch 1/5] Allow arch-specific cleanup before breakpoint unregistration
[not found] <20100524034520.964014525@linux.vnet.ibm.com>
@ 2010-05-24 4:02 ` K.Prasad
2010-05-24 4:02 ` [RFC Patch 2/5] PPC64-HWBKPT: Implement hw-breakpoints for PowerPC Book III S K.Prasad
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: K.Prasad @ 2010-05-24 4:02 UTC (permalink / raw)
To: linuxppc-dev@ozlabs.org, Paul Mackerras
Cc: Michael Neuling, Benjamin Herrenschmidt, shaggy,
Frederic Weisbecker, David Gibson, Alan Stern, K.Prasad,
Roland McGrath
Certain architectures (such as PowerPC Book III S) need to clean up
data structures before a breakpoint is unregistered. This patch introduces
an arch-specific hook in release_bp_slot() along with a weak definition in
the form of a stub function.
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
---
kernel/hw_breakpoint.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
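For context, an architecture that needs this hook supplies its own non-weak
definition of arch_unregister_hw_breakpoint(); the PowerPC version appears in
patch 2 of this series. A minimal sketch of such an override (illustrative
only, not part of this patch; the per-CPU 'last_hit_bp' bookkeeping is
borrowed from patch 2):

#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Arch-private "last hit" pointer, kept so the breakpoint can be re-armed
 * after single-stepping (modeled on the PowerPC code in patch 2).
 */
static DEFINE_PER_CPU(struct perf_event *, last_hit_bp);

/* Strong definition; it takes precedence over the __weak stub below. */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
        int cpu = get_cpu();

        /* Drop any stale reference so it cannot dangle once 'bp' is freed */
        if (per_cpu(last_hit_bp, cpu) == bp)
                per_cpu(last_hit_bp, cpu) = NULL;

        put_cpu();
}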
Index: linux-2.6.ppc64_test/kernel/hw_breakpoint.c
===================================================================
--- linux-2.6.ppc64_test.orig/kernel/hw_breakpoint.c
+++ linux-2.6.ppc64_test/kernel/hw_breakpoint.c
@@ -242,6 +242,17 @@ toggle_bp_slot(struct perf_event *bp, bo
}
/*
+ * Function to perform processor-specific cleanup during unregistration
+ */
+__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
+{
+ /*
+ * A weak stub function here for those archs that don't define
+ * it inside arch/.../kernel/hw_breakpoint.c
+ */
+}
+
+/*
* Contraints to check before allowing this new breakpoint counter:
*
* == Non-pinned counter == (Considered as pinned for now)
@@ -339,6 +350,7 @@ void release_bp_slot(struct perf_event *
{
mutex_lock(&nr_bp_mutex);
+ arch_unregister_hw_breakpoint(bp);
__release_bp_slot(bp);
mutex_unlock(&nr_bp_mutex);
* [RFC Patch 2/5] PPC64-HWBKPT: Implement hw-breakpoints for PowerPC Book III S
[not found] <20100524034520.964014525@linux.vnet.ibm.com>
2010-05-24 4:02 ` [RFC Patch 1/5] Allow arch-specific cleanup before breakpoint unregistration K.Prasad
@ 2010-05-24 4:02 ` K.Prasad
2010-05-24 4:38 ` K.Prasad
2010-05-24 4:02 ` [RFC Patch 3/5] PPC64-HWBKPT: Handle concurrent alignment interrupts K.Prasad
` (2 subsequent siblings)
4 siblings, 1 reply; 6+ messages in thread
From: K.Prasad @ 2010-05-24 4:02 UTC (permalink / raw)
To: linuxppc-dev@ozlabs.org, Paul Mackerras
Cc: Michael Neuling, Benjamin Herrenschmidt, shaggy,
Frederic Weisbecker, David Gibson, Alan Stern, K.Prasad,
Roland McGrath
Implement perf-events based hw-breakpoint interfaces for PowerPC Book III S
processors. These interfaces help arbitrate requests from various users and
schedule them as appropriate.
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
---
arch/powerpc/Kconfig | 1
arch/powerpc/include/asm/cputable.h | 4
arch/powerpc/include/asm/hw_breakpoint.h | 49 ++++
arch/powerpc/include/asm/processor.h | 8
arch/powerpc/kernel/Makefile | 1
arch/powerpc/kernel/hw_breakpoint.c | 365 +++++++++++++++++++++++++++++++
arch/powerpc/kernel/machine_kexec_64.c | 3
arch/powerpc/kernel/process.c | 6
arch/powerpc/kernel/ptrace.c | 64 +++++
arch/powerpc/lib/Makefile | 1
include/linux/hw_breakpoint.h | 1
11 files changed, 503 insertions(+)
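As an illustration of how these interfaces are consumed, the sketch below
shows a kernel module placing a system-wide data watchpoint through the
generic layer that this patch plugs into. It is modeled on
samples/hw_breakpoint/data_breakpoint.c; the watched symbol ('pid_max') and
the handler body are arbitrary choices for the example, not anything
prescribed by this patch:

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event **wp;

/* Overflow handler signature of this kernel generation: (bp, nmi, data, regs) */
static void wp_handler(struct perf_event *bp, int nmi,
                       struct perf_sample_data *data, struct pt_regs *regs)
{
        printk(KERN_INFO "access to watched symbol, nip=%lx\n", regs->nip);
}

static int __init wp_init(void)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);
        attr.bp_addr = kallsyms_lookup_name("pid_max"); /* arbitrary symbol */
        attr.bp_len  = HW_BREAKPOINT_LEN_8;   /* max length; DABR granularity */
        attr.bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;

        wp = register_wide_hw_breakpoint(&attr, wp_handler);
        return IS_ERR(wp) ? PTR_ERR(wp) : 0;
}

static void __exit wp_exit(void)
{
        unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");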
Index: linux-2.6.ppc64_test/arch/powerpc/include/asm/hw_breakpoint.h
===================================================================
--- /dev/null
+++ linux-2.6.ppc64_test/arch/powerpc/include/asm/hw_breakpoint.h
@@ -0,0 +1,49 @@
+#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H
+#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+struct arch_hw_breakpoint {
+ u8 len; /* length of the target data symbol */
+ int type;
+ unsigned long address;
+};
+
+#include <linux/kdebug.h>
+#include <asm/reg.h>
+#include <asm/system.h>
+
+static inline int hw_breakpoint_slots(int type)
+{
+ return HBP_NUM;
+}
+struct perf_event;
+struct pmu;
+struct perf_sample_data;
+
+#define HW_BREAKPOINT_ALIGN 0x7
+/* Maximum permissible length of any HW Breakpoint */
+#define HW_BREAKPOINT_LEN 0x8
+
+extern int arch_bp_generic_fields(int type, int *gen_bp_type);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+extern struct pmu perf_ops_bp;
+extern void ptrace_triggered(struct perf_event *bp, int nmi,
+ struct perf_sample_data *data, struct pt_regs *regs);
+static inline void hw_breakpoint_disable(void)
+{
+ set_dabr(0);
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* __KERNEL__ */
+#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/hw_breakpoint.c
===================================================================
--- /dev/null
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/hw_breakpoint.c
@@ -0,0 +1,365 @@
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers. Derived from
+ * "arch/x86/kernel/hw_breakpoint.c"
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright 2010 IBM Corporation
+ * Author: K.Prasad <prasad@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/percpu.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/hw_breakpoint.h>
+#include <asm/processor.h>
+#include <asm/sstep.h>
+
+/*
+ * Store the 'bp' that caused the hw-breakpoint exception just before we
+ * single-step. Use it to distinguish a single-step exception (due to a
+ * previous hw-breakpoint exception) from a normal one
+ */
+static DEFINE_PER_CPU(struct perf_event *, last_hit_bp);
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for every cpu
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free debug address register and use it for this
+ * breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+
+ *slot = bp;
+
+ /*
+ * Do not install DABR values if the instruction must be single-stepped.
+ * If so, DABR will be populated in single_step_dabr_instruction().
+ */
+ if (current->thread.last_hit_ubp != bp)
+ set_dabr(info->address | info->type | DABR_TRANSLATION);
+
+ return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+
+ if (*slot != bp) {
+ WARN_ONCE(1, "Can't find the breakpoint");
+ return;
+ }
+
+ *slot = NULL;
+ set_dabr(0);
+}
+
+/*
+ * Perform cleanup of arch-specific counters during unregistration
+ * of the perf-event
+ */
+void arch_unregister_hw_breakpoint(struct perf_event *bp)
+{
+ int cpu = get_cpu();
+
+ /*
+ * If the breakpoint is unregistered between a hw_breakpoint_handler()
+ * and the single_step_dabr_instruction(), then cleanup the breakpoint
+ * restoration variables to prevent dangling pointers.
+ */
+ if (per_cpu(last_hit_bp, cpu) == bp)
+ per_cpu(last_hit_bp, cpu) = NULL;
+ else if (bp->ctx->task)
+ bp->ctx->task->thread.last_hit_ubp = NULL;
+
+ put_cpu();
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ return is_kernel_addr(info->address);
+}
+
+int arch_bp_generic_fields(int type, int *gen_bp_type)
+{
+ switch (type) {
+ case DABR_DATA_READ:
+ *gen_bp_type = HW_BREAKPOINT_R;
+ break;
+ case DABR_DATA_WRITE:
+ *gen_bp_type = HW_BREAKPOINT_W;
+ break;
+ case (DABR_DATA_WRITE | DABR_DATA_READ):
+ *gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+ int ret = -EINVAL;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ if (!bp)
+ return ret;
+
+ switch (bp->attr.bp_type) {
+ case HW_BREAKPOINT_R:
+ info->type = DABR_DATA_READ;
+ break;
+ case HW_BREAKPOINT_W:
+ info->type = DABR_DATA_WRITE;
+ break;
+ case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
+ info->type = (DABR_DATA_READ | DABR_DATA_WRITE);
+ break;
+ default:
+ return ret;
+ }
+
+ info->address = bp->attr.bp_addr;
+ info->len = bp->attr.bp_len;
+
+ /*
+ * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
+ * and breakpoint addresses are aligned to nearest double-word
+ * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
+ * 'symbolsize' should satisfy the check below.
+ */
+ if (info->len >
+ (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN)))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+ bool is_kernel, is_ptrace_bp = false;
+ int rc = NOTIFY_STOP;
+ struct perf_event *bp;
+ struct pt_regs *regs = args->regs;
+ unsigned long dar = regs->dar;
+ int stepped = 1;
+ struct arch_hw_breakpoint *info;
+
+ /* Disable breakpoints during exception handling */
+ set_dabr(0);
+ /*
+ * The counter may be concurrently released but that can only
+ * occur from a call_rcu() path. We can then safely fetch
+ * the breakpoint, use its callback, touch its counter
+ * while we are in an rcu_read_lock() path.
+ */
+ rcu_read_lock();
+
+ bp = __get_cpu_var(bp_per_reg);
+ if (!bp)
+ goto out;
+ info = counter_arch_bp(bp);
+ is_kernel = is_kernel_addr(bp->attr.bp_addr);
+ is_ptrace_bp = (bp->overflow_handler == ptrace_triggered) ?
+ true : false;
+
+ /*
+ * Verify if dar lies within the address range occupied by the symbol
+ * being watched to filter extraneous exceptions.
+ */
+ if (!((bp->attr.bp_addr <= dar) &&
+ (dar <= (bp->attr.bp_addr + bp->attr.bp_len))) &&
+ (!is_ptrace_bp))
+ /*
+ * This exception is triggered not because of a memory access on
+ * the monitored variable but in the double-word address range
+ * in which it is contained. We will consume this exception,
+ * considering it as 'noise'.
+ */
+ goto restore_bp;
+
+ /*
+ * Return early after invoking user-callback function without restoring
+ * DABR if the breakpoint is from ptrace which always operates in
+ * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
+ * generated in do_dabr().
+ */
+ if (is_ptrace_bp) {
+ perf_bp_event(bp, regs);
+ rc = NOTIFY_DONE;
+ goto out;
+ }
+
+ /*
+ * Do not emulate user-space instructions from kernel-space,
+ * instead single-step them.
+ */
+ if (!is_kernel) {
+ bp->ctx->task->thread.last_hit_ubp = bp;
+ regs->msr |= MSR_SE;
+ goto out;
+ }
+
+ stepped = emulate_step(regs, regs->nip);
+ /* emulate_step() could not execute it, single-step them */
+ if (stepped == 0) {
+ regs->msr |= MSR_SE;
+ __get_cpu_var(last_hit_bp) = bp;
+ goto out;
+ }
+ /*
+ * As a policy, the callback is invoked in a 'trigger-after-execute'
+ * fashion
+ */
+ perf_bp_event(bp, regs);
+
+restore_bp:
+ set_dabr(info->address | info->type | DABR_TRANSLATION);
+out:
+ rcu_read_unlock();
+ return rc;
+}
+
+/*
+ * Handle single-step exceptions following a DABR hit.
+ */
+int __kprobes single_step_dabr_instruction(struct die_args *args)
+{
+ struct pt_regs *regs = args->regs;
+ struct perf_event *bp = NULL, *kernel_bp, *user_bp;
+ struct arch_hw_breakpoint *bp_info;
+
+ /*
+ * Identify the cause of single-stepping and find the corresponding
+ * breakpoint structure
+ */
+ user_bp = current->thread.last_hit_ubp;
+ kernel_bp = __get_cpu_var(last_hit_bp);
+ if (user_bp) {
+ bp = user_bp;
+ current->thread.last_hit_ubp = NULL;
+ } else if (kernel_bp) {
+ bp = kernel_bp;
+ __get_cpu_var(last_hit_bp) = NULL;
+ }
+
+ /*
+ * Check if we are single-stepping as a result of a
+ * previous HW Breakpoint exception
+ */
+ if (!bp)
+ return NOTIFY_DONE;
+
+ bp_info = counter_arch_bp(bp);
+
+ /*
+ * We shall invoke the user-defined callback function in the single
+ * stepping handler to conform to 'trigger-after-execute' semantics
+ */
+ perf_bp_event(bp, regs);
+
+ /*
+ * Do not disable MSR_SE if the process was already in
+ * single-stepping mode. We cannot reliably detect single-step mode
+ * for kernel-space breakpoints, so this cannot work along with other
+ * debuggers (like KGDB, xmon) which may be single-stepping kernel code.
+ */
+ if (!(user_bp && test_thread_flag(TIF_SINGLESTEP)))
+ regs->msr &= ~MSR_SE;
+
+ set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
+ return NOTIFY_STOP;
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(
+ struct notifier_block *unused, unsigned long val, void *data)
+{
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_DABR_MATCH:
+ ret = hw_breakpoint_handler(data);
+ break;
+ case DIE_SSTEP:
+ ret = single_step_dabr_instruction(data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ struct thread_struct *t = &tsk->thread;
+
+ unregister_hw_breakpoint(t->ptrace_bps[0]);
+ t->ptrace_bps[0] = NULL;
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+ /* TODO */
+}
+
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/Makefile
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/Makefile
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/Makefile
@@ -34,6 +34,7 @@ obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o
Index: linux-2.6.ppc64_test/arch/powerpc/include/asm/processor.h
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/include/asm/processor.h
+++ linux-2.6.ppc64_test/arch/powerpc/include/asm/processor.h
@@ -209,6 +209,14 @@ struct thread_struct {
#ifdef CONFIG_PPC64
unsigned long start_tb; /* Start purr when proc switched in */
unsigned long accum_tb; /* Total accumilated purr for process */
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct perf_event *ptrace_bps[HBP_NUM];
+ /*
+ * Helps identify source of single-step exception and subsequent
+ * hw-breakpoint enablement
+ */
+ struct perf_event *last_hit_ubp;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
unsigned long dabr; /* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/ptrace.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/ptrace.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/ptrace.c
@@ -32,6 +32,8 @@
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -866,9 +868,34 @@ void user_disable_single_step(struct tas
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+void ptrace_triggered(struct perf_event *bp, int nmi,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event_attr attr;
+
+ /*
+ * Disable the breakpoint request here since ptrace has defined a
+ * one-shot behaviour for breakpoint exceptions in PPC64.
+ * The SIGTRAP signal is generated automatically for us in do_dabr().
+ * We don't have to do anything about that here
+ */
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret;
+ struct thread_struct *thread = &(task->thread);
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
* For embedded processors we support one DAC and no IAC's at the
* moment.
@@ -896,6 +923,43 @@ int ptrace_set_debugreg(struct task_stru
/* Ensure breakpoint translation bit is set */
if (data && !(data & DABR_TRANSLATION))
return -EIO;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ bp = thread->ptrace_bps[0];
+ if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
+ if (bp) {
+ unregister_hw_breakpoint(bp);
+ thread->ptrace_bps[0] = NULL;
+ }
+ return 0;
+ }
+ if (bp) {
+ attr = bp->attr;
+ attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
+ arch_bp_generic_fields(data &
+ (DABR_DATA_WRITE | DABR_DATA_READ),
+ &attr.bp_type);
+ ret = modify_user_hw_breakpoint(bp, &attr);
+ if (ret)
+ return ret;
+ thread->ptrace_bps[0] = bp;
+ thread->dabr = data;
+ return 0;
+ }
+
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
+ arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
+ &attr.bp_type);
+
+ thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
+ ptrace_triggered, task);
+ if (IS_ERR(bp)) {
+ thread->ptrace_bps[0] = NULL;
+ return PTR_ERR(bp);
+ }
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Move contents to the DABR register */
task->thread.dabr = data;
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/process.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/process.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/process.c
@@ -462,8 +462,14 @@ struct task_struct *__switch_to(struct t
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
switch_booke_debug_regs(&new->thread);
#else
+/*
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
+ * schedule DABR
+ */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
set_dabr(new->thread.dabr);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
Index: linux-2.6.ppc64_test/arch/powerpc/include/asm/cputable.h
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/include/asm/cputable.h
+++ linux-2.6.ppc64_test/arch/powerpc/include/asm/cputable.h
@@ -516,6 +516,10 @@ static inline int cpu_has_feature(unsign
& feature);
}
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#define HBP_NUM 1
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/machine_kexec_64.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/machine_kexec_64.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/machine_kexec_64.c
@@ -15,6 +15,7 @@
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -165,6 +166,7 @@ static void kexec_smp_down(void *arg)
while(kexec_all_irq_disabled == 0)
cpu_relax();
mb(); /* make sure all irqs are disabled before this */
+ hw_breakpoint_disable();
/*
* Now every CPU has IRQs off, we can clear out any pending
* IPIs and be sure that no more will come in after this.
@@ -180,6 +182,7 @@ static void kexec_prepare_cpus_wait(int
{
int my_cpu, i, notified=-1;
+ hw_breakpoint_disable();
my_cpu = get_cpu();
/* Make sure each CPU has atleast made it to the state we need */
for (i=0; i < NR_CPUS; i++) {
Index: linux-2.6.ppc64_test/arch/powerpc/lib/Makefile
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/lib/Makefile
+++ linux-2.6.ppc64_test/arch/powerpc/lib/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PPC64) += copypage_64.o cop
memcpy_64.o usercopy_64.o mem_64.o string.o
obj-$(CONFIG_XMON) += sstep.o
obj-$(CONFIG_KPROBES) += sstep.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o
ifeq ($(CONFIG_PPC64),y)
obj-$(CONFIG_SMP) += locks.o
Index: linux-2.6.ppc64_test/include/linux/hw_breakpoint.h
===================================================================
--- linux-2.6.ppc64_test.orig/include/linux/hw_breakpoint.h
+++ linux-2.6.ppc64_test/include/linux/hw_breakpoint.h
@@ -139,6 +139,7 @@ static inline struct arch_hw_breakpoint
{
return NULL;
}
+static inline void hw_breakpoint_disable(void) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
Index: linux-2.6.ppc64_test/arch/powerpc/Kconfig
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/Kconfig
+++ linux-2.6.ppc64_test/arch/powerpc/Kconfig
@@ -141,6 +141,7 @@ config PPC
select GENERIC_ATOMIC64 if PPC32
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
config EARLY_PRINTK
bool
* Re: [RFC Patch 2/5] PPC64-HWBKPT: Implement hw-breakpoints for PowerPC Book III S
2010-05-24 4:02 ` [RFC Patch 2/5] PPC64-HWBKPT: Implement hw-breakpoints for PowerPC Book III S K.Prasad
@ 2010-05-24 4:38 ` K.Prasad
0 siblings, 0 replies; 6+ messages in thread
From: K.Prasad @ 2010-05-24 4:38 UTC (permalink / raw)
To: linuxppc-dev@ozlabs.org, Paul Mackerras
Cc: Michael Neuling, Benjamin Herrenschmidt, shaggy,
Frederic Weisbecker, David Gibson, Alan Stern, Roland McGrath
On Mon, May 24, 2010 at 09:32:25AM +0530, K.Prasad wrote:
> Implement perf-events based hw-breakpoint interfaces for PowerPC Book III S
> processors. These interfaces help arbitrate requests from various users and
> schedule them as appropriate.
>
<snipped>
> +/*
> + * Handle debug exception notifications.
> + */
> +int __kprobes hw_breakpoint_handler(struct die_args *args)
> +{
> + bool is_kernel, is_ptrace_bp = false;
> + int rc = NOTIFY_STOP;
> + struct perf_event *bp;
> + struct pt_regs *regs = args->regs;
> + unsigned long dar = regs->dar;
> + int stepped = 1;
> + struct arch_hw_breakpoint *info;
> +
> + /* Disable breakpoints during exception handling */
> + set_dabr(0);
> + /*
> + * The counter may be concurrently released but that can only
> + * occur from a call_rcu() path. We can then safely fetch
> + * the breakpoint, use its callback, touch its counter
> + * while we are in an rcu_read_lock() path.
> + */
> + rcu_read_lock();
> +
> + bp = __get_cpu_var(bp_per_reg);
> + if (!bp)
> + goto out;
> + info = counter_arch_bp(bp);
> + is_kernel = is_kernel_addr(bp->attr.bp_addr);
> + is_ptrace_bp = (bp->overflow_handler == ptrace_triggered) ?
> + true : false;
> +
> + /*
> + * Verify if dar lies within the address range occupied by the symbol
> + * being watched to filter extraneous exceptions.
> + */
> + if (!((bp->attr.bp_addr <= dar) &&
> + (dar <= (bp->attr.bp_addr + bp->attr.bp_len))) &&
> + (!is_ptrace_bp))
> + /*
> + * This exception is triggered not because of a memory access on
> + * the monitored variable but in the double-word address range
> + * in which it is contained. We will consume this exception,
> + * considering it as 'noise'.
> + */
> + goto restore_bp;
> +
> + /*
> + * Return early after invoking user-callback function without restoring
> + * DABR if the breakpoint is from ptrace which always operates in
> + * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
> + * generated in do_dabr().
> + */
> + if (is_ptrace_bp) {
> + perf_bp_event(bp, regs);
> + rc = NOTIFY_DONE;
> + goto out;
> + }
> +
> + /*
> + * Do not emulate user-space instructions from kernel-space,
> + * instead single-step them.
> + */
> + if (!is_kernel) {
> + bp->ctx->task->thread.last_hit_ubp = bp;
> + regs->msr |= MSR_SE;
> + goto out;
> + }
> +
> + stepped = emulate_step(regs, regs->nip);
> + /* emulate_step() could not execute it, single-step them */
> + if (stepped == 0) {
While responding to one of the previous mails, I realised that I had not
made the change Paul Mackerras suggested here (reference: linuxppc-dev
message-id 20100520131003.GB29903@brick.ozlabs.ibm.com), i.e. uninstall
the breakpoint if single-stepping failed.
I'll quickly send out a revised patch as a reply to this one. Apologies
for the confusion caused.
Thanks,
K.Prasad
> + regs->msr |= MSR_SE;
> + __get_cpu_var(last_hit_bp) = bp;
> + goto out;
> + }
> + /*
> + * As a policy, the callback is invoked in a 'trigger-after-execute'
> + * fashion
> + */
> + perf_bp_event(bp, regs);
> +
> +restore_bp:
> + set_dabr(info->address | info->type | DABR_TRANSLATION);
> +out:
> + rcu_read_unlock();
> + return rc;
> +}
* [RFC Patch 3/5] PPC64-HWBKPT: Handle concurrent alignment interrupts
[not found] <20100524034520.964014525@linux.vnet.ibm.com>
2010-05-24 4:02 ` [RFC Patch 1/5] Allow arch-specific cleanup before breakpoint unregistration K.Prasad
2010-05-24 4:02 ` [RFC Patch 2/5] PPC64-HWBKPT: Implement hw-breakpoints for PowerPC Book III S K.Prasad
@ 2010-05-24 4:02 ` K.Prasad
2010-05-24 4:03 ` [RFC Patch 4/5] PPC64-HWBKPT: Enable hw-breakpoints while handling intervening signals K.Prasad
2010-05-24 4:04 ` [RFC Patch 5/5] PPC64-HWBKPT: Enable proper distinction of per-task and per-cpu breakpoints K.Prasad
4 siblings, 0 replies; 6+ messages in thread
From: K.Prasad @ 2010-05-24 4:02 UTC (permalink / raw)
To: linuxppc-dev@ozlabs.org, Paul Mackerras
Cc: Michael Neuling, Benjamin Herrenschmidt, shaggy,
Frederic Weisbecker, David Gibson, Alan Stern, K.Prasad,
Roland McGrath
An alignment interrupt may intervene between a DSI/hw-breakpoint exception
and the corresponding single-step exception. Modify emulate_single_step() so
that the alignment-interrupt path also notifies the single-step exception
handler, allowing it to restore the hw-breakpoint properly.
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
---
arch/powerpc/kernel/traps.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/traps.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/traps.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/traps.c
@@ -602,7 +602,7 @@ void RunModeException(struct pt_regs *re
void __kprobes single_step_exception(struct pt_regs *regs)
{
- regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
+ clear_single_step(regs);
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
@@ -621,10 +621,7 @@ void __kprobes single_step_exception(str
*/
static void emulate_single_step(struct pt_regs *regs)
{
- if (single_stepping(regs)) {
- clear_single_step(regs);
- _exception(SIGTRAP, regs, TRAP_TRACE, 0);
- }
+ single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
* [RFC Patch 4/5] PPC64-HWBKPT: Enable hw-breakpoints while handling intervening signals
[not found] <20100524034520.964014525@linux.vnet.ibm.com>
` (2 preceding siblings ...)
2010-05-24 4:02 ` [RFC Patch 3/5] PPC64-HWBKPT: Handle concurrent alignment interrupts K.Prasad
@ 2010-05-24 4:03 ` K.Prasad
2010-05-24 4:04 ` [RFC Patch 5/5] PPC64-HWBKPT: Enable proper distinction of per-task and per-cpu breakpoints K.Prasad
4 siblings, 0 replies; 6+ messages in thread
From: K.Prasad @ 2010-05-24 4:03 UTC (permalink / raw)
To: linuxppc-dev@ozlabs.org, Paul Mackerras
Cc: Michael Neuling, Benjamin Herrenschmidt, shaggy,
Frederic Weisbecker, David Gibson, Alan Stern, K.Prasad,
Roland McGrath
A signal delivered between hw_breakpoint_handler() and
single_step_dabr_instruction() would otherwise be handled without the
breakpoint active (the breakpoint is not restored through single-stepping
because the MSR_SE bit is absent from the signal frame). Enable breakpoints
before signal delivery and clear them during the sigreturn() syscall.
Limitation: Nested hw-breakpoint exceptions (where a second exception is
raised inside the signal context) will cause a 'double-hit', i.e. the first
breakpoint exception will be taken twice.
Also restore hw-breakpoints if the user context is altered in the signal
handler (causing loss of MSR_SE).
Side-effect: a 'double-hit' of the breakpoint if the instruction pointer is
unaltered in the new context.
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_breakpoint.h | 3 +++
arch/powerpc/kernel/hw_breakpoint.c | 28 ++++++++++++++++++++++++++++
arch/powerpc/kernel/signal.c | 8 ++++++++
arch/powerpc/kernel/signal_32.c | 10 ++++++++++
arch/powerpc/kernel/signal_64.c | 7 +++++++
5 files changed, 56 insertions(+)
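To make the window this patch closes easier to follow, the sequence of
events (pieced together from the hunks below; the numbering and wording are
mine, not the patch author's) is roughly:

/*
 * 1. DABR match on a user address: hw_breakpoint_handler() does set_dabr(0),
 *    sets MSR_SE and records current->thread.last_hit_ubp = bp.
 * 2. A signal becomes pending before the single-step exception is taken.
 * 3. Signal delivery: do_signal_pending() now calls
 *    sighandler_install_bp(current), so the signal handler runs with DABR
 *    re-armed instead of silently losing the watchpoint.
 * 4. sigreturn()/rt_sigreturn(): sigreturn_uninstall_bp(current) clears DABR
 *    again; the restored context still carries MSR_SE, so
 *    single_step_dabr_instruction() re-arms DABR once the interrupted
 *    instruction finally completes.
 * 5. sys_swapcontext(): thread_change_pc() re-arms DABR only when the new
 *    context does not carry MSR_SE, since a pending single-step would
 *    otherwise do it.
 */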
Index: linux-2.6.ppc64_test/arch/powerpc/include/asm/hw_breakpoint.h
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/include/asm/hw_breakpoint.h
+++ linux-2.6.ppc64_test/arch/powerpc/include/asm/hw_breakpoint.h
@@ -43,6 +43,9 @@ static inline void hw_breakpoint_disable
{
set_dabr(0);
}
+extern void sighandler_install_bp(struct task_struct *tsk);
+extern void sigreturn_uninstall_bp(struct task_struct *tsk);
+extern void thread_change_pc(struct task_struct *tsk, unsigned long msr);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/hw_breakpoint.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/hw_breakpoint.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/hw_breakpoint.c
@@ -188,6 +188,34 @@ int arch_validate_hwbkpt_settings(struct
return 0;
}
+void sighandler_install_bp(struct task_struct *tsk)
+{
+ struct arch_hw_breakpoint *info;
+
+ if (likely(!tsk->thread.last_hit_ubp))
+ return;
+
+ info = counter_arch_bp(tsk->thread.last_hit_ubp);
+ set_dabr(info->address | info->type | DABR_TRANSLATION);
+}
+
+void sigreturn_uninstall_bp(struct task_struct *tsk)
+{
+ if (unlikely(tsk->thread.last_hit_ubp))
+ set_dabr(0);
+}
+
+void thread_change_pc(struct task_struct *tsk, unsigned long new_msr)
+{
+ /*
+ * Do not bother to restore breakpoints if single-stepping is not
+ * cleared. single_step_dabr_instruction() will handle it if MSR_SE
+ * is set.
+ */
+ if (!(new_msr & MSR_SE))
+ sighandler_install_bp(tsk);
+}
+
/*
* Handle debug exception notifications.
*/
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/signal.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/signal.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
#include <linux/tracehook.h>
#include <linux/signal.h>
+#include <asm/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -149,6 +150,13 @@ static int do_signal_pending(sigset_t *o
if (current->thread.dabr)
set_dabr(current->thread.dabr);
#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ /*
+ * Re-enable the breakpoints (if it was previously cleared in
+ * hw_breakpoint_handler()) for the signal stack.
+ */
+ sighandler_install_bp(current);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (is32) {
if (ka.sa.sa_flags & SA_SIGINFO)
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/signal_64.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/signal_64.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/signal_64.c
@@ -33,6 +33,7 @@
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
+#include <asm/hw_breakpoint.h>
#include "signal.h"
@@ -312,6 +313,9 @@ int sys_swapcontext(struct ucontext __us
|| __copy_to_user(&old_ctx->uc_sigmask,
&current->blocked, sizeof(sigset_t)))
return -EFAULT;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ thread_change_pc(current, new_msr);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
if (new_ctx == NULL)
return 0;
@@ -364,6 +368,9 @@ int sys_rt_sigreturn(unsigned long r3, u
if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto badframe;
restore_sigmask(&set);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ sigreturn_uninstall_bp(current);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
goto badframe;
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/signal_32.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/signal_32.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/signal_32.c
@@ -42,6 +42,7 @@
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
+#include <asm/hw_breakpoint.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
@@ -996,6 +997,9 @@ long sys_swapcontext(struct ucontext __u
|| put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
|| __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
return -EFAULT;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ thread_change_pc(current, new_msr);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
if (new_ctx == NULL)
return 0;
@@ -1034,6 +1038,9 @@ long sys_rt_sigreturn(int r3, int r4, in
(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
goto bad;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ sigreturn_uninstall_bp(current);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (do_setcontext(&rt_sf->uc, regs, 1))
goto bad;
@@ -1279,6 +1286,9 @@ long sys_sigreturn(int r3, int r4, int r
#endif
restore_sigmask(&set);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ sigreturn_uninstall_bp(current);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
addr = sr;
if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
* [RFC Patch 5/5] PPC64-HWBKPT: Enable proper distinction of per-task and per-cpu breakpoints
[not found] <20100524034520.964014525@linux.vnet.ibm.com>
` (3 preceding siblings ...)
2010-05-24 4:03 ` [RFC Patch 4/5] PPC64-HWBKPT: Enable hw-breakpoints while handling intervening signals K.Prasad
@ 2010-05-24 4:04 ` K.Prasad
4 siblings, 0 replies; 6+ messages in thread
From: K.Prasad @ 2010-05-24 4:04 UTC (permalink / raw)
To: linuxppc-dev@ozlabs.org, Paul Mackerras,
Linux Kernel Mailing List
Cc: Michael Neuling, Benjamin Herrenschmidt, shaggy,
Frederic Weisbecker, David Gibson, Alan Stern, K.Prasad,
Roland McGrath
Per-task and per-cpu breakpoints have to be unambiguously identified for
proper restoration of hw-breakpoints; distinguishing purely between user-
and kernel-space addresses is no longer sufficient. Store the 'pid' of the
process against which the perf counter was requested to make this
distinction.
This enables seamless handling of kernel-space breakpoints bound to a
user-space process, as well as breakpoints on kernel threads.
Signed-off-by: K.Prasad <prasad@linux.vnet.ibm.com>
---
arch/powerpc/kernel/hw_breakpoint.c | 24 +++++++++++++++++-------
include/linux/perf_event.h | 1 +
kernel/perf_event.c | 9 ++++++---
3 files changed, 24 insertions(+), 10 deletions(-)
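For reference, the two flavours being distinguished here come from different
registration paths in the generic layer. A small sketch of the call sites
(the wrapper function, handler and attr setup are placeholders, not code from
this series):

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* 'attr', 'tsk' and 'handler' are assumed to be set up elsewhere. */
static void example_two_flavours(struct perf_event_attr *attr,
                                 struct task_struct *tsk,
                                 perf_overflow_handler_t handler)
{
        struct perf_event *task_bp;
        struct perf_event **cpu_bps;

        /* Task-bound: registered against 'tsk', so the event now carries a
         * positive pid and hw_breakpoint_handler() keeps its "last hit"
         * state in tsk->thread.last_hit_ubp.
         */
        task_bp = register_user_hw_breakpoint(attr, handler, tsk);

        /* CPU-bound (wide): requested per CPU rather than per task, so
         * bp->pid is not a valid task pid and the "last hit" state lives in
         * the per-cpu last_hit_bp instead.
         */
        cpu_bps = register_wide_hw_breakpoint(attr, handler);

        /* error handling and unregistration omitted for brevity */
        (void)task_bp;
        (void)cpu_bps;
}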
Index: linux-2.6.ppc64_test/include/linux/perf_event.h
===================================================================
--- linux-2.6.ppc64_test.orig/include/linux/perf_event.h
+++ linux-2.6.ppc64_test/include/linux/perf_event.h
@@ -698,6 +698,7 @@ struct perf_event {
int oncpu;
int cpu;
+ pid_t pid;
struct list_head owner_entry;
struct task_struct *owner;
Index: linux-2.6.ppc64_test/kernel/perf_event.c
===================================================================
--- linux-2.6.ppc64_test.orig/kernel/perf_event.c
+++ linux-2.6.ppc64_test/kernel/perf_event.c
@@ -4684,6 +4684,7 @@ static const struct pmu *sw_perf_event_i
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr,
int cpu,
+ pid_t pid,
struct perf_event_context *ctx,
struct perf_event *group_leader,
struct perf_event *parent_event,
@@ -4717,6 +4718,7 @@ perf_event_alloc(struct perf_event_attr
mutex_init(&event->mmap_mutex);
event->cpu = cpu;
+ event->pid = pid;
event->attr = *attr;
event->group_leader = group_leader;
event->pmu = NULL;
@@ -5015,7 +5017,7 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_put_context;
}
- event = perf_event_alloc(&attr, cpu, ctx, group_leader,
+ event = perf_event_alloc(&attr, cpu, pid, ctx, group_leader,
NULL, NULL, GFP_KERNEL);
err = PTR_ERR(event);
if (IS_ERR(event))
@@ -5090,7 +5092,7 @@ perf_event_create_kernel_counter(struct
goto err_exit;
}
- event = perf_event_alloc(attr, cpu, ctx, NULL,
+ event = perf_event_alloc(attr, cpu, pid, ctx, NULL,
NULL, overflow_handler, GFP_KERNEL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
@@ -5142,7 +5144,8 @@ inherit_event(struct perf_event *parent_
parent_event = parent_event->parent;
child_event = perf_event_alloc(&parent_event->attr,
- parent_event->cpu, child_ctx,
+ parent_event->cpu, child->pid,
+ child_ctx,
group_leader, parent_event,
NULL, GFP_KERNEL);
if (IS_ERR(child_event))
Index: linux-2.6.ppc64_test/arch/powerpc/kernel/hw_breakpoint.c
===================================================================
--- linux-2.6.ppc64_test.orig/arch/powerpc/kernel/hw_breakpoint.c
+++ linux-2.6.ppc64_test/arch/powerpc/kernel/hw_breakpoint.c
@@ -221,7 +221,7 @@ void thread_change_pc(struct task_struct
*/
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
- bool is_kernel, is_ptrace_bp = false;
+ bool is_kernel, is_taskbound_bp, is_ptrace_bp = false;
int rc = NOTIFY_STOP;
struct perf_event *bp;
struct pt_regs *regs = args->regs;
@@ -246,6 +246,7 @@ int __kprobes hw_breakpoint_handler(stru
is_kernel = is_kernel_addr(bp->attr.bp_addr);
is_ptrace_bp = (bp->overflow_handler == ptrace_triggered) ?
true : false;
+ is_taskbound_bp = (bp->pid > 0) ? true : false;
/*
* Verify if dar lies within the address range occupied by the symbol
@@ -288,7 +289,14 @@ int __kprobes hw_breakpoint_handler(stru
/* emulate_step() could not execute it, single-step them */
if (stepped == 0) {
regs->msr |= MSR_SE;
- __get_cpu_var(last_hit_bp) = bp;
+ /*
+ * Kernel-space addresses can also be bound to a task. If so,
+ * store the breakpoint in its 'thread_struct'
+ */
+ if (is_taskbound_bp)
+ bp->ctx->task->thread.last_hit_ubp = bp;
+ else
+ __get_cpu_var(last_hit_bp) = bp;
goto out;
}
/*
@@ -310,17 +318,17 @@ out:
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
- struct perf_event *bp = NULL, *kernel_bp, *user_bp;
+ struct perf_event *bp = NULL, *kernel_bp, *per_task_bp;
struct arch_hw_breakpoint *bp_info;
/*
* Identify the cause of single-stepping and find the corresponding
* breakpoint structure
*/
- user_bp = current->thread.last_hit_ubp;
+ per_task_bp = current->thread.last_hit_ubp;
kernel_bp = __get_cpu_var(last_hit_bp);
- if (user_bp) {
- bp = user_bp;
+ if (per_task_bp) {
+ bp = per_task_bp;
current->thread.last_hit_ubp = NULL;
} else if (kernel_bp) {
bp = kernel_bp;
@@ -348,7 +356,9 @@ int __kprobes single_step_dabr_instructi
* for kernel-space breakpoints, so this cannot work along with other
* debuggers (like KGDB, xmon) which may be single-stepping kernel code.
*/
- if (!(user_bp && test_thread_flag(TIF_SINGLESTEP)))
+ if (!(per_task_bp &&
+ (!is_kernel_addr(bp->attr.bp_addr)) &&
+ test_thread_flag(TIF_SINGLESTEP)))
regs->msr &= ~MSR_SE;
set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);