* [Qemu-devel] [RFC 1/7] cpu: move break/watchpoints into arrays.
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 17:03 ` Paolo Bonzini
2016-06-17 16:33 ` [Qemu-devel] [RFC 2/7] exec: keep CPUWatchpoint references internal Alex Bennée
` (7 subsequent siblings)
8 siblings, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Peter Crosthwaite, Riku Voipio,
Eduardo Habkost, Michael Walle, open list:ARM
Before we can protect the lists we need a structure that is a little
more amenable to RCU protection. This moves all the lists into a
re-sizeable array. The array still only points to separately allocated
structures, because a number of the architectures still need to examine
the break/watchpoint fields after a hit.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
cpu-exec.c | 6 +-
exec.c | 167 ++++++++++++++++++++++++++++++---------------
include/qom/cpu.h | 22 +++---
linux-user/main.c | 22 +++---
qom/cpu.c | 2 -
target-arm/translate-a64.c | 6 +-
target-arm/translate.c | 6 +-
target-i386/bpt_helper.c | 6 +-
target-lm32/helper.c | 6 +-
9 files changed, 157 insertions(+), 86 deletions(-)
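
As an editorial aside, the following standalone sketch (not QEMU code)
shows the GArray-of-pointers pattern the patch switches to; the
simplified Watchpoint struct and the BP_GDB flag value used here are
assumptions for illustration only:

/* Standalone illustration of a GArray of pointers replacing a linked
 * list of watchpoints.  Build with: gcc sketch.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdio.h>

#define BP_GDB 0x10           /* assumed flag value, for the sketch only */

typedef struct {
    guint64 vaddr;
    int flags;
} Watchpoint;

static void insert_wp(GArray *wps, guint64 addr, int flags)
{
    Watchpoint *wp = g_new0(Watchpoint, 1);
    wp->vaddr = addr;
    wp->flags = flags;
    /* keep GDB-injected watchpoints in front, as the patch does */
    if (flags & BP_GDB) {
        g_array_prepend_val(wps, wp);
    } else {
        g_array_append_val(wps, wp);
    }
}

int main(void)
{
    /* array of pointers, mirroring g_array_new(false, false, sizeof(CPUWatchpoint *)) */
    GArray *wps = g_array_new(FALSE, FALSE, sizeof(Watchpoint *));
    guint i;

    insert_wp(wps, 0x1000, 0);
    insert_wp(wps, 0x2000, BP_GDB);

    for (i = 0; i < wps->len; i++) {
        Watchpoint *wp = g_array_index(wps, Watchpoint *, i);
        printf("wp %u: vaddr=0x%" G_GINT64_MODIFIER "x flags=%d\n",
               i, wp->vaddr, wp->flags);
    }

    /* free the pointed-to structures, then the array itself */
    for (i = 0; i < wps->len; i++) {
        g_free(g_array_index(wps, Watchpoint *, i));
    }
    g_array_free(wps, TRUE);
    return 0;
}

The same insert/iterate shape is what replaces the QTAILQ_INSERT_* and
QTAILQ_FOREACH calls in the hunks below.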
diff --git a/cpu-exec.c b/cpu-exec.c
index b840e1d..2b49337 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -388,9 +388,11 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
+ int i;
- if (!cpu->watchpoint_hit) {
- QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
+ if (!cpu->watchpoint_hit && cpu->watchpoints) {
+ for (i = 0; i < cpu->watchpoints->len; i++) {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
diff --git a/exec.c b/exec.c
index 0122ef7..e73c909 100644
--- a/exec.c
+++ b/exec.c
@@ -769,17 +769,22 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
return -EINVAL;
}
- wp = g_malloc(sizeof(*wp));
+ /* Allocate if no previous watchpoints */
+ if (!cpu->watchpoints) {
+ cpu->watchpoints = g_array_new(false, false, sizeof(CPUWatchpoint *));
+ }
+
+ wp = g_malloc(sizeof(*wp));
wp->vaddr = addr;
wp->len = len;
wp->flags = flags;
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB) {
- QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
+ g_array_prepend_val(cpu->watchpoints, wp);
} else {
- QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
+ g_array_append_val(cpu->watchpoints, wp);
}
tlb_flush_page(cpu, addr);
@@ -794,13 +799,17 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
{
CPUWatchpoint *wp;
+ int i = 0;
- QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (addr == wp->vaddr && len == wp->len
+ if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ do {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ if (wp && addr == wp->vaddr && len == wp->len
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
- cpu_watchpoint_remove_by_ref(cpu, wp);
- return 0;
- }
+ cpu_watchpoint_remove_by_ref(cpu, wp);
+ return 0;
+ }
+ } while (i++ < cpu->watchpoints->len);
}
return -ENOENT;
}
@@ -808,7 +817,18 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
- QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
+ CPUWatchpoint *wp;
+ int i;
+
+ g_assert(cpu->watchpoints);
+
+ for (i = 0; i < cpu->watchpoints->len; i++) {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ if (wp == watchpoint) {
+ g_array_remove_index_fast(cpu->watchpoints, i);
+ break;
+ }
+ }
tlb_flush_page(cpu, watchpoint->vaddr);
@@ -818,11 +838,15 @@ void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
- CPUWatchpoint *wp, *next;
+ CPUWatchpoint *wp;
+ int i;
- QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
- if (wp->flags & mask) {
- cpu_watchpoint_remove_by_ref(cpu, wp);
+ if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ for (i = cpu->watchpoints->len; i == 0; i--) {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ if (wp->flags & mask) {
+ cpu_watchpoint_remove_by_ref(cpu, wp);
+ }
}
}
}
@@ -855,6 +879,11 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
{
CPUBreakpoint *bp;
+ /* Allocate if no previous breakpoints */
+ if (!cpu->breakpoints) {
+ cpu->breakpoints = g_array_new(false, false, sizeof(CPUBreakpoint *));
+ }
+
bp = g_malloc(sizeof(*bp));
bp->pc = pc;
@@ -862,9 +891,9 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB) {
- QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
+ g_array_prepend_val(cpu->breakpoints, bp);
} else {
- QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
+ g_array_append_val(cpu->breakpoints, bp);
}
breakpoint_invalidate(cpu, pc);
@@ -879,8 +908,12 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
CPUBreakpoint *bp;
+ int i;
+
+ g_assert(cpu->breakpoints);
- QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+ for (i = 0; i < cpu->breakpoints->len; i++) {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(cpu, bp);
return 0;
@@ -892,7 +925,16 @@ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
- QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
+ CPUBreakpoint *bp;
+ int i;
+
+ for (i = 0; i < cpu->breakpoints->len; i++) {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ if (bp == breakpoint) {
+ g_array_remove_index_fast(cpu->breakpoints, i);
+ break;
+ }
+ }
breakpoint_invalidate(cpu, breakpoint->pc);
@@ -902,11 +944,15 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
- CPUBreakpoint *bp, *next;
+ CPUBreakpoint *bp;
+ int i;
- QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
- if (bp->flags & mask) {
- cpu_breakpoint_remove_by_ref(cpu, bp);
+ if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ for (i = cpu->breakpoints->len; i == 0; i--) {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ if (bp->flags & mask) {
+ cpu_breakpoint_remove_by_ref(cpu, bp);
+ }
}
}
}
@@ -1068,7 +1114,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
target_ulong *address)
{
hwaddr iotlb;
- CPUWatchpoint *wp;
if (memory_region_is_ram(section->mr)) {
/* Normal RAM. */
@@ -1088,13 +1133,18 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
- /* Avoid trapping reads of pages with a write breakpoint. */
- if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
- iotlb = PHYS_SECTION_WATCH + paddr;
- *address |= TLB_MMIO;
- break;
+ if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ CPUWatchpoint *wp;
+ int i;
+ for (i = 0; i < cpu->watchpoints->len; i++) {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
+ /* Avoid trapping reads of pages with a write breakpoint. */
+ if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
+ iotlb = PHYS_SECTION_WATCH + paddr;
+ *address |= TLB_MMIO;
+ break;
+ }
}
}
}
@@ -2055,7 +2105,6 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
- CPUWatchpoint *wp;
uint32_t cpu_flags;
if (cpu->watchpoint_hit) {
@@ -2066,35 +2115,41 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
return;
}
vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- if (cpu_watchpoint_address_matches(wp, vaddr, len)
- && (wp->flags & flags)) {
- if (flags == BP_MEM_READ) {
- wp->flags |= BP_WATCHPOINT_HIT_READ;
- } else {
- wp->flags |= BP_WATCHPOINT_HIT_WRITE;
- }
- wp->hitaddr = vaddr;
- wp->hitattrs = attrs;
- if (!cpu->watchpoint_hit) {
- if (wp->flags & BP_CPU &&
- !cc->debug_check_watchpoint(cpu, wp)) {
- wp->flags &= ~BP_WATCHPOINT_HIT;
- continue;
- }
- cpu->watchpoint_hit = wp;
- tb_check_watchpoint(cpu);
- if (wp->flags & BP_STOP_BEFORE_ACCESS) {
- cpu->exception_index = EXCP_DEBUG;
- cpu_loop_exit(cpu);
+
+ if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ CPUWatchpoint *wp;
+ int i;
+ for (i = 0; i < cpu->watchpoints->len; i++) {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ if (cpu_watchpoint_address_matches(wp, vaddr, len)
+ && (wp->flags & flags)) {
+ if (flags == BP_MEM_READ) {
+ wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
- cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
- tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
- cpu_loop_exit_noexc(cpu);
+ wp->flags |= BP_WATCHPOINT_HIT_WRITE;
+ }
+ wp->hitaddr = vaddr;
+ wp->hitattrs = attrs;
+ if (!cpu->watchpoint_hit) {
+ if (wp->flags & BP_CPU &&
+ !cc->debug_check_watchpoint(cpu, wp)) {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
+ continue;
+ }
+ cpu->watchpoint_hit = wp;
+ tb_check_watchpoint(cpu);
+ if (wp->flags & BP_STOP_BEFORE_ACCESS) {
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
+ } else {
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
+ tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
+ cpu_loop_exit_noexc(cpu);
+ }
}
+ } else {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
}
- } else {
- wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 32f3af3..820a56d 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -204,7 +204,6 @@ typedef struct icount_decr_u16 {
typedef struct CPUBreakpoint {
vaddr pc;
int flags; /* BP_* */
- QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
struct CPUWatchpoint {
@@ -213,7 +212,6 @@ struct CPUWatchpoint {
vaddr hitaddr;
MemTxAttrs hitattrs;
int flags; /* BP_* */
- QTAILQ_ENTRY(CPUWatchpoint) entry;
};
struct KVMState;
@@ -321,10 +319,13 @@ struct CPUState {
int gdb_num_g_regs;
QTAILQ_ENTRY(CPUState) node;
- /* ice debug support */
- QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
-
- QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
+ /* Debugging support:
+ *
+ * Both the gdbstub and architectural debug support will add
+ * references to these arrays.
+ */
+ GArray *breakpoints;
+ GArray *watchpoints;
CPUWatchpoint *watchpoint_hit;
void *opaque;
@@ -823,10 +824,11 @@ void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
- CPUBreakpoint *bp;
-
- if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
- QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+ if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ CPUBreakpoint *bp;
+ int i;
+ for (i = 0; i < cpu->breakpoints->len; i++) {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
if (bp->pc == pc && (bp->flags & mask)) {
return true;
}
diff --git a/linux-user/main.c b/linux-user/main.c
index b9a4e0e..84a1ede 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3802,8 +3802,6 @@ CPUArchState *cpu_copy(CPUArchState *env)
CPUState *cpu = ENV_GET_CPU(env);
CPUState *new_cpu = cpu_init(cpu_model);
CPUArchState *new_env = new_cpu->env_ptr;
- CPUBreakpoint *bp;
- CPUWatchpoint *wp;
/* Reset non arch specific state */
cpu_reset(new_cpu);
@@ -3813,13 +3811,21 @@ CPUArchState *cpu_copy(CPUArchState *env)
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
- QTAILQ_INIT(&new_cpu->breakpoints);
- QTAILQ_INIT(&new_cpu->watchpoints);
- QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
- cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
+ if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ CPUBreakpoint *bp;
+ int i;
+ for (i = 0; i < cpu->breakpoints->len; i++) {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
+ }
}
- QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
- cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
+ if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ CPUWatchpoint *wp;
+ int i;
+ for (i = 0; i < cpu->watchpoints->len; i++) {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
+ }
}
return new_env;
diff --git a/qom/cpu.c b/qom/cpu.c
index 751e992..7943194 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -328,8 +328,6 @@ static void cpu_common_initfn(Object *obj)
cpu->cpu_index = -1;
cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
qemu_mutex_init(&cpu->work_mutex);
- QTAILQ_INIT(&cpu->breakpoints);
- QTAILQ_INIT(&cpu->watchpoints);
}
static void cpu_common_finalize(Object *obj)
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index f5e29d2..597e973 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -11201,9 +11201,11 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
tcg_gen_insn_start(dc->pc, 0, 0);
num_insns++;
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ if (unlikely(cs->breakpoints) && unlikely(cs->breakpoints->len)) {
CPUBreakpoint *bp;
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ int i;
+ for (i = 0; i < cs->breakpoints->len; i++) {
+ bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
if (bp->pc == dc->pc) {
if (bp->flags & BP_CPU) {
gen_a64_set_pc_im(dc->pc);
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 3e71467..4e20ab5 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -11783,9 +11783,11 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
}
#endif
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ if (unlikely(cs->breakpoints) && unlikely(cs->breakpoints->len)) {
CPUBreakpoint *bp;
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ int i;
+ for (i = 0; i < cs->breakpoints->len; i++) {
+ bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
if (bp->pc == dc->pc) {
if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
diff --git a/target-i386/bpt_helper.c b/target-i386/bpt_helper.c
index 6fd7fe0..84adf71 100644
--- a/target-i386/bpt_helper.c
+++ b/target-i386/bpt_helper.c
@@ -210,7 +210,6 @@ void breakpoint_handler(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
if (cs->watchpoint_hit->flags & BP_CPU) {
@@ -222,7 +221,10 @@ void breakpoint_handler(CPUState *cs)
}
}
} else {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ CPUBreakpoint *bp;
+ int i;
+ for (i = 0; i < cs->breakpoints->len; i++) {
+ bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, true);
diff --git a/target-lm32/helper.c b/target-lm32/helper.c
index 891da18..ff61a1f 100644
--- a/target-lm32/helper.c
+++ b/target-lm32/helper.c
@@ -133,7 +133,6 @@ void lm32_debug_excp_handler(CPUState *cs)
{
LM32CPU *cpu = LM32_CPU(cs);
CPULM32State *env = &cpu->env;
- CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
if (cs->watchpoint_hit->flags & BP_CPU) {
@@ -145,7 +144,10 @@ void lm32_debug_excp_handler(CPUState *cs)
}
}
} else {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ CPUBreakpoint *bp;
+ int i;
+ for (i = 0; i < cs->breakpoints->len; i++) {
+ bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
if (bp->pc == env->pc) {
if (bp->flags & BP_CPU) {
raise_exception(env, EXCP_BREAKPOINT);
--
2.7.4
* Re: [Qemu-devel] [RFC 1/7] cpu: move break/watchpoints into arrays.
2016-06-17 16:33 ` [Qemu-devel] [RFC 1/7] cpu: move break/watchpoints into arrays Alex Bennée
@ 2016-06-17 17:03 ` Paolo Bonzini
0 siblings, 0 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 17:03 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana,
Peter Crosthwaite, Riku Voipio, Eduardo Habkost, Michael Walle,
open list:ARM
On 17/06/2016 18:33, Alex Bennée wrote:
> Before we can protect the lists we need a structure that is a little
> more amenable to RCU protection. This moves all the lists into a
> re-sizeable array. The array still only points to separately allocated
> structures, because a number of the architectures still need to examine
> the break/watchpoint fields after a hit.
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
> cpu-exec.c | 6 +-
> exec.c | 167 ++++++++++++++++++++++++++++++---------------
Can you look into moving this to cpu-exec.c? (or cpu-exec-common.c
perhaps?) It's TCG only, so it doesn't really belong in exec.c if we
can help it.
Paolo
> include/qom/cpu.h | 22 +++---
> linux-user/main.c | 22 +++---
> qom/cpu.c | 2 -
> target-arm/translate-a64.c | 6 +-
> target-arm/translate.c | 6 +-
> target-i386/bpt_helper.c | 6 +-
> target-lm32/helper.c | 6 +-
> 9 files changed, 157 insertions(+), 86 deletions(-)
* [Qemu-devel] [RFC 2/7] exec: keep CPUWatchpoint references internal
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
2016-06-17 16:33 ` [Qemu-devel] [RFC 1/7] cpu: move break/watchpoints into arrays Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 17:17 ` Paolo Bonzini
2016-06-17 16:33 ` [Qemu-devel] [RFC 3/7] exec: keep CPUBreakpoint " Alex Bennée
` (6 subsequent siblings)
8 siblings, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Peter Crosthwaite, Riku Voipio,
Eduardo Habkost, Michael Walle, Alexander Graf, Max Filippov,
open list:ARM
In preparation for the conversion to an RCU controlled list of
watchpoints I've removed all $ARCH local copies of the watchpoint
structures. They can be accessed with cpu_watchpoint_get_by_ref() which
will eventually offer them for the lifetime of the rcu_read_lock().
Instead of using pointers as handles to architecture specific registers
they now use plain integer references.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
exec.c | 121 ++++++++++++++++++++++++++++++++--------------
gdbstub.c | 2 +-
include/qom/cpu.h | 63 ++++++++++++++++++++++--
linux-user/main.c | 2 +-
target-arm/cpu.h | 1 -
target-arm/helper.c | 12 ++---
target-arm/op_helper.c | 4 +-
target-i386/bpt_helper.c | 25 +++++-----
target-i386/cpu.h | 3 +-
target-lm32/cpu.h | 1 -
target-lm32/helper.c | 15 ++----
target-s390x/helper.c | 10 ++--
target-xtensa/cpu.h | 3 --
target-xtensa/helper.c | 4 +-
target-xtensa/op_helper.c | 16 ++----
15 files changed, 177 insertions(+), 105 deletions(-)
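
Before the diff, a hedged, standalone sketch of the integer-reference
scheme the commit message describes (again not QEMU code; the names and
the NOREF sentinel are simplified assumptions for illustration):

/* Standalone sketch of watchpoints keyed by an external integer ref. */
#include <glib.h>
#include <stdio.h>

#define NOREF -1

typedef struct {
    guint64 vaddr;
    int flags;
    int ref;               /* external handle, or NOREF */
} Watchpoint;

/* Linear scan for a watchpoint carrying the given external reference. */
static Watchpoint *wp_get_by_ref(GArray *wps, int ref)
{
    guint i;
    for (i = 0; i < wps->len; i++) {
        Watchpoint *wp = g_array_index(wps, Watchpoint *, i);
        if (wp->ref == ref) {
            return wp;
        }
    }
    return NULL;
}

/* Insert, or update in place if the reference already exists. */
static void wp_insert_with_ref(GArray *wps, guint64 addr, int flags, int ref)
{
    Watchpoint *wp = (ref != NOREF) ? wp_get_by_ref(wps, ref) : NULL;
    if (!wp) {
        wp = g_new0(Watchpoint, 1);
        g_array_append_val(wps, wp);
    }
    wp->vaddr = addr;
    wp->flags = flags;
    wp->ref = ref;
}

int main(void)
{
    GArray *wps = g_array_new(FALSE, FALSE, sizeof(Watchpoint *));
    guint i;

    wp_insert_with_ref(wps, 0x1000, 0, 3);   /* e.g. debug register 3 */
    wp_insert_with_ref(wps, 0x2000, 0, 3);   /* same ref: updates in place */

    for (i = 0; i < wps->len; i++) {
        Watchpoint *wp = g_array_index(wps, Watchpoint *, i);
        printf("ref=%d vaddr=0x%" G_GINT64_MODIFIER "x\n", wp->ref, wp->vaddr);
        g_free(wp);
    }
    g_array_free(wps, TRUE);
    return 0;
}

Inserting twice with the same reference updates the existing entry in
place, which is the behaviour the new cpu_watchpoint_insert_with_ref()
documents below.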
diff --git a/exec.c b/exec.c
index e73c909..3dc3332 100644
--- a/exec.c
+++ b/exec.c
@@ -747,21 +747,42 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
return -ENOSYS;
}
-void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
+int cpu_watchpoint_remove_by_ref(CPUState *cpu, int ref)
{
+ return -ENOENT;
}
-int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
- int flags, CPUWatchpoint **watchpoint)
+int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, int ref)
+{
+ return -ENOSYS;
+}
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags)
{
return -ENOSYS;
}
+CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
+{
+ return NULL;
+}
#else
+/* Find watchpoint with external reference */
+CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
+{
+ CPUWatchpoint *wp = NULL;
+ int i = 0;
+ do {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i++);
+ } while (wp && wp->ref != ref);
+
+ return wp;
+}
+
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
- int flags, CPUWatchpoint **watchpoint)
+int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, int ref)
{
- CPUWatchpoint *wp;
+ CPUWatchpoint *wp = NULL;
/* forbid ranges which are empty or run off the end of the address space */
if (len == 0 || (addr + len - 1) < addr) {
@@ -772,28 +793,55 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
/* Allocate if no previous watchpoints */
if (!cpu->watchpoints) {
- cpu->watchpoints = g_array_new(false, false, sizeof(CPUWatchpoint *));
+ cpu->watchpoints = g_array_new(true, false, sizeof(CPUWatchpoint *));
}
- wp = g_malloc(sizeof(*wp));
- wp->vaddr = addr;
- wp->len = len;
- wp->flags = flags;
+ /* Find old watchpoint */
+ if (ref != WP_NOREF) {
+ wp = cpu_watchpoint_get_by_ref(cpu, ref);
+ }
- /* keep all GDB-injected watchpoints in front */
- if (flags & BP_GDB) {
- g_array_prepend_val(cpu->watchpoints, wp);
+ if (wp) {
+ wp->vaddr = addr;
+ wp->len = len;
+ wp->flags = flags;
+ wp->ref = ref;
} else {
- g_array_append_val(cpu->watchpoints, wp);
+ wp = g_malloc(sizeof(*wp));
+
+ wp->vaddr = addr;
+ wp->len = len;
+ wp->flags = flags;
+ wp->ref = ref;
+
+ /* keep all GDB-injected watchpoints in front */
+ if (flags & BP_GDB) {
+ g_array_prepend_val(cpu->watchpoints, wp);
+ } else {
+ g_array_append_val(cpu->watchpoints, wp);
+ }
}
+
tlb_flush_page(cpu, addr);
- if (watchpoint)
- *watchpoint = wp;
return 0;
}
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags)
+{
+ return cpu_watchpoint_insert_with_ref(cpu, addr, len, flags, WP_NOREF);
+}
+
+static void cpu_watchpoint_delete(CPUState *cpu, int index)
+{
+ CPUWatchpoint *wp;
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, index);
+ g_array_remove_index(cpu->watchpoints, index);
+ tlb_flush_page(cpu, wp->vaddr);
+ g_free(wp);
+}
+
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
@@ -806,48 +854,49 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
if (wp && addr == wp->vaddr && len == wp->len
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
- cpu_watchpoint_remove_by_ref(cpu, wp);
+ cpu_watchpoint_delete(cpu, i);
return 0;
}
} while (i++ < cpu->watchpoints->len);
}
+
return -ENOENT;
}
-/* Remove a specific watchpoint by reference. */
-void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
+/* Remove a specific watchpoint by external reference. */
+int cpu_watchpoint_remove_by_ref(CPUState *cpu, int ref)
{
CPUWatchpoint *wp;
- int i;
-
- g_assert(cpu->watchpoints);
+ int i = 0;
- for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
- if (wp == watchpoint) {
- g_array_remove_index_fast(cpu->watchpoints, i);
- break;
- }
+ if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ do {
+ wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ if (wp && wp->ref == ref) {
+ cpu_watchpoint_delete(cpu, i);
+ return 0;
+ }
+ } while (wp && i++ < cpu->watchpoints->len);
}
- tlb_flush_page(cpu, watchpoint->vaddr);
-
- g_free(watchpoint);
+ return -ENOENT;
}
/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
CPUWatchpoint *wp;
- int i;
if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
- for (i = cpu->watchpoints->len; i == 0; i--) {
+ int i = cpu->watchpoints->len;
+ do {
wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
if (wp->flags & mask) {
- cpu_watchpoint_remove_by_ref(cpu, wp);
+ cpu_watchpoint_delete(cpu, i);
+ } else {
+ i--;
}
- }
+ } while (cpu->watchpoints->len && i >= 0);
}
}
diff --git a/gdbstub.c b/gdbstub.c
index 5da66f1..548144e 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -688,7 +688,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
case GDB_WATCHPOINT_ACCESS:
CPU_FOREACH(cpu) {
err = cpu_watchpoint_insert(cpu, addr, len,
- xlat_gdb_type(cpu, type), NULL);
+ xlat_gdb_type(cpu, type));
if (err) {
break;
}
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 820a56d..81edfdb 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -206,12 +206,15 @@ typedef struct CPUBreakpoint {
int flags; /* BP_* */
} CPUBreakpoint;
+#define WP_NOREF -1
+
struct CPUWatchpoint {
vaddr vaddr;
vaddr len;
vaddr hitaddr;
MemTxAttrs hitattrs;
int flags; /* BP_* */
+ int ref; /* reference or WP_NOREF */
};
struct KVMState;
@@ -837,11 +840,61 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
return false;
}
-int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
- int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
- vaddr len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
+/**
+ * cpu_watchpoint_insert:
+ *
+ * @cpu: CPU to monitor
+ * @addr: address of watchpoint
+ * @len: length of watched region
+ * @flags: access/type flags
+ *
+ * Watchpoints added this way can only be removed by resetting all
+ * watchpoints.
+ */
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags);
+
+/**
+ * cpu_watchpoint_insert_with_ref:
+ *
+ * @cpu: CPU to monitor
+ * @addr: address of watchpoint
+ * @len: length of watched region
+ * @flags: access/type flags
+ * @ref: unique reference
+ *
+ * Inserting a watchpoint with a matching reference will replace any
+ * current watchpoint with the same reference.
+ */
+int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, int ref);
+
+/**
+ * cpu_watchpoint_get_by_ref:
+ *
+ * @cpu: CPU to monitor
+ * @ref: unique reference
+ *
+ * @return: a pointer to a working copy of the watchpoint.
+ *
+ * Return a working copy of the current referenced watchpoint. This
+ * obviously only works for watchpoints inserted with a reference. The
+ * lifetime of this object will be limited and should not be kept
+ * beyond its immediate use. Otherwise return NULL.
+ */
+CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref);
+
+/**
+ * cpu_watchpoint_remove_by_ref:
+ *
+ * @cpu: CPU to monitor
+ * @ref: unique reference
+ *
+ * Remove a referenced watchpoint
+ */
+int cpu_watchpoint_remove_by_ref(CPUState *cpu, int ref);
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, int flags);
+
+
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
/**
diff --git a/linux-user/main.c b/linux-user/main.c
index 84a1ede..2d65508 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3824,7 +3824,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
int i;
for (i = 0; i < cpu->watchpoints->len; i++) {
wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
- cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
+ cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags);
}
}
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 942aa36..4d04a76 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -494,7 +494,6 @@ typedef struct CPUARMState {
#endif
struct CPUBreakpoint *cpu_breakpoint[16];
- struct CPUWatchpoint *cpu_watchpoint[16];
CPU_COMMON
diff --git a/target-arm/helper.c b/target-arm/helper.c
index c9730d6..50d70ce 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -3942,13 +3942,10 @@ void hw_watchpoint_update(ARMCPU *cpu, int n)
int mask;
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
- if (env->cpu_watchpoint[n]) {
- cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
- env->cpu_watchpoint[n] = NULL;
- }
if (!extract64(wcr, 0, 1)) {
/* E bit clear : watchpoint disabled */
+ cpu_watchpoint_remove_by_ref(CPU(cpu), n);
return;
}
@@ -4012,22 +4009,19 @@ void hw_watchpoint_update(ARMCPU *cpu, int n)
wvr += basstart;
}
- cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
- &env->cpu_watchpoint[n]);
+ cpu_watchpoint_insert_with_ref(CPU(cpu), wvr, len, flags, n);
}
void hw_watchpoint_update_all(ARMCPU *cpu)
{
int i;
- CPUARMState *env = &cpu->env;
/* Completely clear out existing QEMU watchpoints and our array, to
* avoid possible stale entries following migration load.
*/
cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
- memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
- for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
+ for (i = 0; i < 16; i++) {
hw_watchpoint_update(cpu, i);
}
}
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 35912a1..02d6265 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -1056,7 +1056,7 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
int access_el = arm_current_el(env);
if (is_wp) {
- CPUWatchpoint *wp = env->cpu_watchpoint[n];
+ CPUWatchpoint *wp = cpu_watchpoint_get_by_ref(CPU(cpu), n);
if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
return false;
@@ -1153,7 +1153,7 @@ static bool check_watchpoints(ARMCPU *cpu)
return false;
}
- for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+ for (n = 0; n < 16; n++) {
if (bp_wp_matches(cpu, n, true)) {
return true;
}
diff --git a/target-i386/bpt_helper.c b/target-i386/bpt_helper.c
index 84adf71..1793e17 100644
--- a/target-i386/bpt_helper.c
+++ b/target-i386/bpt_helper.c
@@ -73,19 +73,17 @@ static int hw_breakpoint_insert(CPUX86State *env, int index)
case DR7_TYPE_DATA_WR:
if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_watchpoint_insert(cs, drN,
- hw_breakpoint_len(dr7, index),
- BP_CPU | BP_MEM_WRITE,
- &env->cpu_watchpoint[index]);
+ err = cpu_watchpoint_insert_with_ref(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_WRITE, index);
}
break;
case DR7_TYPE_DATA_RW:
if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_watchpoint_insert(cs, drN,
- hw_breakpoint_len(dr7, index),
- BP_CPU | BP_MEM_ACCESS,
- &env->cpu_watchpoint[index]);
+ err = cpu_watchpoint_insert_with_ref(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_ACCESS, index);
}
break;
}
@@ -109,10 +107,7 @@ static void hw_breakpoint_remove(CPUX86State *env, int index)
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
- if (env->cpu_breakpoint[index]) {
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
- env->cpu_breakpoint[index] = NULL;
- }
+ cpu_watchpoint_remove_by_ref(cs, index);
break;
case DR7_TYPE_IO_RW:
@@ -183,11 +178,13 @@ static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
break;
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
- if (env->cpu_watchpoint[reg] &&
- env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
+ {
+ CPUWatchpoint *wp = cpu_watchpoint_get_by_ref(CPU(env), reg);
+ if (wp && wp->flags & BP_WATCHPOINT_HIT) {
wp_match = true;
}
break;
+ }
case DR7_TYPE_IO_RW:
break;
}
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index d9ab884..87f62be 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1059,8 +1059,7 @@ typedef struct CPUX86State {
target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
union {
struct CPUBreakpoint *cpu_breakpoint[4];
- struct CPUWatchpoint *cpu_watchpoint[4];
- }; /* break/watchpoints for dr[0..3] */
+ }; /* breakpoints for dr[0..3] */
int old_exception; /* exception in flight */
uint64_t vm_vmcb;
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 62880f7..1512119 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -163,7 +163,6 @@ struct CPULM32State {
uint32_t wp[4]; /* watchpoints */
struct CPUBreakpoint *cpu_breakpoint[4];
- struct CPUWatchpoint *cpu_watchpoint[4];
CPU_COMMON
diff --git a/target-lm32/helper.c b/target-lm32/helper.c
index ff61a1f..8c6ae52 100644
--- a/target-lm32/helper.c
+++ b/target-lm32/helper.c
@@ -98,21 +98,14 @@ void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
}
if (flags != 0) {
- cpu_watchpoint_insert(CPU(cpu), address, 1, flags,
- &env->cpu_watchpoint[idx]);
+ cpu_watchpoint_insert_with_ref(CPU(cpu), address, 1, flags, idx);
}
}
void lm32_watchpoint_remove(CPULM32State *env, int idx)
{
LM32CPU *cpu = lm32_env_get_cpu(env);
-
- if (!env->cpu_watchpoint[idx]) {
- return;
- }
-
- cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[idx]);
- env->cpu_watchpoint[idx] = NULL;
+ cpu_watchpoint_remove_by_ref(CPU(cpu), idx);
}
static bool check_watchpoints(CPULM32State *env)
@@ -121,8 +114,8 @@ static bool check_watchpoints(CPULM32State *env)
int i;
for (i = 0; i < cpu->num_watchpoints; i++) {
- if (env->cpu_watchpoint[i] &&
- env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
+ CPUWatchpoint *wp = cpu_watchpoint_get_by_ref(CPU(cpu), i);
+ if (wp && wp->flags & BP_WATCHPOINT_HIT) {
return true;
}
}
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index 9a744df..cfa2b20 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -646,19 +646,19 @@ void s390_cpu_recompute_watchpoints(CPUState *cs)
if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
/* We can't create a watchoint spanning the whole memory range, so
split it in two parts. */
- cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
- cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
+ cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags);
+ cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags);
} else if (env->cregs[10] > env->cregs[11]) {
/* The address range loops, create two watchpoints. */
cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
- wp_flags, NULL);
- cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
+ wp_flags);
+ cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags);
} else {
/* Default case, create a single watchpoint. */
cpu_watchpoint_insert(cs, env->cregs[10],
env->cregs[11] - env->cregs[10] + 1,
- wp_flags, NULL);
+ wp_flags);
}
}
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index 442176a..5d1ea4a 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -374,9 +374,6 @@ typedef struct CPUXtensaState {
int exception_taken;
- /* Watchpoints for DBREAK registers */
- struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
-
CPU_COMMON
} CPUXtensaState;
diff --git a/target-xtensa/helper.c b/target-xtensa/helper.c
index 768b32c..9505701 100644
--- a/target-xtensa/helper.c
+++ b/target-xtensa/helper.c
@@ -86,8 +86,8 @@ static uint32_t check_hw_breakpoints(CPUXtensaState *env)
unsigned i;
for (i = 0; i < env->config->ndbreak; ++i) {
- if (env->cpu_watchpoint[i] &&
- env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
+ CPUWatchpoint *wp = cpu_watchpoint_get_by_ref(CPU(env), i);
+ if (wp && wp->flags & BP_WATCHPOINT_HIT) {
return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
}
}
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index bc3667f..01656b2 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -797,9 +797,6 @@ static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
uint32_t mask = dbreakc | ~DBREAKC_MASK;
- if (env->cpu_watchpoint[i]) {
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
- }
if (dbreakc & DBREAKC_SB) {
flags |= BP_MEM_WRITE;
}
@@ -812,9 +809,8 @@ static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
/* cut mask after the first zero bit */
mask = 0xffffffff << (32 - clo32(mask));
}
- if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
- flags, &env->cpu_watchpoint[i])) {
- env->cpu_watchpoint[i] = NULL;
+ if (cpu_watchpoint_insert_with_ref(cs, dbreaka & mask, ~mask + 1,
+ flags, i)) {
qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
dbreaka & mask, ~mask + 1);
}
@@ -837,12 +833,8 @@ void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
if (v & DBREAKC_SB_LB) {
set_dbreak(env, i, env->sregs[DBREAKA + i], v);
} else {
- if (env->cpu_watchpoint[i]) {
- CPUState *cs = CPU(xtensa_env_get_cpu(env));
-
- cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
- env->cpu_watchpoint[i] = NULL;
- }
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ cpu_watchpoint_remove_by_ref(cs, i);
}
}
env->sregs[DBREAKC + i] = v;
--
2.7.4
* Re: [Qemu-devel] [RFC 2/7] exec: keep CPUWatchpoint references internal
2016-06-17 16:33 ` [Qemu-devel] [RFC 2/7] exec: keep CPUWatchpoint references internal Alex Bennée
@ 2016-06-17 17:17 ` Paolo Bonzini
0 siblings, 0 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 17:17 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana,
Peter Crosthwaite, Riku Voipio, Eduardo Habkost, Michael Walle,
Alexander Graf, Max Filippov, open list:ARM
On 17/06/2016 18:33, Alex Bennée wrote:
> + wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, index);
Worth adding macros or inline functions cpu_breakpoint_at(cpu, index)
and cpu_watchpoint_at(cpu, index)? This will also be less churn in
patch 4, because the macros will always return CPU{Break,Watch}point *.
(I'm making these suggestions as I skim the patches, but they often
apply earlier in the series. Sorry).
Paolo
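
As an editorial illustration of this suggestion, a hedged sketch of what
such accessors might look like once patch 4 stores the structures by
value; the names follow the proposal above and are not existing QEMU
API, and the fragment would live alongside the declarations in
include/qom/cpu.h rather than compile on its own:

/* Hypothetical inline accessors (names as suggested above).  They
 * assume cpu->breakpoints / cpu->watchpoints are GArrays holding the
 * structures by value, as introduced in patch 4 of this series. */
static inline CPUBreakpoint *cpu_breakpoint_at(CPUState *cpu, int index)
{
    return &g_array_index(cpu->breakpoints, CPUBreakpoint, index);
}

static inline CPUWatchpoint *cpu_watchpoint_at(CPUState *cpu, int index)
{
    return &g_array_index(cpu->watchpoints, CPUWatchpoint, index);
}

With accessors like these in place, the earlier patches would only need
to touch each call site once, which is the reduced churn the review
refers to.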
* [Qemu-devel] [RFC 3/7] exec: keep CPUBreakpoint references internal
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
2016-06-17 16:33 ` [Qemu-devel] [RFC 1/7] cpu: move break/watchpoints into arrays Alex Bennée
2016-06-17 16:33 ` [Qemu-devel] [RFC 2/7] exec: keep CPUWatchpoint references internal Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 16:33 ` [Qemu-devel] [RFC 4/7] break/watchpoints: store inside array Alex Bennée
` (5 subsequent siblings)
8 siblings, 0 replies; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Peter Crosthwaite, Riku Voipio,
Eduardo Habkost, Michael Walle, open list:ARM
In preparation for the conversion to an RCU-controlled list of
breakpoints I've removed all $ARCH-local references to breakpoint
structures. They can be accessed with cpu_breakpoint_get_by_ref(), which
will eventually offer them for the lifetime of the rcu_read_lock().
Instead of using pointers as handles to architecture-specific registers,
they now use plain integer references.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
exec.c | 116 +++++++++++++++++++++++++++++++----------------
gdbstub.c | 2 +-
include/qom/cpu.h | 59 +++++++++++++++++++++---
linux-user/main.c | 2 +-
target-arm/cpu.h | 2 -
target-arm/helper.c | 12 ++---
target-arm/op_helper.c | 6 +--
target-i386/bpt_helper.c | 25 ++++------
target-i386/cpu.h | 3 --
target-lm32/cpu.h | 2 -
target-lm32/helper.c | 10 +---
11 files changed, 148 insertions(+), 91 deletions(-)
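
As an editorial note before the diff, here is a hedged sketch of the
usage pattern the integer references enable on the architecture side;
the helper itself is hypothetical and simplified, but the
cpu_breakpoint_insert_with_ref()/cpu_breakpoint_remove_by_ref() calls
mirror the target-arm, target-i386 and target-lm32 hunks below:

/* Hypothetical arch-side helper: the debug-register index doubles as
 * the breakpoint reference, so the back end no longer has to cache a
 * CPUBreakpoint pointer of its own. */
static void dbg_breakpoint_update(CPUState *cs, int index, vaddr pc,
                                  int enabled)
{
    if (enabled) {
        /* replaces any breakpoint previously installed with this ref */
        cpu_breakpoint_insert_with_ref(cs, pc, BP_CPU, index);
    } else {
        cpu_breakpoint_remove_by_ref(cs, index);
    }
}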
diff --git a/exec.c b/exec.c
index 3dc3332..e80c9fe 100644
--- a/exec.c
+++ b/exec.c
@@ -797,7 +797,7 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
}
/* Find old watchpoint */
- if (ref != WP_NOREF) {
+ if (ref != BPWP_NOREF) {
wp = cpu_watchpoint_get_by_ref(cpu, ref);
}
@@ -830,7 +830,7 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags)
{
- return cpu_watchpoint_insert_with_ref(cpu, addr, len, flags, WP_NOREF);
+ return cpu_watchpoint_insert_with_ref(cpu, addr, len, flags, BPWP_NOREF);
}
static void cpu_watchpoint_delete(CPUState *cpu, int index)
@@ -921,10 +921,20 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
}
#endif
+/* Find breakpoint with external reference */
+CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
+{
+ CPUBreakpoint *bp = NULL;
+ int i = 0;
+ do {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i++);
+ } while (bp && bp->ref != ref);
+
+ return bp;
+}
/* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
- CPUBreakpoint **breakpoint)
+int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
{
CPUBreakpoint *bp;
@@ -933,76 +943,102 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
cpu->breakpoints = g_array_new(false, false, sizeof(CPUBreakpoint *));
}
- bp = g_malloc(sizeof(*bp));
-
- bp->pc = pc;
- bp->flags = flags;
+ /* Find old breakpoint */
+ if (ref != BPWP_NOREF) {
+ bp = cpu_breakpoint_get_by_ref(cpu, ref);
+ }
- /* keep all GDB-injected breakpoints in front */
- if (flags & BP_GDB) {
- g_array_prepend_val(cpu->breakpoints, bp);
+ if (bp) {
+ bp->pc = pc;
+ bp->flags = flags;
+ bp->ref = ref;
} else {
- g_array_append_val(cpu->breakpoints, bp);
+ bp = g_malloc(sizeof(*bp));
+
+ bp->pc = pc;
+ bp->flags = flags;
+ bp->ref = ref;
+
+ /* keep all GDB-injected breakpoints in front */
+ if (flags & BP_GDB) {
+ g_array_prepend_val(cpu->breakpoints, bp);
+ } else {
+ g_array_append_val(cpu->breakpoints, bp);
+ }
}
breakpoint_invalidate(cpu, pc);
- if (breakpoint) {
- *breakpoint = bp;
- }
return 0;
}
+int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags)
+{
+ return cpu_breakpoint_insert_with_ref(cpu, pc, flags, BPWP_NOREF);
+}
+
+static void cpu_breakpoint_delete(CPUState *cpu, int index)
+{
+ CPUBreakpoint *bp;
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, index);
+ g_array_remove_index(cpu->breakpoints, index);
+ breakpoint_invalidate(cpu, bp->pc);
+ g_free(bp);
+}
+
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
CPUBreakpoint *bp;
- int i;
- g_assert(cpu->breakpoints);
-
- for (i = 0; i < cpu->breakpoints->len; i++) {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
- if (bp->pc == pc && bp->flags == flags) {
- cpu_breakpoint_remove_by_ref(cpu, bp);
- return 0;
- }
+ if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ int i = 0;
+ do {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ if (bp && bp->pc == pc && bp->flags == flags) {
+ cpu_breakpoint_delete(cpu, i);
+ } else {
+ i++;
+ }
+ } while (i < cpu->breakpoints->len);
}
+
return -ENOENT;
}
/* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref)
{
CPUBreakpoint *bp;
- int i;
- for (i = 0; i < cpu->breakpoints->len; i++) {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
- if (bp == breakpoint) {
- g_array_remove_index_fast(cpu->breakpoints, i);
- break;
- }
+ if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ int i = 0;
+ do {
+ bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ if (bp && bp->ref == ref) {
+ cpu_breakpoint_delete(cpu, i);
+ } else {
+ i++;
+ }
+ } while (i < cpu->breakpoints->len);
}
-
- breakpoint_invalidate(cpu, breakpoint->pc);
-
- g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
CPUBreakpoint *bp;
- int i;
if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
- for (i = cpu->breakpoints->len; i == 0; i--) {
+ int i = 0;
+ do {
bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
if (bp->flags & mask) {
- cpu_breakpoint_remove_by_ref(cpu, bp);
+ cpu_breakpoint_delete(cpu, i);
+ } else {
+ i++;
}
- }
+ } while (i < cpu->breakpoints->len);
}
}
diff --git a/gdbstub.c b/gdbstub.c
index 548144e..c73ea42 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -676,7 +676,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
CPU_FOREACH(cpu) {
- err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
+ err = cpu_breakpoint_insert(cpu, addr, BP_GDB);
if (err) {
break;
}
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 81edfdb..e582da0 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -201,20 +201,21 @@ typedef struct icount_decr_u16 {
} icount_decr_u16;
#endif
+#define BPWP_NOREF -1
+
typedef struct CPUBreakpoint {
vaddr pc;
int flags; /* BP_* */
+ int ref; /* reference or BPWP_NOREF */
} CPUBreakpoint;
-#define WP_NOREF -1
-
struct CPUWatchpoint {
vaddr vaddr;
vaddr len;
vaddr hitaddr;
MemTxAttrs hitattrs;
int flags; /* BP_* */
- int ref; /* reference or WP_NOREF */
+ int ref; /* reference or BPWP_NOREF */
};
struct KVMState;
@@ -818,10 +819,56 @@ void cpu_single_step(CPUState *cpu, int enabled);
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
-int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
- CPUBreakpoint **breakpoint);
+/*
+ * cpu_breakpoint_insert:
+ *
+ * @cpu: CPU to monitor
+ * @pc: address of breakpoint
+ * @flags: access/type flags
+ *
+ * Breakpoints added this way can only be removed by resetting all
+ * breakpoints.
+ */
+int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags);
+
+/*
+ * cpu_breakpoint_insert_with_ref:
+ *
+ * @cpu: CPU to monitor
+ * @pc: address of breakpoint
+ * @flags: access/type flags
+ *
+ * Inserting a breakpoint with a matching reference will replace any
+ * current breakpoint with the same reference.
+ */
+int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref);
+
+/**
+ * cpu_breakpoint_get_by_ref:
+ *
+ * @cpu: CPU to monitor
+ * @ref: unique reference
+ *
+ * @return: a pointer to a working copy of the breakpoint.
+ *
+ * Return a working copy of the current referenced breakpoint. This
+ * obviously only works for breakpoints inserted with a reference. The
+ * lifetime of this object will be limited and should not be kept
+ * beyond its immediate use. Otherwise return NULL.
+ */
+CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref);
+
+/**
+ * cpu_breakpoint_remove_by_ref:
+ *
+ * @cpu: CPU to monitor
+ * @ref: unique reference
+ *
+ * Remove a referenced breakpoint
+ */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
-void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref);
+
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
/* Return true if PC matches an installed breakpoint. */
diff --git a/linux-user/main.c b/linux-user/main.c
index 2d65508..8f71608 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3816,7 +3816,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
int i;
for (i = 0; i < cpu->breakpoints->len; i++) {
bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
- cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
+ cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags);
}
}
if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 4d04a76..279ae42 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -493,8 +493,6 @@ typedef struct CPUARMState {
int eabi;
#endif
- struct CPUBreakpoint *cpu_breakpoint[16];
-
CPU_COMMON
/* These fields after the common ones so they are preserved on reset. */
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 50d70ce..3c751a4 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -4061,12 +4061,8 @@ void hw_breakpoint_update(ARMCPU *cpu, int n)
int bt;
int flags = BP_CPU;
- if (env->cpu_breakpoint[n]) {
- cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
- env->cpu_breakpoint[n] = NULL;
- }
-
if (!extract64(bcr, 0, 1)) {
+ cpu_breakpoint_remove_by_ref(CPU(cpu), n);
/* E bit clear : watchpoint disabled */
return;
}
@@ -4125,21 +4121,19 @@ void hw_breakpoint_update(ARMCPU *cpu, int n)
return;
}
- cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
+ cpu_breakpoint_insert_with_ref(CPU(cpu), addr, flags, n);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
int i;
- CPUARMState *env = &cpu->env;
/* Completely clear out existing QEMU breakpoints and our array, to
* avoid possible stale entries following migration load.
*/
cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
- memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
- for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
+ for (i = 0; i < 16; i++) {
hw_breakpoint_update(cpu, i);
}
}
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 02d6265..2a24720 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -1071,8 +1071,8 @@ static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
}
} else {
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
-
- if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+ CPUBreakpoint *bp = cpu_breakpoint_get_by_ref(CPU(cpu), n);
+ if (!bp || bp->pc != pc) {
return false;
}
cr = env->cp15.dbgbcr[n];
@@ -1174,7 +1174,7 @@ static bool check_breakpoints(ARMCPU *cpu)
return false;
}
- for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+ for (n = 0; n < 16; n++) {
if (bp_wp_matches(cpu, n, false)) {
return true;
}
diff --git a/target-i386/bpt_helper.c b/target-i386/bpt_helper.c
index 1793e17..4f2c75f 100644
--- a/target-i386/bpt_helper.c
+++ b/target-i386/bpt_helper.c
@@ -56,13 +56,11 @@ static int hw_breakpoint_insert(CPUX86State *env, int index)
CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong dr7 = env->dr[7];
target_ulong drN = env->dr[index];
- int err = 0;
switch (hw_breakpoint_type(dr7, index)) {
case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_breakpoint_insert(cs, drN, BP_CPU,
- &env->cpu_breakpoint[index]);
+ cpu_breakpoint_insert_with_ref(cs, drN, BP_CPU, index);
}
break;
@@ -73,23 +71,21 @@ static int hw_breakpoint_insert(CPUX86State *env, int index)
case DR7_TYPE_DATA_WR:
if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_watchpoint_insert_with_ref(cs, drN,
- hw_breakpoint_len(dr7, index),
- BP_CPU | BP_MEM_WRITE, index);
+ cpu_watchpoint_insert_with_ref(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_WRITE, index);
}
break;
case DR7_TYPE_DATA_RW:
if (hw_breakpoint_enabled(dr7, index)) {
- err = cpu_watchpoint_insert_with_ref(cs, drN,
- hw_breakpoint_len(dr7, index),
- BP_CPU | BP_MEM_ACCESS, index);
+ cpu_watchpoint_insert_with_ref(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_ACCESS, index);
}
break;
}
- if (err) {
- env->cpu_breakpoint[index] = NULL;
- }
+
return 0;
}
@@ -99,10 +95,7 @@ static void hw_breakpoint_remove(CPUX86State *env, int index)
switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST:
- if (env->cpu_breakpoint[index]) {
- cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
- env->cpu_breakpoint[index] = NULL;
- }
+ cpu_breakpoint_remove_by_ref(cs, index);
break;
case DR7_TYPE_DATA_WR:
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 87f62be..fd5eed3 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1057,9 +1057,6 @@ typedef struct CPUX86State {
int exception_is_int;
target_ulong exception_next_eip;
target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
- union {
- struct CPUBreakpoint *cpu_breakpoint[4];
- }; /* breakpoints for dr[0..3] */
int old_exception; /* exception in flight */
uint64_t vm_vmcb;
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 1512119..8a64acd 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -162,8 +162,6 @@ struct CPULM32State {
uint32_t bp[4]; /* breakpoints */
uint32_t wp[4]; /* watchpoints */
- struct CPUBreakpoint *cpu_breakpoint[4];
-
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
diff --git a/target-lm32/helper.c b/target-lm32/helper.c
index 8c6ae52..029fbc5 100644
--- a/target-lm32/helper.c
+++ b/target-lm32/helper.c
@@ -60,20 +60,14 @@ void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address)
{
LM32CPU *cpu = lm32_env_get_cpu(env);
- cpu_breakpoint_insert(CPU(cpu), address, BP_CPU,
- &env->cpu_breakpoint[idx]);
+ cpu_breakpoint_insert_with_ref(CPU(cpu), address, BP_CPU, idx);
}
void lm32_breakpoint_remove(CPULM32State *env, int idx)
{
LM32CPU *cpu = lm32_env_get_cpu(env);
- if (!env->cpu_breakpoint[idx]) {
- return;
- }
-
- cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[idx]);
- env->cpu_breakpoint[idx] = NULL;
+ cpu_breakpoint_remove_by_ref(CPU(cpu), idx);
}
void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
--
2.7.4
* [Qemu-devel] [RFC 4/7] break/watchpoints: store inside array
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
` (2 preceding siblings ...)
2016-06-17 16:33 ` [Qemu-devel] [RFC 3/7] exec: keep CPUBreakpoint " Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 17:15 ` Paolo Bonzini
2016-06-17 16:33 ` [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control Alex Bennée
` (4 subsequent siblings)
8 siblings, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Peter Crosthwaite, Riku Voipio,
Eduardo Habkost, Michael Walle, open list:ARM
Instead of dynamically allocating each break/watchpoint, store the
structures directly in the array. This will make it easier to use RCU to
update the array, as well as making scans of the current list more
cache-line friendly.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
cpu-exec.c | 2 +-
exec.c | 68 ++++++++++++++++++++++------------------------
include/qom/cpu.h | 2 +-
linux-user/main.c | 4 +--
target-arm/translate-a64.c | 2 +-
target-arm/translate.c | 2 +-
target-i386/bpt_helper.c | 2 +-
target-lm32/helper.c | 2 +-
8 files changed, 40 insertions(+), 44 deletions(-)
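
For illustration, a standalone sketch (not QEMU code) of the by-value
GArray storage this patch adopts and of the pointer-into-array behaviour
of &g_array_index(); the simplified Breakpoint struct is an assumption
for the example:

/* Standalone sketch of storing the structures by value in a GArray. */
#include <glib.h>
#include <stdio.h>

typedef struct {
    guint64 pc;
    int flags;
} Breakpoint;

int main(void)
{
    /* by-value storage: element size is the struct itself, and new
     * elements are zero-cleared, mirroring g_array_new(false, true, ...) */
    GArray *bps = g_array_new(FALSE, TRUE, sizeof(Breakpoint));
    Breakpoint brk = { .pc = 0x1234, .flags = 0 };
    Breakpoint *bp;
    guint i;

    g_array_append_val(bps, brk);

    /* &g_array_index() now yields a pointer *into* the array's storage,
     * so no separate allocation or g_free() of elements is needed... */
    bp = &g_array_index(bps, Breakpoint, 0);
    bp->flags |= 0x1;

    for (i = 0; i < bps->len; i++) {
        bp = &g_array_index(bps, Breakpoint, i);
        printf("bp %u: pc=0x%" G_GINT64_MODIFIER "x flags=%d\n",
               i, bp->pc, bp->flags);
    }

    /* ...but such pointers are invalidated when the array is resized or
     * an element is removed, which is why the series moves towards
     * handing out short-lived copies and integer references instead. */
    g_array_remove_index(bps, 0);
    g_array_free(bps, TRUE);
    return 0;
}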
diff --git a/cpu-exec.c b/cpu-exec.c
index 2b49337..2736a27 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -392,7 +392,7 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
if (!cpu->watchpoint_hit && cpu->watchpoints) {
for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
diff --git a/exec.c b/exec.c
index e80c9fe..c8e8823 100644
--- a/exec.c
+++ b/exec.c
@@ -772,8 +772,8 @@ CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
CPUWatchpoint *wp = NULL;
int i = 0;
do {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i++);
- } while (wp && wp->ref != ref);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i++);
+ } while (i < cpu->watchpoints->len && wp && wp->ref != ref);
return wp;
}
@@ -793,7 +793,7 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
/* Allocate if no previous watchpoints */
if (!cpu->watchpoints) {
- cpu->watchpoints = g_array_new(true, false, sizeof(CPUWatchpoint *));
+ cpu->watchpoints = g_array_new(false, true, sizeof(CPUWatchpoint));
}
/* Find old watchpoint */
@@ -807,18 +807,17 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
wp->flags = flags;
wp->ref = ref;
} else {
- wp = g_malloc(sizeof(*wp));
-
- wp->vaddr = addr;
- wp->len = len;
- wp->flags = flags;
- wp->ref = ref;
+ CPUWatchpoint watch;
+ watch.vaddr = addr;
+ watch.len = len;
+ watch.flags = flags;
+ watch.ref = ref;
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB) {
- g_array_prepend_val(cpu->watchpoints, wp);
+ g_array_prepend_val(cpu->watchpoints, watch);
} else {
- g_array_append_val(cpu->watchpoints, wp);
+ g_array_append_val(cpu->watchpoints, watch);
}
}
@@ -836,10 +835,9 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags)
static void cpu_watchpoint_delete(CPUState *cpu, int index)
{
CPUWatchpoint *wp;
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, index);
- g_array_remove_index(cpu->watchpoints, index);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, index);
tlb_flush_page(cpu, wp->vaddr);
- g_free(wp);
+ g_array_remove_index(cpu->watchpoints, index);
}
/* Remove a specific watchpoint. */
@@ -851,7 +849,7 @@ int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
do {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
if (wp && addr == wp->vaddr && len == wp->len
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_delete(cpu, i);
@@ -871,7 +869,7 @@ int cpu_watchpoint_remove_by_ref(CPUState *cpu, int ref)
if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
do {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
if (wp && wp->ref == ref) {
cpu_watchpoint_delete(cpu, i);
return 0;
@@ -890,7 +888,7 @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
int i = cpu->watchpoints->len;
do {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
if (wp->flags & mask) {
cpu_watchpoint_delete(cpu, i);
} else {
@@ -927,8 +925,8 @@ CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
CPUBreakpoint *bp = NULL;
int i = 0;
do {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i++);
- } while (bp && bp->ref != ref);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i++);
+ } while (i < cpu->breakpoints->len && bp && bp->ref != ref);
return bp;
}
@@ -936,11 +934,11 @@ CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
/* Add a breakpoint. */
int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
{
- CPUBreakpoint *bp;
+ CPUBreakpoint *bp = NULL;
/* Allocate if no previous breakpoints */
if (!cpu->breakpoints) {
- cpu->breakpoints = g_array_new(false, false, sizeof(CPUBreakpoint *));
+ cpu->breakpoints = g_array_new(false, true, sizeof(CPUBreakpoint));
}
/* Find old watchpoint */
@@ -953,17 +951,16 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
bp->flags = flags;
bp->ref = ref;
} else {
- bp = g_malloc(sizeof(*bp));
-
- bp->pc = pc;
- bp->flags = flags;
- bp->ref = ref;
+ CPUBreakpoint brk;
+ brk.pc = pc;
+ brk.flags = flags;
+ brk.ref = ref;
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB) {
- g_array_prepend_val(cpu->breakpoints, bp);
+ g_array_prepend_val(cpu->breakpoints, brk);
} else {
- g_array_append_val(cpu->breakpoints, bp);
+ g_array_append_val(cpu->breakpoints, brk);
}
}
@@ -980,10 +977,9 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags)
static void cpu_breakpoint_delete(CPUState *cpu, int index)
{
CPUBreakpoint *bp;
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, index);
- g_array_remove_index(cpu->breakpoints, index);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, index);
breakpoint_invalidate(cpu, bp->pc);
- g_free(bp);
+ g_array_remove_index(cpu->breakpoints, index);
}
/* Remove a specific breakpoint. */
@@ -994,7 +990,7 @@ int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
int i = 0;
do {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
if (bp && bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_delete(cpu, i);
} else {
@@ -1014,7 +1010,7 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref)
if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
int i = 0;
do {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
if (bp && bp->ref == ref) {
cpu_breakpoint_delete(cpu, i);
} else {
@@ -1032,7 +1028,7 @@ void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
int i = 0;
do {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
if (bp->flags & mask) {
cpu_breakpoint_delete(cpu, i);
} else {
@@ -1222,7 +1218,7 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
CPUWatchpoint *wp;
int i;
for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
@@ -2205,7 +2201,7 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
CPUWatchpoint *wp;
int i;
for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
if (cpu_watchpoint_address_matches(wp, vaddr, len)
&& (wp->flags & flags)) {
if (flags == BP_MEM_READ) {
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index e582da0..f695afb 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -878,7 +878,7 @@ static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
CPUBreakpoint *bp;
int i;
for (i = 0; i < cpu->breakpoints->len; i++) {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
if (bp->pc == pc && (bp->flags & mask)) {
return true;
}
diff --git a/linux-user/main.c b/linux-user/main.c
index 8f71608..179f2f2 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3815,7 +3815,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
CPUBreakpoint *bp;
int i;
for (i = 0; i < cpu->breakpoints->len; i++) {
- bp = g_array_index(cpu->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags);
}
}
@@ -3823,7 +3823,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
CPUWatchpoint *wp;
int i;
for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = g_array_index(cpu->watchpoints, CPUWatchpoint *, i);
+ wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags);
}
}
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 597e973..8bee51e 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -11205,7 +11205,7 @@ void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
CPUBreakpoint *bp;
int i;
for (i = 0; i < cs->breakpoints->len; i++) {
- bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cs->breakpoints, CPUBreakpoint, i);
if (bp->pc == dc->pc) {
if (bp->flags & BP_CPU) {
gen_a64_set_pc_im(dc->pc);
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 4e20ab5..a5393dc 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -11787,7 +11787,7 @@ void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
CPUBreakpoint *bp;
int i;
for (i = 0; i < cs->breakpoints->len; i++) {
- bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cs->breakpoints, CPUBreakpoint, i);
if (bp->pc == dc->pc) {
if (bp->flags & BP_CPU) {
gen_set_condexec(dc);
diff --git a/target-i386/bpt_helper.c b/target-i386/bpt_helper.c
index 4f2c75f..d7426af 100644
--- a/target-i386/bpt_helper.c
+++ b/target-i386/bpt_helper.c
@@ -214,7 +214,7 @@ void breakpoint_handler(CPUState *cs)
CPUBreakpoint *bp;
int i;
for (i = 0; i < cs->breakpoints->len; i++) {
- bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cs->breakpoints, CPUBreakpoint, i);
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, true);
diff --git a/target-lm32/helper.c b/target-lm32/helper.c
index 029fbc5..3d07a5e 100644
--- a/target-lm32/helper.c
+++ b/target-lm32/helper.c
@@ -134,7 +134,7 @@ void lm32_debug_excp_handler(CPUState *cs)
CPUBreakpoint *bp;
int i;
for (i = 0; i < cs->breakpoints->len; i++) {
- bp = g_array_index(cs->breakpoints, CPUBreakpoint *, i);
+ bp = &g_array_index(cs->breakpoints, CPUBreakpoint, i);
if (bp->pc == env->pc) {
if (bp->flags & BP_CPU) {
raise_exception(env, EXCP_BREAKPOINT);
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [RFC 4/7] break/watchpoints: store inside array
2016-06-17 16:33 ` [Qemu-devel] [RFC 4/7] break/watchpoints: store inside array Alex Bennée
@ 2016-06-17 17:15 ` Paolo Bonzini
0 siblings, 0 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 17:15 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana,
Peter Crosthwaite, Riku Voipio, Eduardo Habkost, Michael Walle,
open list:ARM
On 17/06/2016 18:33, Alex Bennée wrote:
> @@ -807,18 +807,17 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
> wp->flags = flags;
> wp->ref = ref;
> } else {
> - wp = g_malloc(sizeof(*wp));
> -
> - wp->vaddr = addr;
> - wp->len = len;
> - wp->flags = flags;
> - wp->ref = ref;
> + CPUWatchpoint watch;
> + watch.vaddr = addr;
> + watch.len = len;
> + watch.flags = flags;
> + watch.ref = ref;
>
> /* keep all GDB-injected watchpoints in front */
> if (flags & BP_GDB) {
> - g_array_prepend_val(cpu->watchpoints, wp);
> + g_array_prepend_val(cpu->watchpoints, watch);
> } else {
> - g_array_append_val(cpu->watchpoints, wp);
> + g_array_append_val(cpu->watchpoints, watch);
> }
Please define "watch" outside the "if", so that the "then" side can do just
*wp = watch;
and there is less duplication.
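i.e. something along these lines (an untested sketch of the suggestion,
not a drop-in hunk):

    CPUWatchpoint watch = {
        .vaddr = addr,
        .len   = len,
        .flags = flags,
        .ref   = ref,
    };

    if (wp) {
        *wp = watch;
    } else if (flags & BP_GDB) {
        /* keep all GDB-injected watchpoints in front */
        g_array_prepend_val(cpu->watchpoints, watch);
    } else {
        g_array_append_val(cpu->watchpoints, watch);
    }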
Paolo
^ permalink raw reply [flat|nested] 23+ messages in thread
* [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
` (3 preceding siblings ...)
2016-06-17 16:33 ` [Qemu-devel] [RFC 4/7] break/watchpoints: store inside array Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 16:59 ` Paolo Bonzini
2016-06-17 16:33 ` [Qemu-devel] [RFC 6/7] linux-user: don't clone watchpoints Alex Bennée
` (3 subsequent siblings)
8 siblings, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Peter Crosthwaite, Riku Voipio
Each time breakpoints are added to or removed from the array it is done
using a read-copy-update cycle. Simultaneous writes are protected by the
update_debug_lock.
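Schematically each writer follows this cycle (condensed from the helpers
introduced below; the actual modification of the private copy is elided):

    GArray *new;

    qemu_mutex_lock(&cpu->update_debug_lock);
    new = rcu_copy_breakpoints(cpu);      /* private copy, allocates if none yet */
    /* ... add/remove entries on the private copy ... */
    rcu_update_breakpoints(cpu, new);     /* publish; old array freed via call_rcu() */
    qemu_mutex_unlock(&cpu->update_debug_lock);

Readers only dereference atomic_rcu_read(&cpu->breakpoints) from within an
RCU critical section.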
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
cpus.c | 3 +
exec.c | 167 ++++++++++++++++++++++++++++++++++++++++++++----------
include/qom/cpu.h | 42 ++++++++++----
linux-user/main.c | 11 +---
4 files changed, 175 insertions(+), 48 deletions(-)
diff --git a/cpus.c b/cpus.c
index 84c3520..b76300b 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1441,6 +1441,9 @@ void qemu_init_vcpu(CPUState *cpu)
cpu_address_space_init(cpu, as, 0);
}
+ /* protects updates to debug info */
+ qemu_mutex_init(&cpu->update_debug_lock);
+
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (tcg_enabled()) {
diff --git a/exec.c b/exec.c
index c8e8823..d1d14c1 100644
--- a/exec.c
+++ b/exec.c
@@ -919,31 +919,94 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
}
#endif
-/* Find watchpoint with external reference */
-CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
+
+/* Create a working copy of the breakpoints.
+ *
+ * The rcu_read_lock() may already be held depending on where this
+ * update has been triggered from. However it is safe to nest
+ * rcu_read_locks() so we do the copy under the lock here.
+ */
+
+static GArray *rcu_copy_breakpoints(CPUState *cpu)
{
- CPUBreakpoint *bp = NULL;
+ GArray *old, *new;
+
+ rcu_read_lock();
+ old = atomic_rcu_read(&cpu->breakpoints);
+ if (old) {
+ new = g_array_sized_new(false, false, sizeof(CPUBreakpoint), old->len);
+ memcpy(new->data, old->data, old->len * sizeof(CPUBreakpoint));
+ new->len = old->len;
+ } else {
+ new = g_array_new(false, false, sizeof(CPUBreakpoint));
+ }
+ rcu_read_unlock();
+
+ return new;
+}
+
+struct BreakRCU {
+ struct rcu_head rcu;
+ GArray *bkpts;
+};
+
+/* RCU reclaim step */
+static void rcu_free_breakpoints(struct BreakRCU *rcu_free)
+{
+ g_array_free(rcu_free->bkpts, false);
+ g_free(rcu_free);
+}
+
+/* Called with update lock held */
+static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
+{
+ GArray *bpts = atomic_rcu_read(&cpu->breakpoints);
+ atomic_rcu_set(&cpu->breakpoints, new_bkpts);
+
+ if (bpts) {
+ struct BreakRCU *rcu_free = g_malloc(sizeof(*rcu_free));
+ rcu_free->bkpts = bpts;
+ call_rcu(rcu_free, rcu_free_breakpoints, rcu);
+ }
+}
+
+/* Find breakpoint with external reference, only valid for duration of
+ * rcu_read_lock */
+static CPUBreakpoint *find_bkpt_with_ref(GArray *bkpts, int ref)
+{
+ CPUBreakpoint *bp;
int i = 0;
+
do {
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i++);
- } while (i < cpu->breakpoints->len && bp && bp->ref != ref);
+ bp = &g_array_index(bkpts, CPUBreakpoint, i);
+ if (bp->ref == ref) {
+ return bp;
+ }
+ } while (i++ < bkpts->len);
- return bp;
+ return NULL;
+}
+
+CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
+{
+ GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
+ return find_bkpt_with_ref(bkpts, ref);
}
/* Add a breakpoint. */
int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
{
+ GArray *bkpts;
CPUBreakpoint *bp = NULL;
- /* Allocate if no previous breakpoints */
- if (!cpu->breakpoints) {
- cpu->breakpoints = g_array_new(false, true, sizeof(CPUBreakpoint));
- }
+ qemu_mutex_lock(&cpu->update_debug_lock);
+
+ /* This will allocate if no previous breakpoints */
+ bkpts = rcu_copy_breakpoints(cpu);
/* Find old watchpoint */
if (ref != BPWP_NOREF) {
- bp = cpu_breakpoint_get_by_ref(cpu, ref);
+ bp = find_bkpt_with_ref(bkpts, ref);
}
if (bp) {
@@ -958,14 +1021,17 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB) {
- g_array_prepend_val(cpu->breakpoints, brk);
+ g_array_prepend_val(bkpts, brk);
} else {
- g_array_append_val(cpu->breakpoints, brk);
+ g_array_append_val(bkpts, brk);
}
}
breakpoint_invalidate(cpu, pc);
+ rcu_update_breakpoints(cpu, bkpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
+
return 0;
}
@@ -974,67 +1040,110 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags)
return cpu_breakpoint_insert_with_ref(cpu, pc, flags, BPWP_NOREF);
}
-static void cpu_breakpoint_delete(CPUState *cpu, int index)
+/* Called with update_debug_lock held */
+static void cpu_breakpoint_delete(CPUState *cpu, GArray *bkpts, int index)
{
CPUBreakpoint *bp;
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, index);
+ bp = &g_array_index(bkpts, CPUBreakpoint, index);
breakpoint_invalidate(cpu, bp->pc);
- g_array_remove_index(cpu->breakpoints, index);
+ g_array_remove_index(bkpts, index);
}
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
+ GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
CPUBreakpoint *bp;
+ int retval = -ENOENT;
- if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ if (unlikely(bkpts) && unlikely(bkpts->len)) {
int i = 0;
+
+ qemu_mutex_lock(&cpu->update_debug_lock);
+ bkpts = rcu_copy_breakpoints(cpu);
+
do {
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
+ bp = &g_array_index(bkpts, CPUBreakpoint, i);
if (bp && bp->pc == pc && bp->flags == flags) {
- cpu_breakpoint_delete(cpu, i);
+ cpu_breakpoint_delete(cpu, bkpts, i);
+ retval = 0;
} else {
i++;
}
- } while (i < cpu->breakpoints->len);
+ } while (i < bkpts->len);
+
+ rcu_update_breakpoints(cpu, bkpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
+
}
- return -ENOENT;
+ return retval;
}
+#ifdef CONFIG_USER_ONLY
+void cpu_breakpoints_clone(CPUState *old_cpu, CPUState *new_cpu)
+{
+ GArray *bkpts = atomic_rcu_read(&old_cpu->breakpoints);
+
+ if (unlikely(bkpts) && unlikely(bkpts->len)) {
+ qemu_mutex_lock(&new_cpu->update_debug_lock);
+ bkpts = rcu_copy_breakpoints(old_cpu);
+ rcu_update_breakpoints(new_cpu, bkpts);
+ qemu_mutex_unlock(&new_cpu->update_debug_lock);
+ }
+}
+#endif
+
+
/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref)
{
+ GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
CPUBreakpoint *bp;
- if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ if (unlikely(bkpts) && unlikely(bkpts->len)) {
int i = 0;
+
+ qemu_mutex_lock(&cpu->update_debug_lock);
+ bkpts = rcu_copy_breakpoints(cpu);
+
do {
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
+ bp = &g_array_index(bkpts, CPUBreakpoint, i);
if (bp && bp->ref == ref) {
- cpu_breakpoint_delete(cpu, i);
+ cpu_breakpoint_delete(cpu, bkpts, i);
} else {
i++;
}
- } while (i < cpu->breakpoints->len);
+ } while (i < bkpts->len);
+
+ rcu_update_breakpoints(cpu, bkpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
}
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
+ GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
CPUBreakpoint *bp;
- if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ if (unlikely(bkpts) && unlikely(bkpts->len)) {
int i = 0;
+
+ qemu_mutex_lock(&cpu->update_debug_lock);
+ bkpts = rcu_copy_breakpoints(cpu);
+
do {
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
+ bp = &g_array_index(bkpts, CPUBreakpoint, i);
if (bp->flags & mask) {
- cpu_breakpoint_delete(cpu, i);
+ cpu_breakpoint_delete(cpu, bkpts, i);
} else {
i++;
}
- } while (i < cpu->breakpoints->len);
+ } while (i < bkpts->len);
+
+ rcu_update_breakpoints(cpu, bkpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
}
}
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index f695afb..51ca28c 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -326,8 +326,11 @@ struct CPUState {
/* Debugging support:
*
* Both the gdbstub and architectural debug support will add
- * references to these arrays.
+ * references to these arrays. The lists of breakpoints and
+ * watchpoints are updated with RCU. The update_debug_lock is only
+ * required for updating, not for reading.
*/
+ QemuMutex update_debug_lock;
GArray *breakpoints;
GArray *watchpoints;
CPUWatchpoint *watchpoint_hit;
@@ -849,12 +852,13 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref);
* @cpu: CPU to monitor
* @ref: unique reference
*
- * @return: a pointer to working copy of the breakpoint.
+ * @return: a pointer to working copy of the breakpoint or NULL.
*
- * Return a working copy of the current referenced breakpoint. This
- * obviously only works for breakpoints inserted with a reference. The
- * lifetime of this objected will be limited and should not be kept
- * beyond its immediate use. Otherwise return NULL.
+ * Return a short term working copy of a breakpoint marked with an
+ * external reference. This obviously only works for breakpoints
+ * inserted with a reference. The lifetime of this object is the
+ * duration of the rcu_read_lock() and it may be released when
+ * rcu_synchronize is called.
*/
CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref);
@@ -871,14 +875,32 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
-/* Return true if PC matches an installed breakpoint. */
+#ifdef CONFIG_USER_ONLY
+/**
+ * cpu_breakpoints_clone:
+ *
+ * @old_cpu: source of breakpoints
+ * @new_cpu: destination for breakpoints
+ *
+ * When a new user-mode thread is created the CPU structure behind it
+ * needs a copy of the existing breakpoint conditions. This helper does
+ * the copying.
+ */
+void cpu_breakpoints_clone(CPUState *old_cpu, CPUState *new_cpu);
+#endif
+
+/* Return true if PC matches an installed breakpoint.
+ * Called while the RCU read lock is held.
+ */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
- if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
+ GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
+
+ if (unlikely(bkpts) && unlikely(bkpts->len)) {
CPUBreakpoint *bp;
int i;
- for (i = 0; i < cpu->breakpoints->len; i++) {
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
+ for (i = 0; i < bkpts->len; i++) {
+ bp = &g_array_index(bkpts, CPUBreakpoint, i);
if (bp->pc == pc && (bp->flags & mask)) {
return true;
}
diff --git a/linux-user/main.c b/linux-user/main.c
index 179f2f2..2901626 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3808,17 +3808,10 @@ CPUArchState *cpu_copy(CPUArchState *env)
memcpy(new_env, env, sizeof(CPUArchState));
- /* Clone all break/watchpoints.
+ /* Clone all breakpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
- if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
- CPUBreakpoint *bp;
- int i;
- for (i = 0; i < cpu->breakpoints->len; i++) {
- bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
- cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags);
- }
- }
+ cpu_breakpoints_clone(cpu, new_cpu);
if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
CPUWatchpoint *wp;
int i;
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control
2016-06-17 16:33 ` [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control Alex Bennée
@ 2016-06-17 16:59 ` Paolo Bonzini
0 siblings, 0 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 16:59 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana,
Peter Crosthwaite, Riku Voipio
On 17/06/2016 18:33, Alex Bennée wrote:
> Each time breakpoints are added to or removed from the array it is done
> using a read-copy-update cycle. Simultaneous writes are protected by the
> update_debug_lock.
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
> cpus.c | 3 +
> exec.c | 167 ++++++++++++++++++++++++++++++++++++++++++++----------
> include/qom/cpu.h | 42 ++++++++++----
> linux-user/main.c | 11 +---
> 4 files changed, 175 insertions(+), 48 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index 84c3520..b76300b 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -1441,6 +1441,9 @@ void qemu_init_vcpu(CPUState *cpu)
> cpu_address_space_init(cpu, as, 0);
> }
>
> + /* protects updates to debug info */
No need to comment here.
> + qemu_mutex_init(&cpu->update_debug_lock);
Missing qemu_mutex_destroy.
> if (kvm_enabled()) {
> qemu_kvm_start_vcpu(cpu);
> } else if (tcg_enabled()) {
> diff --git a/exec.c b/exec.c
> index c8e8823..d1d14c1 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -919,31 +919,94 @@ static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
> }
>
> #endif
> -/* Find watchpoint with external reference */
> -CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
> +
> +/* Create a working copy of the breakpoints.
> + *
> + * The rcu_read_lock() may already be held depending on where this
> + * update has been triggered from. However it is safe to nest
> + * rcu_read_locks() so we do the copy under the lock here.
More precisely, the rcu_read_lock() and atomic_rcu_read() are
unnecessary in update paths because these already take the update lock.
However it is safe to nest rcu_read_lock() within itself or within the
update lock, so your code is fine.
> + */
> +
> +static GArray *rcu_copy_breakpoints(CPUState *cpu)
> {
> - CPUBreakpoint *bp = NULL;
> + GArray *old, *new;
> +
> + rcu_read_lock();
> + old = atomic_rcu_read(&cpu->breakpoints);
> + if (old) {
> + new = g_array_sized_new(false, false, sizeof(CPUBreakpoint), old->len);
> + memcpy(new->data, old->data, old->len * sizeof(CPUBreakpoint));
> + new->len = old->len;
> + } else {
> + new = g_array_new(false, false, sizeof(CPUBreakpoint));
> + }
> + rcu_read_unlock();
> +
> + return new;
> +}
> +
> +struct BreakRCU {
What about creating a generic g_array_free_rcu(GArray *array, gboolean
free_segment)? Or better g_array_unref_rcu(GArray *array) since we
require glib 2.22.
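Perhaps something like (untested sketch, name as suggested; g_array_unref()
does the actual freeing once the grace period has passed):

    typedef struct GArrayRCU {
        struct rcu_head rcu;
        GArray *array;
    } GArrayRCU;

    static void g_array_unref_rcu_fn(GArrayRCU *r)
    {
        g_array_unref(r->array);
        g_free(r);
    }

    void g_array_unref_rcu(GArray *array)
    {
        GArrayRCU *r = g_malloc(sizeof(*r));
        r->array = array;
        call_rcu(r, g_array_unref_rcu_fn, rcu);
    }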
> + struct rcu_head rcu;
> + GArray *bkpts;
> +};
> +
> +/* RCU reclaim step */
> +static void rcu_free_breakpoints(struct BreakRCU *rcu_free)
> +{
> + g_array_free(rcu_free->bkpts, false);
Why false? Doesn't this leak? (g_array_unref is another valid
replacement, as mentioned above).
> + g_free(rcu_free);
> +}
> +
> +/* Called with update lock held */
> +static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
> +{
> + GArray *bpts = atomic_rcu_read(&cpu->breakpoints);
> + atomic_rcu_set(&cpu->breakpoints, new_bkpts);
> +
> + if (bpts) {
> + struct BreakRCU *rcu_free = g_malloc(sizeof(*rcu_free));
> + rcu_free->bkpts = bpts;
> + call_rcu(rcu_free, rcu_free_breakpoints, rcu);
> + }
> +}
> +
> +/* Find breakpoint with external reference, only valid for duration of
> + * rcu_read_lock */
> +static CPUBreakpoint *find_bkpt_with_ref(GArray *bkpts, int ref)
breakpoint_get_by_ref, please.
> +{
> + CPUBreakpoint *bp;
> int i = 0;
> +
> do {
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i++);
> - } while (i < cpu->breakpoints->len && bp && bp->ref != ref);
> + bp = &g_array_index(bkpts, CPUBreakpoint, i);
> + if (bp->ref == ref) {
> + return bp;
> + }
> + } while (i++ < bkpts->len);
>
> - return bp;
> + return NULL;
> +}
> +
> +CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref)
> +{
> + GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> + return find_bkpt_with_ref(bkpts, ref);
> }
>
> /* Add a breakpoint. */
> int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
> {
> + GArray *bkpts;
> CPUBreakpoint *bp = NULL;
>
> - /* Allocate if no previous breakpoints */
> - if (!cpu->breakpoints) {
> - cpu->breakpoints = g_array_new(false, true, sizeof(CPUBreakpoint));
> - }
> + qemu_mutex_lock(&cpu->update_debug_lock);
> +
> + /* This will allocate if no previous breakpoints */
> + bkpts = rcu_copy_breakpoints(cpu);
>
> /* Find old watchpoint */
> if (ref != BPWP_NOREF) {
> - bp = cpu_breakpoint_get_by_ref(cpu, ref);
> + bp = find_bkpt_with_ref(bkpts, ref);
> }
>
> if (bp) {
> @@ -958,14 +1021,17 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref)
>
> /* keep all GDB-injected breakpoints in front */
> if (flags & BP_GDB) {
> - g_array_prepend_val(cpu->breakpoints, brk);
> + g_array_prepend_val(bkpts, brk);
> } else {
> - g_array_append_val(cpu->breakpoints, brk);
> + g_array_append_val(bkpts, brk);
> }
> }
>
> breakpoint_invalidate(cpu, pc);
>
> + rcu_update_breakpoints(cpu, bkpts);
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> +
> return 0;
> }
>
> @@ -974,67 +1040,110 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags)
> return cpu_breakpoint_insert_with_ref(cpu, pc, flags, BPWP_NOREF);
> }
>
> -static void cpu_breakpoint_delete(CPUState *cpu, int index)
> +/* Called with update_debug_lock held */
> +static void cpu_breakpoint_delete(CPUState *cpu, GArray *bkpts, int index)
> {
> CPUBreakpoint *bp;
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, index);
> + bp = &g_array_index(bkpts, CPUBreakpoint, index);
> breakpoint_invalidate(cpu, bp->pc);
> - g_array_remove_index(cpu->breakpoints, index);
> + g_array_remove_index(bkpts, index);
> }
>
> /* Remove a specific breakpoint. */
> int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
> {
> + GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> CPUBreakpoint *bp;
> + int retval = -ENOENT;
>
> - if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> + if (unlikely(bkpts) && unlikely(bkpts->len)) {
> int i = 0;
> +
> + qemu_mutex_lock(&cpu->update_debug_lock);
> + bkpts = rcu_copy_breakpoints(cpu);
> +
> do {
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> + bp = &g_array_index(bkpts, CPUBreakpoint, i);
> if (bp && bp->pc == pc && bp->flags == flags) {
> - cpu_breakpoint_delete(cpu, i);
> + cpu_breakpoint_delete(cpu, bkpts, i);
> + retval = 0;
> } else {
> i++;
> }
> - } while (i < cpu->breakpoints->len);
> + } while (i < bkpts->len);
> +
> + rcu_update_breakpoints(cpu, bkpts);
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> +
> }
>
> - return -ENOENT;
> + return retval;
> }
>
> +#ifdef CONFIG_USER_ONLY
> +void cpu_breakpoints_clone(CPUState *old_cpu, CPUState *new_cpu)
> +{
> + GArray *bkpts = atomic_rcu_read(&old_cpu->breakpoints);
> +
> + if (unlikely(bkpts) && unlikely(bkpts->len)) {
> + qemu_mutex_lock(&new_cpu->update_debug_lock);
> + bkpts = rcu_copy_breakpoints(old_cpu);
> + rcu_update_breakpoints(new_cpu, bkpts);
> + qemu_mutex_unlock(&new_cpu->update_debug_lock);
> + }
> +}
> +#endif
> +
> +
> /* Remove a specific breakpoint by reference. */
> void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref)
> {
> + GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> CPUBreakpoint *bp;
>
> - if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> + if (unlikely(bkpts) && unlikely(bkpts->len)) {
> int i = 0;
> +
> + qemu_mutex_lock(&cpu->update_debug_lock);
> + bkpts = rcu_copy_breakpoints(cpu);
> +
> do {
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> + bp = &g_array_index(bkpts, CPUBreakpoint, i);
> if (bp && bp->ref == ref) {
> - cpu_breakpoint_delete(cpu, i);
> + cpu_breakpoint_delete(cpu, bkpts, i);
> } else {
> i++;
> }
> - } while (i < cpu->breakpoints->len);
> + } while (i < bkpts->len);
> +
> + rcu_update_breakpoints(cpu, bkpts);
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> }
> }
>
> /* Remove all matching breakpoints. */
> void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
> {
> + GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> CPUBreakpoint *bp;
>
> - if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> + if (unlikely(bkpts) && unlikely(bkpts->len)) {
> int i = 0;
> +
> + qemu_mutex_lock(&cpu->update_debug_lock);
> + bkpts = rcu_copy_breakpoints(cpu);
> +
> do {
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> + bp = &g_array_index(bkpts, CPUBreakpoint, i);
> if (bp->flags & mask) {
> - cpu_breakpoint_delete(cpu, i);
> + cpu_breakpoint_delete(cpu, bkpts, i);
> } else {
> i++;
> }
> - } while (i < cpu->breakpoints->len);
> + } while (i < bkpts->len);
> +
> + rcu_update_breakpoints(cpu, bkpts);
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> }
> }
>
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index f695afb..51ca28c 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -326,8 +326,11 @@ struct CPUState {
> /* Debugging support:
> *
> * Both the gdbstub and architectural debug support will add
> - * references to these arrays.
> + * references to these arrays. The list of breakpoints and
> + * watchpoints are updated with RCU. The update_debug_lock is only
> + * required for updating, not just reading.
> */
> + QemuMutex update_debug_lock;
> GArray *breakpoints;
> GArray *watchpoints;
> CPUWatchpoint *watchpoint_hit;
> @@ -849,12 +852,13 @@ int cpu_breakpoint_insert_with_ref(CPUState *cpu, vaddr pc, int flags, int ref);
> * @cpu: CPU to monitor
> * @ref: unique reference
> *
> - * @return: a pointer to working copy of the breakpoint.
> + * @return: a pointer to working copy of the breakpoint or NULL.
> *
> - * Return a working copy of the current referenced breakpoint. This
> - * obviously only works for breakpoints inserted with a reference. The
> - * lifetime of this objected will be limited and should not be kept
> - * beyond its immediate use. Otherwise return NULL.
> + * Return a short term working copy of a breakpoint marked with an
> + * external reference. This obviously only works for breakpoints
> + * inserted with a reference. The lifetime of this object is the
> + * duration of the rcu_read_lock() and it may be released when
> + * rcu_synchronize is called.
"and it may be released..." is implicit.
> */
> CPUBreakpoint *cpu_breakpoint_get_by_ref(CPUState *cpu, int ref);
>
> @@ -871,14 +875,32 @@ void cpu_breakpoint_remove_by_ref(CPUState *cpu, int ref);
>
> void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
>
> -/* Return true if PC matches an installed breakpoint. */
> +#ifdef CONFIG_USER_ONLY
> +/**
> + * cpu_breakpoints_clone:
> + *
> + * @old_cpu: source of breakpoints
> + * @new_cpu: destination for breakpoints
> + *
> + * When a new user-mode thread is created the CPU structure behind it
> + * needs a copy of the existing breakpoint conditions. This helper does
> + * the copying.
> + */
> +void cpu_breakpoints_clone(CPUState *old_cpu, CPUState *new_cpu);
> +#endif
> +
> +/* Return true if PC matches an installed breakpoint.
> + * Called while the RCU read lock is held.
The "standard" phrasing is "Called from RCU critical section".
> + */
> static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
> {
> - if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> + GArray *bkpts = atomic_rcu_read(&cpu->breakpoints);
> +
> + if (unlikely(bkpts) && unlikely(bkpts->len)) {
> CPUBreakpoint *bp;
> int i;
> - for (i = 0; i < cpu->breakpoints->len; i++) {
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> + for (i = 0; i < bkpts->len; i++) {
> + bp = &g_array_index(bkpts, CPUBreakpoint, i);
> if (bp->pc == pc && (bp->flags & mask)) {
> return true;
> }
> diff --git a/linux-user/main.c b/linux-user/main.c
> index 179f2f2..2901626 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -3808,17 +3808,10 @@ CPUArchState *cpu_copy(CPUArchState *env)
>
> memcpy(new_env, env, sizeof(CPUArchState));
>
> - /* Clone all break/watchpoints.
> + /* Clone all breakpoints.
> Note: Once we support ptrace with hw-debug register access, make sure
> BP_CPU break/watchpoints are handled correctly on clone. */
> - if (unlikely(cpu->breakpoints) && unlikely(cpu->breakpoints->len)) {
> - CPUBreakpoint *bp;
> - int i;
> - for (i = 0; i < cpu->breakpoints->len; i++) {
> - bp = &g_array_index(cpu->breakpoints, CPUBreakpoint, i);
> - cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags);
> - }
> - }
> + cpu_breakpoints_clone(cpu, new_cpu);
> if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
> CPUWatchpoint *wp;
> int i;
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* [Qemu-devel] [RFC 6/7] linux-user: don't clone watchpoints
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
` (4 preceding siblings ...)
2016-06-17 16:33 ` [Qemu-devel] [RFC 5/7] breakpoints: put breakpoints under RCU control Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 17:18 ` Paolo Bonzini
2016-06-17 16:33 ` [Qemu-devel] [RFC 7/7] watchpoints: put watchpoints under RCU control Alex Bennée
` (2 subsequent siblings)
8 siblings, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Riku Voipio
The watchpoint code is stubbed out for CONFIG_USER_ONLY so there is no
point attempting to copy the data here. Let's remove the code before the
RCU protection goes in.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
linux-user/main.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/linux-user/main.c b/linux-user/main.c
index 2901626..9a5d017 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -3812,14 +3812,6 @@ CPUArchState *cpu_copy(CPUArchState *env)
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
cpu_breakpoints_clone(cpu, new_cpu);
- if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
- CPUWatchpoint *wp;
- int i;
- for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
- cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags);
- }
- }
return new_env;
}
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [RFC 6/7] linux-user: don't clone watchpoints
2016-06-17 16:33 ` [Qemu-devel] [RFC 6/7] linux-user: don't clone watchpoints Alex Bennée
@ 2016-06-17 17:18 ` Paolo Bonzini
0 siblings, 0 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 17:18 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana,
Riku Voipio
On 17/06/2016 18:33, Alex Bennée wrote:
> The watchpoint code is stubbed out for CONFIG_USER_ONLY so there is no
> point attempting to copy the data here. Let's remove the code before the
> RCU protection goes in.
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
> linux-user/main.c | 8 --------
> 1 file changed, 8 deletions(-)
>
> diff --git a/linux-user/main.c b/linux-user/main.c
> index 2901626..9a5d017 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -3812,14 +3812,6 @@ CPUArchState *cpu_copy(CPUArchState *env)
> Note: Once we support ptrace with hw-debug register access, make sure
> BP_CPU break/watchpoints are handled correctly on clone. */
> cpu_breakpoints_clone(cpu, new_cpu);
> - if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
> - CPUWatchpoint *wp;
> - int i;
> - for (i = 0; i < cpu->watchpoints->len; i++) {
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
> - cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags);
> - }
> - }
>
> return new_env;
> }
>
Hmm, I am not sure. It's still the correct thing to do.
Paolo
^ permalink raw reply [flat|nested] 23+ messages in thread
* [Qemu-devel] [RFC 7/7] watchpoints: put watchpoints under RCU control
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
` (5 preceding siblings ...)
2016-06-17 16:33 ` [Qemu-devel] [RFC 6/7] linux-user: don't clone watchpoints Alex Bennée
@ 2016-06-17 16:33 ` Alex Bennée
2016-06-17 17:10 ` Paolo Bonzini
2016-06-17 17:01 ` [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Paolo Bonzini
2016-06-20 15:49 ` Sergey Fedorov
8 siblings, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-17 16:33 UTC (permalink / raw)
To: mttcg, qemu-devel, fred.konrad, a.rigo, serge.fdrv, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana, Alex Bennée, Peter Crosthwaite
Each time watchpoints are added to or removed from the array it is done
using a read-copy-update cycle. Simultaneous writes are protected by the
update_debug_lock.
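On the read side a scan then only needs an RCU critical section, roughly
(a condensed sketch mirroring check_watchpoint() below; TCG callers may
already be inside one):

    GArray *wpts;
    int i;

    rcu_read_lock();
    wpts = atomic_rcu_read(&cpu->watchpoints);
    if (wpts) {
        for (i = 0; i < wpts->len; i++) {
            CPUWatchpoint *wp = &g_array_index(wpts, CPUWatchpoint, i);
            /* ... match against wp->vaddr, wp->len, wp->flags ... */
        }
    }
    rcu_read_unlock();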
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
cpu-exec.c | 7 ++-
exec.c | 193 ++++++++++++++++++++++++++++++++++++++++++++-----------------
2 files changed, 144 insertions(+), 56 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 2736a27..7fcb500 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -387,12 +387,13 @@ static inline bool cpu_handle_halt(CPUState *cpu)
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
+ GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
CPUWatchpoint *wp;
int i;
- if (!cpu->watchpoint_hit && cpu->watchpoints) {
- for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
+ if (!cpu->watchpoint_hit && wpts) {
+ for (i = 0; i < wpts->len; i++) {
+ wp = &g_array_index(wpts, CPUWatchpoint, i);
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
diff --git a/exec.c b/exec.c
index d1d14c1..b9167ec 100644
--- a/exec.c
+++ b/exec.c
@@ -735,6 +735,18 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
}
#endif
+struct FreeDebugRCU {
+ struct rcu_head rcu;
+ GArray *debug;
+};
+
+/* RCU reclaim step */
+static void rcu_debug_free(struct FreeDebugRCU *rcu_free)
+{
+ g_array_free(rcu_free->debug, false);
+ g_free(rcu_free);
+}
+
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
@@ -766,22 +778,72 @@ CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
return NULL;
}
#else
-/* Find watchpoint with external reference */
-CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
+/* Create a working copy of the watchpoints.
+ *
+ * The rcu_read_lock() may already be held depending on where this
+ * update has been triggered from. However it is safe to nest
+ * rcu_read_locks() so we do the copy under the lock here.
+ */
+
+static GArray *rcu_copy_watchpoints(CPUState *cpu)
{
- CPUWatchpoint *wp = NULL;
+ GArray *old, *new;
+
+ rcu_read_lock();
+ old = atomic_rcu_read(&cpu->watchpoints);
+ if (old) {
+ new = g_array_sized_new(false, false, sizeof(CPUWatchpoint), old->len);
+ memcpy(new->data, old->data, old->len * sizeof(CPUWatchpoint));
+ new->len = old->len;
+ } else {
+ new = g_array_new(false, false, sizeof(CPUWatchpoint));
+ }
+ rcu_read_unlock();
+
+ return new;
+}
+
+/* Called with update lock held */
+static void rcu_update_watchpoints(CPUState *cpu, GArray *new_watchpts)
+{
+ GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
+ atomic_rcu_set(&cpu->watchpoints, new_watchpts);
+ if (wpts) {
+ struct FreeDebugRCU *rcu_free = g_malloc(sizeof(*rcu_free));
+ rcu_free->debug = wpts;
+ call_rcu(rcu_free, rcu_debug_free, rcu);
+ }
+}
+
+/* Find watchpoint with external reference, only valid for duration of
+ * rcu_read_lock */
+static CPUWatchpoint *find_watch_with_ref(GArray *wpts, int ref)
+{
+ CPUWatchpoint *wp;
int i = 0;
+
do {
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i++);
- } while (i < cpu->watchpoints->len && wp && wp->ref != ref);
+ wp = &g_array_index(wpts, CPUWatchpoint, i);
+ if (wp->ref == ref) {
+ return wp;
+ }
+ } while (i++ < wpts->len);
+
+ return NULL;
+}
- return wp;
+/* Find watchpoint with external reference */
+CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
+{
+ GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
+ return find_watch_with_ref(wpts, ref);
}
/* Add a watchpoint. */
int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
int flags, int ref)
{
+ GArray *wpts;
CPUWatchpoint *wp = NULL;
/* forbid ranges which are empty or run off the end of the address space */
@@ -791,14 +853,14 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
return -EINVAL;
}
- /* Allocate if no previous watchpoints */
- if (!cpu->watchpoints) {
- cpu->watchpoints = g_array_new(false, true, sizeof(CPUWatchpoint));
- }
+ qemu_mutex_lock(&cpu->update_debug_lock);
+
+ /* This will allocate if no previous breakpoints */
+ wpts = rcu_copy_watchpoints(cpu);
/* Find old watchpoint */
if (ref != BPWP_NOREF) {
- wp = cpu_watchpoint_get_by_ref(cpu, ref);
+ wp = find_watch_with_ref(wpts, ref);
}
if (wp) {
@@ -815,15 +877,18 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB) {
- g_array_prepend_val(cpu->watchpoints, watch);
+ g_array_prepend_val(wpts, watch);
} else {
- g_array_append_val(cpu->watchpoints, watch);
+ g_array_append_val(wpts, watch);
}
}
tlb_flush_page(cpu, addr);
+ rcu_update_watchpoints(cpu, wpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
+
return 0;
}
@@ -832,69 +897,101 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags)
return cpu_watchpoint_insert_with_ref(cpu, addr, len, flags, BPWP_NOREF);
}
-static void cpu_watchpoint_delete(CPUState *cpu, int index)
+static void cpu_watchpoint_delete(CPUState *cpu, GArray *wpts, int index)
{
CPUWatchpoint *wp;
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, index);
+ wp = &g_array_index(wpts, CPUWatchpoint, index);
tlb_flush_page(cpu, wp->vaddr);
- g_array_remove_index(cpu->watchpoints, index);
+ g_array_remove_index(wpts, index);
}
/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
{
+ GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
CPUWatchpoint *wp;
- int i = 0;
+ int retval = -ENOENT;
+
+ if (unlikely(wpts) && unlikely(wpts->len)) {
+ int i = 0;
+
+ qemu_mutex_lock(&cpu->update_debug_lock);
+ wpts = rcu_copy_watchpoints(cpu);
- if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
do {
wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
if (wp && addr == wp->vaddr && len == wp->len
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
- cpu_watchpoint_delete(cpu, i);
- return 0;
+ cpu_watchpoint_delete(cpu, wpts, i);
+ retval = 0;
+ } else {
+ i++;
}
- } while (i++ < cpu->watchpoints->len);
+ } while (i < wpts->len);
+
+ rcu_update_watchpoints(cpu, wpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
+
}
- return -ENOENT;
+ return retval;
}
/* Remove a specific watchpoint by external reference. */
int cpu_watchpoint_remove_by_ref(CPUState *cpu, int ref)
{
+ GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
CPUWatchpoint *wp;
- int i = 0;
+ int retval = -ENOENT;
+
+ if (unlikely(wpts) && unlikely(wpts->len)) {
+ int i = 0;
+
+ qemu_mutex_lock(&cpu->update_debug_lock);
+ wpts = rcu_copy_watchpoints(cpu);
- if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
do {
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
+ wp = &g_array_index(wpts, CPUWatchpoint, i);
if (wp && wp->ref == ref) {
- cpu_watchpoint_delete(cpu, i);
- return 0;
+ cpu_watchpoint_delete(cpu, wpts, i);
+ retval = 0;
+ } else {
+ i++;
}
- } while (wp && i++ < cpu->watchpoints->len);
+ } while (i < wpts->len);
+
+ rcu_update_watchpoints(cpu, wpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
+
}
- return -ENOENT;
+ return retval;
}
/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
+ GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
CPUWatchpoint *wp;
- if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
- int i = cpu->watchpoints->len;
+ if (unlikely(wpts) && unlikely(wpts->len)) {
+ int i = 0;
+
+ qemu_mutex_lock(&cpu->update_debug_lock);
+ wpts = rcu_copy_watchpoints(cpu);
+
do {
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
+ wp = &g_array_index(wpts, CPUWatchpoint, i);
if (wp->flags & mask) {
- cpu_watchpoint_delete(cpu, i);
+ cpu_watchpoint_delete(cpu, wpts, i);
} else {
- i--;
+ i++;
}
- } while (cpu->watchpoints->len && i >= 0);
+ } while (i < wpts->len);
+
+ rcu_update_watchpoints(cpu, wpts);
+ qemu_mutex_unlock(&cpu->update_debug_lock);
}
}
@@ -945,18 +1042,6 @@ static GArray *rcu_copy_breakpoints(CPUState *cpu)
return new;
}
-struct BreakRCU {
- struct rcu_head rcu;
- GArray *bkpts;
-};
-
-/* RCU reclaim step */
-static void rcu_free_breakpoints(struct BreakRCU *rcu_free)
-{
- g_array_free(rcu_free->bkpts, false);
- g_free(rcu_free);
-}
-
/* Called with update lock held */
static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
{
@@ -964,9 +1049,9 @@ static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
atomic_rcu_set(&cpu->breakpoints, new_bkpts);
if (bpts) {
- struct BreakRCU *rcu_free = g_malloc(sizeof(*rcu_free));
- rcu_free->bkpts = bpts;
- call_rcu(rcu_free, rcu_free_breakpoints, rcu);
+ struct FreeDebugRCU *rcu_free = g_malloc(sizeof(*rcu_free));
+ rcu_free->debug = bpts;
+ call_rcu(rcu_free, rcu_debug_free, rcu);
}
}
@@ -2296,6 +2381,7 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
target_ulong pc, cs_base;
target_ulong vaddr;
uint32_t cpu_flags;
+ GArray *wpts;
if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
@@ -2306,11 +2392,12 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
}
vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
+ wpts = atomic_rcu_read(&cpu->watchpoints);
+ if (unlikely(wpts) && unlikely(wpts->len)) {
CPUWatchpoint *wp;
int i;
- for (i = 0; i < cpu->watchpoints->len; i++) {
- wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
+ for (i = 0; i < wpts->len; i++) {
+ wp = &g_array_index(wpts, CPUWatchpoint, i);
if (cpu_watchpoint_address_matches(wp, vaddr, len)
&& (wp->flags & flags)) {
if (flags == BP_MEM_READ) {
--
2.7.4
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [Qemu-devel] [RFC 7/7] watchpoints: put watchpoints under RCU control
2016-06-17 16:33 ` [Qemu-devel] [RFC 7/7] watchpoints: put watchpoints under RCU control Alex Bennée
@ 2016-06-17 17:10 ` Paolo Bonzini
0 siblings, 0 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 17:10 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana,
Peter Crosthwaite
On 17/06/2016 18:33, Alex Bennée wrote:
> Each time watchpoints are added to or removed from the array it is done
> using a read-copy-update cycle. Simultaneous writes are protected by the
> update_debug_lock.
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> ---
> cpu-exec.c | 7 ++-
> exec.c | 193 ++++++++++++++++++++++++++++++++++++++++++++-----------------
> 2 files changed, 144 insertions(+), 56 deletions(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 2736a27..7fcb500 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -387,12 +387,13 @@ static inline bool cpu_handle_halt(CPUState *cpu)
> static inline void cpu_handle_debug_exception(CPUState *cpu)
> {
> CPUClass *cc = CPU_GET_CLASS(cpu);
> + GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
> CPUWatchpoint *wp;
> int i;
>
> - if (!cpu->watchpoint_hit && cpu->watchpoints) {
> - for (i = 0; i < cpu->watchpoints->len; i++) {
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
> + if (!cpu->watchpoint_hit && wpts) {
> + for (i = 0; i < wpts->len; i++) {
> + wp = &g_array_index(wpts, CPUWatchpoint, i);
> wp->flags &= ~BP_WATCHPOINT_HIT;
> }
> }
> diff --git a/exec.c b/exec.c
> index d1d14c1..b9167ec 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -735,6 +735,18 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
> }
> #endif
>
> +struct FreeDebugRCU {
> + struct rcu_head rcu;
> + GArray *debug;
> +};
Deja vu? ;)
> +/* RCU reclaim step */
> +static void rcu_debug_free(struct FreeDebugRCU *rcu_free)
> +{
> + g_array_free(rcu_free->debug, false);
> + g_free(rcu_free);
> +}
> +
> #if defined(CONFIG_USER_ONLY)
> void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
>
> @@ -766,22 +778,72 @@ CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
> return NULL;
> }
> #else
> -/* Find watchpoint with external reference */
> -CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
> +/* Create a working copy of the watchpoints.
> + *
> + * The rcu_read_lock() may already be held depending on where this
> + * update has been triggered from. However it is safe to nest
> + * rcu_read_locks() so we do the copy under the lock here.
Same as patch 5.
> + */
> +
> +static GArray *rcu_copy_watchpoints(CPUState *cpu)
> {
> - CPUWatchpoint *wp = NULL;
> + GArray *old, *new;
> +
> + rcu_read_lock();
> + old = atomic_rcu_read(&cpu->watchpoints);
> + if (old) {
> + new = g_array_sized_new(false, false, sizeof(CPUWatchpoint), old->len);
> + memcpy(new->data, old->data, old->len * sizeof(CPUWatchpoint));
> + new->len = old->len;
> + } else {
> + new = g_array_new(false, false, sizeof(CPUWatchpoint));
> + }
> + rcu_read_unlock();
> +
> + return new;
> +}
> +
> +/* Called with update lock held */
> +static void rcu_update_watchpoints(CPUState *cpu, GArray *new_watchpts)
> +{
> + GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
> + atomic_rcu_set(&cpu->watchpoints, new_watchpts);
> + if (wpts) {
> + struct FreeDebugRCU *rcu_free = g_malloc(sizeof(*rcu_free));
> + rcu_free->debug = wpts;
> + call_rcu(rcu_free, rcu_debug_free, rcu);
> + }
> +}
> +
> +/* Find watchpoint with external reference, only valid for duration of
> + * rcu_read_lock */
> +static CPUWatchpoint *find_watch_with_ref(GArray *wpts, int ref)
watchpoint_get_by_ref.
BTW, should the "ref" be unsigned?
> +{
> + CPUWatchpoint *wp;
> int i = 0;
> +
> do {
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i++);
> - } while (i < cpu->watchpoints->len && wp && wp->ref != ref);
> + wp = &g_array_index(wpts, CPUWatchpoint, i);
> + if (wp->ref == ref) {
> + return wp;
> + }
> + } while (i++ < wpts->len);
> +
> + return NULL;
> +}
>
> - return wp;
> +/* Find watchpoint with external reference */
> +CPUWatchpoint *cpu_watchpoint_get_by_ref(CPUState *cpu, int ref)
> +{
> + GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
> + return find_watch_with_ref(wpts, ref);
> }
>
> /* Add a watchpoint. */
> int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
> int flags, int ref)
> {
> + GArray *wpts;
> CPUWatchpoint *wp = NULL;
>
> /* forbid ranges which are empty or run off the end of the address space */
> @@ -791,14 +853,14 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
> return -EINVAL;
> }
>
> - /* Allocate if no previous watchpoints */
> - if (!cpu->watchpoints) {
> - cpu->watchpoints = g_array_new(false, true, sizeof(CPUWatchpoint));
> - }
> + qemu_mutex_lock(&cpu->update_debug_lock);
> +
> + /* This will allocate if no previous breakpoints */
^^^^^^^^^^^
watchpoints
> + wpts = rcu_copy_watchpoints(cpu);
In Linux it's usual to just call the array "new" (and if you need it
"old"). I like it more than bpts and wpts.
>
> /* Find old watchpoint */
> if (ref != BPWP_NOREF) {
> - wp = cpu_watchpoint_get_by_ref(cpu, ref);
> + wp = find_watch_with_ref(wpts, ref);
> }
>
> if (wp) {
> @@ -815,15 +877,18 @@ int cpu_watchpoint_insert_with_ref(CPUState *cpu, vaddr addr, vaddr len,
>
> /* keep all GDB-injected watchpoints in front */
> if (flags & BP_GDB) {
> - g_array_prepend_val(cpu->watchpoints, watch);
> + g_array_prepend_val(wpts, watch);
> } else {
> - g_array_append_val(cpu->watchpoints, watch);
> + g_array_append_val(wpts, watch);
> }
> }
>
>
> tlb_flush_page(cpu, addr);
>
> + rcu_update_watchpoints(cpu, wpts);
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> +
> return 0;
> }
>
> @@ -832,69 +897,101 @@ int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, int flags)
> return cpu_watchpoint_insert_with_ref(cpu, addr, len, flags, BPWP_NOREF);
> }
>
> -static void cpu_watchpoint_delete(CPUState *cpu, int index)
> +static void cpu_watchpoint_delete(CPUState *cpu, GArray *wpts, int index)
Same here, just call the GArray "new".
> {
> CPUWatchpoint *wp;
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, index);
> + wp = &g_array_index(wpts, CPUWatchpoint, index);
> tlb_flush_page(cpu, wp->vaddr);
> - g_array_remove_index(cpu->watchpoints, index);
> + g_array_remove_index(wpts, index);
> }
>
> /* Remove a specific watchpoint. */
> int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
> int flags)
> {
> + GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
old...
> CPUWatchpoint *wp;
> - int i = 0;
> + int retval = -ENOENT;
> +
> + if (unlikely(wpts) && unlikely(wpts->len)) {
If this function is called, it's not so unlikely that cpu->watchpoints
is non-NULL!
> + int i = 0;
> +
> + qemu_mutex_lock(&cpu->update_debug_lock);
> + wpts = rcu_copy_watchpoints(cpu);
... and new.
>
> - if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
> do {
> wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
> if (wp && addr == wp->vaddr && len == wp->len
> && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
> - cpu_watchpoint_delete(cpu, i);
> - return 0;
> + cpu_watchpoint_delete(cpu, wpts, i);
> + retval = 0;
> + } else {
> + i++;
> }
> - } while (i++ < cpu->watchpoints->len);
> + } while (i < wpts->len);
Please write the iteration in the same way from the beginning, to keep
this patch as small as possible.
> + rcu_update_watchpoints(cpu, wpts);
Or free and set it to NULL if there are no watchpoints at all?
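E.g. (sketch of that idea, reusing the names from this hunk):

    if (wpts->len) {
        rcu_update_watchpoints(cpu, wpts);
    } else {
        rcu_update_watchpoints(cpu, NULL);
        g_array_free(wpts, true);   /* the copy was never published */
    }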
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> +
> }
>
> - return -ENOENT;
> + return retval;
> }
>
> /* Remove a specific watchpoint by external reference. */
> int cpu_watchpoint_remove_by_ref(CPUState *cpu, int ref)
> {
> + GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
> CPUWatchpoint *wp;
> - int i = 0;
> + int retval = -ENOENT;
> +
> + if (unlikely(wpts) && unlikely(wpts->len)) {
> + int i = 0;
> +
> + qemu_mutex_lock(&cpu->update_debug_lock);
> + wpts = rcu_copy_watchpoints(cpu);
>
> - if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
> do {
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
> + wp = &g_array_index(wpts, CPUWatchpoint, i);
> if (wp && wp->ref == ref) {
> - cpu_watchpoint_delete(cpu, i);
> - return 0;
> + cpu_watchpoint_delete(cpu, wpts, i);
> + retval = 0;
> + } else {
> + i++;
> }
> - } while (wp && i++ < cpu->watchpoints->len);
> + } while (i < wpts->len);
> +
> + rcu_update_watchpoints(cpu, wpts);
Same here, free and set it to NULL if the last watchpoint has been removed?
A few of these suggestions probably apply to breakpoints as well.
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> +
> }
>
> - return -ENOENT;
> + return retval;
> }
>
> /* Remove all matching watchpoints. */
> void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
> {
> + GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
> CPUWatchpoint *wp;
>
> - if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
> - int i = cpu->watchpoints->len;
> + if (unlikely(wpts) && unlikely(wpts->len)) {
> + int i = 0;
> +
> + qemu_mutex_lock(&cpu->update_debug_lock);
> + wpts = rcu_copy_watchpoints(cpu);
> +
> do {
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
> + wp = &g_array_index(wpts, CPUWatchpoint, i);
> if (wp->flags & mask) {
> - cpu_watchpoint_delete(cpu, i);
> + cpu_watchpoint_delete(cpu, wpts, i);
> } else {
> - i--;
> + i++;
> }
> - } while (cpu->watchpoints->len && i >= 0);
> + } while (i < wpts->len);
> +
> + rcu_update_watchpoints(cpu, wpts);
> + qemu_mutex_unlock(&cpu->update_debug_lock);
> }
> }
>
> @@ -945,18 +1042,6 @@ static GArray *rcu_copy_breakpoints(CPUState *cpu)
> return new;
> }
>
> -struct BreakRCU {
> - struct rcu_head rcu;
> - GArray *bkpts;
> -};
> -
> -/* RCU reclaim step */
> -static void rcu_free_breakpoints(struct BreakRCU *rcu_free)
> -{
> - g_array_free(rcu_free->bkpts, false);
> - g_free(rcu_free);
> -}
> -
> /* Called with update lock held */
> static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
> {
> @@ -964,9 +1049,9 @@ static void rcu_update_breakpoints(CPUState *cpu, GArray *new_bkpts)
> atomic_rcu_set(&cpu->breakpoints, new_bkpts);
>
> if (bpts) {
> - struct BreakRCU *rcu_free = g_malloc(sizeof(*rcu_free));
> - rcu_free->bkpts = bpts;
> - call_rcu(rcu_free, rcu_free_breakpoints, rcu);
> + struct FreeDebugRCU *rcu_free = g_malloc(sizeof(*rcu_free));
> + rcu_free->debug = bpts;
> + call_rcu(rcu_free, rcu_debug_free, rcu);
> }
> }
>
> @@ -2296,6 +2381,7 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
> target_ulong pc, cs_base;
> target_ulong vaddr;
> uint32_t cpu_flags;
> + GArray *wpts;
>
> if (cpu->watchpoint_hit) {
> /* We re-entered the check after replacing the TB. Now raise
> @@ -2306,11 +2392,12 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
> }
> vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
>
> - if (unlikely(cpu->watchpoints) && unlikely(cpu->watchpoints->len)) {
> + wpts = atomic_rcu_read(&cpu->watchpoints);
> + if (unlikely(wpts) && unlikely(wpts->len)) {
> CPUWatchpoint *wp;
> int i;
> - for (i = 0; i < cpu->watchpoints->len; i++) {
> - wp = &g_array_index(cpu->watchpoints, CPUWatchpoint, i);
> + for (i = 0; i < wpts->len; i++) {
> + wp = &g_array_index(wpts, CPUWatchpoint, i);
> if (cpu_watchpoint_address_matches(wp, vaddr, len)
> && (wp->flags & flags)) {
> if (flags == BP_MEM_READ) {
>
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
` (6 preceding siblings ...)
2016-06-17 16:33 ` [Qemu-devel] [RFC 7/7] watchpoints: put watchpoints under RCU control Alex Bennée
@ 2016-06-17 17:01 ` Paolo Bonzini
2016-06-20 13:55 ` Sergey Fedorov
2016-06-20 15:49 ` Sergey Fedorov
8 siblings, 1 reply; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-17 17:01 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo,
serge.fdrv, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana
On 17/06/2016 18:33, Alex Bennée wrote:
> First we move the break/watchpoints into an array, which is more
> amenable to RCU control than the QLIST. We then control the lifetime
> of references to break/watchpoint data by removing long-held
> references in the target code and getting information when needed
> from the core. Then we stop dynamically allocating the
> watch/breakpoint data and store it directly in the array, which makes
> iteration across the list a bit more cache friendly than chasing
> pointers. Finally, addition and removal of elements of the array are
> put under RCU control. This ensures there is always a safe array of
> data to check in the run-loop.
I'm not sure why you say that arrays are more amenable than QTAILQ
(though indeed include/qemu/rcu_queue.h only includes QLIST for now),
but I feel bad asking you to redo all the work...
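Just to illustrate, a minimal sketch of what reusing the rcu_queue.h
macros could look like, assuming CPUWatchpoint kept a QLIST_ENTRY and
grew an rcu_head (free_watchpoint() and the field names are
hypothetical, untested):

    /* reader, in the memory-access path */
    rcu_read_lock();
    QLIST_FOREACH_RCU(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)) {
            /* handle the hit */
        }
    }
    rcu_read_unlock();

    /* writer, serialized by a per-CPU debug lock */
    QLIST_REMOVE_RCU(wp, entry);
    call_rcu(wp, free_watchpoint, rcu); /* reclaim after a grace period */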
Paolo
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-17 17:01 ` [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Paolo Bonzini
@ 2016-06-20 13:55 ` Sergey Fedorov
2016-06-20 14:25 ` Paolo Bonzini
0 siblings, 1 reply; 23+ messages in thread
From: Sergey Fedorov @ 2016-06-20 13:55 UTC (permalink / raw)
To: Paolo Bonzini, Alex Bennée, mttcg, qemu-devel, fred.konrad,
a.rigo, cota, bobby.prani
Cc: mark.burton, jan.kiszka, rth, peter.maydell, claudio.fontana
On 17/06/16 20:01, Paolo Bonzini wrote:
>
> On 17/06/2016 18:33, Alex Bennée wrote:
>> First we move the break/watchpoints into an array, which is more
>> amenable to RCU control than the QLIST. We then control the lifetime
>> of references to break/watchpoint data by removing long-held
>> references in the target code and getting information when needed
>> from the core. Then we stop dynamically allocating the
>> watch/breakpoint data and store it directly in the array, which makes
>> iteration across the list a bit more cache friendly than chasing
>> pointers. Finally, addition and removal of elements of the array are
>> put under RCU control. This ensures there is always a safe array of
>> data to check in the run-loop.
> I'm not sure why you say that arrays are more amenable than QTAILQ
> (though indeed include/qemu/rcu_queue.h only includes QLIST for now),
> but I feel bad asking you to redo all the work...
Is there any realistic way to manage *doubly* linked lists in RCU?
Kind regards,
Sergey
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-20 13:55 ` Sergey Fedorov
@ 2016-06-20 14:25 ` Paolo Bonzini
2016-06-20 15:23 ` Sergey Fedorov
0 siblings, 1 reply; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-20 14:25 UTC (permalink / raw)
To: Sergey Fedorov, Alex Bennée, mttcg, qemu-devel, fred.konrad,
a.rigo, cota, bobby.prani
Cc: peter.maydell, jan.kiszka, mark.burton, claudio.fontana, rth
On 20/06/2016 15:55, Sergey Fedorov wrote:
>> > I'm not sure why you say that arrays are more amenable than QTAILQ
>> > (though indeed include/qemu/rcu_queue.h only includes QLIST for now),
>> > but I feel bad asking you to redo all the work...
> Is there any realistic way to manage *doubly* linked lists in RCU?
Linux does that fine with a circular linked list, but off the top of my
head I cannot think of any reason why QTAILQ would not work.
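The trick is that readers only ever follow the forward pointers, so
removal needs a single atomically published store; the back pointers
are only touched by writers, which are serialized by a lock anyway.
Schematically, with plain next/prev fields rather than the actual
QTAILQ macros and a placeholder reclaim function:

    /* writer, holding the list lock */
    atomic_rcu_set(&prev->next, elm->next);  /* readers now skip elm */
    if (elm->next) {
        elm->next->prev = prev;              /* readers never read prev */
    }
    call_rcu(elm, reclaim_elm, rcu);         /* free after a grace period */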
Paolo
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-20 14:25 ` Paolo Bonzini
@ 2016-06-20 15:23 ` Sergey Fedorov
0 siblings, 0 replies; 23+ messages in thread
From: Sergey Fedorov @ 2016-06-20 15:23 UTC (permalink / raw)
To: Paolo Bonzini, Alex Bennée, mttcg, qemu-devel, fred.konrad,
a.rigo, cota, bobby.prani
Cc: peter.maydell, jan.kiszka, mark.burton, claudio.fontana, rth
On 20/06/16 17:25, Paolo Bonzini wrote:
>
> On 20/06/2016 15:55, Sergey Fedorov wrote:
>>>> I'm not sure why you say that arrays are more amenable than QTAILQ
>>>> (though indeed include/qemu/rcu_queue.h only includes QLIST for now),
>>>> but I feel bad asking you to redo all the work...
>> Is there any realistic way to manage *doubly* linked lists in RCU?
> Linux does that fine with a circular linked list, but off the top of my
> head I cannot think of any reason why QTAILQ would not work.
Silly me, I see that now :)
Thanks,
Sergey
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-17 16:33 [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Alex Bennée
` (7 preceding siblings ...)
2016-06-17 17:01 ` [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation Paolo Bonzini
@ 2016-06-20 15:49 ` Sergey Fedorov
2016-06-20 16:08 ` Paolo Bonzini
8 siblings, 1 reply; 23+ messages in thread
From: Sergey Fedorov @ 2016-06-20 15:49 UTC (permalink / raw)
To: Alex Bennée, mttcg, qemu-devel, fred.konrad, a.rigo, cota,
bobby.prani
Cc: mark.burton, pbonzini, jan.kiszka, rth, peter.maydell,
claudio.fontana
On 17/06/16 19:33, Alex Bennée wrote:
> Last time I went through the MTTCG code the access to the
> break/watchpoint code was annotated with "RCU?". The code currently
> gets away with avoiding locks for the gdbstub as the guest execution
> state is usually halted. However, when used for modelling architectural
> debug registers there is no such protection.
I'm not so sure there's any architecture which permits changing the
breakpoints/watchpoints of one core from another.
> The patch series changes things in stages.
>
> First we move the break/watchpoints into an array, which is more
> amenable to RCU control than the QLIST. We then control the lifetime
> of references to break/watchpoint data by removing long-held
> references in the target code and getting information when needed
> from the core. Then we stop dynamically allocating the
> watch/breakpoint data and store it directly in the array, which makes
> iteration across the list a bit more cache friendly than chasing
> pointers. Finally, addition and removal of elements of the array are
> put under RCU control. This ensures there is always a safe array of
> data to check in the run-loop.
I'm a little bit unsure whether we really want to complicate things
with RCU. Why don't we simply protect the lists with a mutex, given
that no contention is expected? BTW, when it comes to debugging, I
suppose we don't expect great performance anyway.
Kind regards,
Sergey
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-20 15:49 ` Sergey Fedorov
@ 2016-06-20 16:08 ` Paolo Bonzini
2016-06-20 16:27 ` Alex Bennée
2016-06-20 18:19 ` Sergey Fedorov
0 siblings, 2 replies; 23+ messages in thread
From: Paolo Bonzini @ 2016-06-20 16:08 UTC (permalink / raw)
To: Sergey Fedorov
Cc: Alex Bennée, mttcg, qemu-devel, fred konrad, a rigo, cota,
bobby prani, mark burton, jan kiszka, rth, peter maydell,
claudio fontana
> > The patch series changes things in stages.
> >
> > First we move the break/watchpoints into an array, which is more
> > amenable to RCU control than the QLIST. We then control the lifetime
> > of references to break/watchpoint data by removing long-held
> > references in the target code and getting information when needed
> > from the core. Then we stop dynamically allocating the
> > watch/breakpoint data and store it directly in the array, which makes
> > iteration across the list a bit more cache friendly than chasing
> > pointers. Finally, addition and removal of elements of the array are
> > put under RCU control. This ensures there is always a safe array of
> > data to check in the run-loop.
>
> I'm a little bit unsure whether we really want to complicate things
> with RCU. Why don't we simply protect the lists with a mutex, given
> that no contention is expected? BTW, when it comes to debugging, I
> suppose we don't expect great performance anyway.
Mutexes do introduce some overhead. The breakpoints list is mostly
touched during translation, but the watchpoints list isn't, so we could
use tb_lock for breakpoints and a separate per-CPU mutex for
watchpoints. That could indeed work.
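Concretely, the writer side could look something like this (a sketch;
cpu->watchpoint_lock is a hypothetical new per-CPU mutex, and the read
side in check_watchpoint() would take the same lock):

    /* watchpoint changes under the per-CPU mutex */
    qemu_mutex_lock(&cpu->watchpoint_lock);
    QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    tlb_flush_page(cpu, addr);
    qemu_mutex_unlock(&cpu->watchpoint_lock);

    /* breakpoint changes piggyback on tb_lock */
    tb_lock();
    QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    breakpoint_invalidate(cpu, bp->pc);
    tb_unlock();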
Paolo
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-20 16:08 ` Paolo Bonzini
@ 2016-06-20 16:27 ` Alex Bennée
2016-06-20 18:16 ` Sergey Fedorov
2016-06-20 18:19 ` Sergey Fedorov
1 sibling, 1 reply; 23+ messages in thread
From: Alex Bennée @ 2016-06-20 16:27 UTC (permalink / raw)
To: Paolo Bonzini
Cc: Sergey Fedorov, mttcg, qemu-devel, fred konrad, a rigo, cota,
bobby prani, mark burton, jan kiszka, rth, peter maydell,
claudio fontana
Paolo Bonzini <pbonzini@redhat.com> writes:
>> > The patch series changes things in stages.
>> >
>> > First we move the break/watchpoints into an array, which is more
>> > amenable to RCU control than the QLIST. We then control the lifetime
>> > of references to break/watchpoint data by removing long-held
>> > references in the target code and getting information when needed
>> > from the core. Then we stop dynamically allocating the
>> > watch/breakpoint data and store it directly in the array, which makes
>> > iteration across the list a bit more cache friendly than chasing
>> > pointers. Finally, addition and removal of elements of the array are
>> > put under RCU control. This ensures there is always a safe array of
>> > data to check in the run-loop.
>>
>> I'm a little bit unsure whether we really want to complicate things
>> with RCU. Why don't we simply protect the lists with a mutex, given
>> that no contention is expected? BTW, when it comes to debugging, I
>> suppose we don't expect great performance anyway.
>
> Mutexes do introduce some overhead. The breakpoints list is mostly
> touched during translation, but the watchpoints list isn't, so we could
> use tb_lock for breakpoints and a separate per-CPU mutex for
> watchpoints. That could indeed work.
The watchpoint contention is the biggest one. FWIW I like the RCU
approach because it is low impact when running (and hopefully faster as
well, since it is no longer a linked list).
It's not a major problem in system mode because the system is generally
halted when changes are made to the list. However, I'd like to solve it
properly for both system and user-mode so I can then forget about
another special case.
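To put "low impact" in code, the hot-path reader in this series boils
down to something like the sketch below (in system mode the
memory-access path may already be inside an RCU read-side critical
section, making the explicit lock/unlock redundant there):

    rcu_read_lock();
    GArray *wpts = atomic_rcu_read(&cpu->watchpoints);
    if (wpts) {
        int i;
        for (i = 0; i < wpts->len; i++) {
            CPUWatchpoint *wp = &g_array_index(wpts, CPUWatchpoint, i);
            /* match wp->vaddr/wp->len against the access */
        }
    }
    rcu_read_unlock();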
--
Alex Bennée
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-20 16:27 ` Alex Bennée
@ 2016-06-20 18:16 ` Sergey Fedorov
0 siblings, 0 replies; 23+ messages in thread
From: Sergey Fedorov @ 2016-06-20 18:16 UTC (permalink / raw)
To: Alex Bennée, Paolo Bonzini
Cc: mttcg, qemu-devel, fred konrad, a rigo, cota, bobby prani,
mark burton, jan kiszka, rth, peter maydell, claudio fontana
On 20/06/16 19:27, Alex Bennée wrote:
> The watchpoint contention is the biggest one. FWIW I like the RCU
> approach because it is low impact when running (and hopefully faster as
> well, since it is no longer a linked list).
When can we expect any contention? I generally find the idea of RCU
really great, but in this particular case, I'm afraid, it could give us
more pain (complexity) than profit (performance).
> It's not a major problem in system mode because the system is generally
> halted when changes are made to the list. However, I'd like to solve it
> properly for both system and user-mode so I can then forget about
> another special case.
Could you please explain a bit more about problems in user-mode?
Thanks,
Sergey
* Re: [Qemu-devel] [RFC 0/7] Safe watch and breakpoint manipulation
2016-06-20 16:08 ` Paolo Bonzini
2016-06-20 16:27 ` Alex Bennée
@ 2016-06-20 18:19 ` Sergey Fedorov
1 sibling, 0 replies; 23+ messages in thread
From: Sergey Fedorov @ 2016-06-20 18:19 UTC (permalink / raw)
To: Paolo Bonzini
Cc: Alex Bennée, mttcg, qemu-devel, fred konrad, a rigo, cota,
bobby prani, mark burton, jan kiszka, rth, peter maydell,
claudio fontana
On 20/06/16 19:08, Paolo Bonzini wrote:
>>> The patch series changes things in stages.
>>>
>>> First we move the break/watchpoints into an array, which is more
>>> amenable to RCU control than the QLIST. We then control the lifetime
>>> of references to break/watchpoint data by removing long-held
>>> references in the target code and getting information when needed
>>> from the core. Then we stop dynamically allocating the
>>> watch/breakpoint data and store it directly in the array, which makes
>>> iteration across the list a bit more cache friendly than chasing
>>> pointers. Finally, addition and removal of elements of the array are
>>> put under RCU control. This ensures there is always a safe array of
>>> data to check in the run-loop.
>> I'm a little bit unsure whether we really want to complicate things
>> with RCU. Why don't we simply protect the lists with a mutex, given
>> that no contention is expected? BTW, when it comes to debugging, I
>> suppose we don't expect great performance anyway.
> Mutexes do introduce some overhead. The breakpoints list is mostly
> touched during translation, but the watchpoints list isn't, so we could
> use tb_lock for breakpoints and a separate per-CPU mutex for
> watchpoints. That could indeed work.
It's a good idea to use tb_lock for breakpoints. Actually, we could use
spinlocks for watchpoints, if we're careful. But I don't think this can
be so performance-critical.
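For illustration only (not an existing QEMU primitive, and shown as a
single global flag rather than a per-CPU field for brevity), the
spinlock could be as simple as a C11 test-and-set lock taken around
both the list updates and the check in check_watchpoint():

    #include <stdatomic.h>

    static atomic_flag watchpoint_lock = ATOMIC_FLAG_INIT;

    static inline void watchpoint_spin_lock(void)
    {
        while (atomic_flag_test_and_set_explicit(&watchpoint_lock,
                                                 memory_order_acquire)) {
            /* busy-wait: the critical sections are tiny */
        }
    }

    static inline void watchpoint_spin_unlock(void)
    {
        atomic_flag_clear_explicit(&watchpoint_lock, memory_order_release);
    }

The downside is that every watchpoint check then pays for an atomic
operation even when nothing contends, which is exactly what the RCU
approach avoids on the read side.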
Kind regards,
Sergey