* [Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub
@ 2008-12-12 23:52 Lionel Landwerlin
2008-12-13 0:00 ` Lionel Landwerlin
0 siblings, 1 reply; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-12 23:52 UTC (permalink / raw)
To: qemu-devel
I know the commit is pretty big... I hope it applies cleanly.
The data shared between CPUs should probably be locked more carefully.
Regards,
>From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
Date: Sat, 13 Dec 2008 00:32:04 +0100
Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
When using gdb with qemu (via gdbstub), if your emulated
application is multithreaded and does a segfault then qemu
crashes.
Qemu crashes because the break/watch points are shared between
cpus. The TAILQ structure which handles the list of break/watch
points is copied inside each CPUState structure. When the last
breakpoint is removed (this happens on a segfault), it is
removed across all cpus, but because of the copied TAILQ
structure the same breakpoint can be freed N times, where N is
the current number of cpus.
Signed-off-by: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
---
cpu-defs.h | 10 ++--
cpu-exec.c | 12 ++--
exec.c | 59 ++++++++++---------
target-alpha/translate.c | 4 +-
target-arm/translate.c | 4 +-
target-cris/translate.c | 80 +++++++++++++-------------
target-i386/helper.c | 26 ++++----
target-i386/translate.c | 146 +++++++++++++++++++++++-----------------------
target-m68k/translate.c | 4 +-
target-mips/translate.c | 4 +-
target-ppc/translate.c | 4 +-
target-sh4/translate.c | 5 +-
target-sparc/translate.c | 4 +-
13 files changed, 182 insertions(+), 180 deletions(-)
diff --git a/cpu-defs.h b/cpu-defs.h
index ed8c001..17732fa 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -123,9 +123,9 @@ typedef struct CPUTLBEntry {
target_phys_addr_t addend;
#endif
/* padding to get a power of two size */
- uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
- (sizeof(target_ulong) * 3 +
- ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
sizeof(target_phys_addr_t))];
} CPUTLBEntry;
@@ -189,10 +189,10 @@ typedef struct CPUWatchpoint {
\
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
- TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
+ TAILQ_HEAD(breakpoints_head, CPUBreakpoint) *breakpoints; \
int singlestep_enabled; \
\
- TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
+ TAILQ_HEAD(watchpoints_head, CPUWatchpoint) *watchpoints; \
CPUWatchpoint *watchpoint_hit; \
\
struct GDBRegisterState *gdb_regs; \
diff --git a/cpu-exec.c b/cpu-exec.c
index 9a35a59..8950a0a 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -198,7 +198,7 @@ static void cpu_handle_debug_exception(CPUState *env)
CPUWatchpoint *wp;
if (!env->watchpoint_hit)
- TAILQ_FOREACH(wp, &env->watchpoints, entry)
+ TAILQ_FOREACH(wp, env->watchpoints, entry)
wp->flags &= ~BP_WATCHPOINT_HIT;
if (debug_excp_handler)
@@ -378,10 +378,10 @@ int cpu_exec(CPUState *env1)
do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (((env->hflags2 & HF2_VINTR_MASK) &&
+ (((env->hflags2 & HF2_VINTR_MASK) &&
(env->hflags2 & HF2_HIF_MASK)) ||
- (!(env->hflags2 & HF2_VINTR_MASK) &&
- (env->eflags & IF_MASK &&
+ (!(env->hflags2 & HF2_VINTR_MASK) &&
+ (env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
int intno;
svm_check_intercept(SVM_EXIT_INTR);
@@ -396,7 +396,7 @@ int cpu_exec(CPUState *env1)
next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
} else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
- (env->eflags & IF_MASK) &&
+ (env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
/* FIXME: this should respect TPR */
@@ -1485,7 +1485,7 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = uc->uc_mcontext.sc_iaoq[0];
/* FIXME: compute is_write */
is_write = 0;
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write,
&uc->uc_sigmask, puc);
}
diff --git a/exec.c b/exec.c
index 105812f..4c7219a 100644
--- a/exec.c
+++ b/exec.c
@@ -209,21 +209,21 @@ static void map_exec(void *addr, long size)
DWORD old_protect;
VirtualProtect(addr, size,
PAGE_EXECUTE_READWRITE, &old_protect);
-
+
}
#else
static void map_exec(void *addr, long size)
{
unsigned long start, end, page_size;
-
+
page_size = getpagesize();
start = (unsigned long)addr;
start &= ~(page_size - 1);
-
+
end = (unsigned long)addr + size;
end += page_size - 1;
end &= ~(page_size - 1);
-
+
mprotect((void *)start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC);
}
@@ -273,7 +273,7 @@ static void page_init(void)
(1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
page_set_flags(startaddr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(endaddr),
- PAGE_RESERVED);
+ PAGE_RESERVED);
}
} while (!feof(f));
fclose(f);
@@ -314,7 +314,7 @@ static inline PageDesc *page_find_alloc(target_ulong index)
unsigned long addr = h2g(p);
page_set_flags(addr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(addr + len),
- PAGE_RESERVED);
+ PAGE_RESERVED);
}
#else
p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
@@ -420,7 +420,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
/* The code gen buffer location may have constraints depending on
the host cpu and OS */
-#if defined(__linux__)
+#if defined(__linux__)
{
int flags;
void *start = NULL;
@@ -467,7 +467,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = (800 * 1024 * 1024);
#endif
code_gen_buffer = mmap(addr, code_gen_buffer_size,
- PROT_WRITE | PROT_READ | PROT_EXEC,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
flags, -1, 0);
if (code_gen_buffer == MAP_FAILED) {
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
@@ -484,7 +484,7 @@ static void code_gen_alloc(unsigned long tb_size)
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
map_exec(code_gen_prologue, sizeof(code_gen_prologue));
- code_gen_buffer_max_size = code_gen_buffer_size -
+ code_gen_buffer_max_size = code_gen_buffer_size -
code_gen_max_block_size();
code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
@@ -535,6 +535,7 @@ void cpu_exec_init(CPUState *env)
{
CPUState **penv;
int cpu_index;
+ fprintf (stderr, "\n\nin %s\n\n", __func__);
env->next_cpu = NULL;
penv = &first_cpu;
@@ -544,8 +545,10 @@ void cpu_exec_init(CPUState *env)
cpu_index++;
}
env->cpu_index = cpu_index;
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ env->breakpoints = malloc (sizeof (*env->breakpoints));
+ env->watchpoints = malloc (sizeof (*env->watchpoints));
+ TAILQ_INIT(env->breakpoints);
+ TAILQ_INIT(env->watchpoints);
*penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
@@ -1329,9 +1332,9 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ TAILQ_INSERT_HEAD(env->watchpoints, wp, entry);
else
- TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+ TAILQ_INSERT_TAIL(env->watchpoints, wp, entry);
tlb_flush_page(env, addr);
@@ -1347,7 +1350,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
target_ulong len_mask = ~(len - 1);
CPUWatchpoint *wp;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(env, wp);
@@ -1360,7 +1363,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
- TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+ TAILQ_REMOVE(env->watchpoints, watchpoint, entry);
tlb_flush_page(env, watchpoint->vaddr);
@@ -1372,7 +1375,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
CPUWatchpoint *wp, *next;
- TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+ TAILQ_FOREACH_SAFE(wp, env->watchpoints, entry, next) {
if (wp->flags & mask)
cpu_watchpoint_remove_by_ref(env, wp);
}
@@ -1394,9 +1397,9 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ TAILQ_INSERT_HEAD(env->breakpoints, bp, entry);
else
- TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+ TAILQ_INSERT_TAIL(env->breakpoints, bp, entry);
breakpoint_invalidate(env, pc);
@@ -1414,7 +1417,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(env, bp);
return 0;
@@ -1430,10 +1433,8 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
-
+ TAILQ_REMOVE(env->breakpoints, breakpoint, entry);
breakpoint_invalidate(env, breakpoint->pc);
-
qemu_free(breakpoint);
#endif
}
@@ -1444,7 +1445,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
- TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+ TAILQ_FOREACH_SAFE(bp, env->breakpoints, entry, next) {
if (bp->flags & mask)
cpu_breakpoint_remove_by_ref(env, bp);
}
@@ -1672,11 +1673,11 @@ static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset (&env->tb_jmp_cache[i], 0,
+ memset (&env->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
- memset (&env->tb_jmp_cache[i], 0,
+ memset (&env->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
@@ -1981,7 +1982,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
@@ -2542,7 +2543,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
@@ -3267,7 +3268,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
tb = tb_find_pc((unsigned long)retaddr);
if (!tb) {
- cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+ cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
retaddr);
}
n = env->icount_decr.u16.low + tb->icount;
@@ -3345,7 +3346,7 @@ void dump_exec_info(FILE *f,
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %ld/%ld\n",
code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
- cpu_fprintf(f, "TB count %d/%d\n",
+ cpu_fprintf(f, "TB count %d/%d\n",
nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 7e8e644..62aa5f0 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2363,8 +2363,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
gen_icount_start();
for (ret = 0; ret == 0;) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 0650bc3..8ac1f6b 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -8651,8 +8651,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
}
#endif
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_set_condexec(dc);
gen_set_pc_im(dc->pc);
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 242ef9c..ae976b1 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -116,7 +116,7 @@ typedef struct DisasContext {
#define JMP_NOJMP 0
#define JMP_DIRECT 1
#define JMP_INDIRECT 2
- int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
+ int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
uint32_t jmp_pc;
int delayed_branch;
@@ -214,9 +214,9 @@ static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
else if (r == PR_SRS)
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
else {
- if (r == PR_PID)
+ if (r == PR_PID)
gen_helper_tlb_flush_pid(tn);
- if (dc->tb_flags & S_FLAG && r == PR_SPC)
+ if (dc->tb_flags & S_FLAG && r == PR_SPC)
gen_helper_spc_write(tn);
else if (r == PR_CCS)
dc->cpustate_changed = 1;
@@ -452,7 +452,7 @@ static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
l1 = gen_new_label();
- /*
+ /*
* d <<= 1
* if (d >= s)
* d -= s;
@@ -483,7 +483,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
-
+
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
@@ -505,7 +505,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
tcg_gen_shri_tl(x, x, 4);
tcg_gen_and_tl(x, x, c);
- tcg_gen_add_tl(d, d, x);
+ tcg_gen_add_tl(d, d, x);
tcg_temp_free(x);
tcg_temp_free(c);
}
@@ -516,7 +516,7 @@ static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
-
+
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
@@ -744,10 +744,10 @@ static void cris_evaluate_flags(DisasContext *dc)
}
if (dc->flagx_known) {
if (dc->flags_x)
- tcg_gen_ori_tl(cpu_PR[PR_CCS],
+ tcg_gen_ori_tl(cpu_PR[PR_CCS],
cpu_PR[PR_CCS], X_FLAG);
else
- tcg_gen_andi_tl(cpu_PR[PR_CCS],
+ tcg_gen_andi_tl(cpu_PR[PR_CCS],
cpu_PR[PR_CCS], ~X_FLAG);
}
@@ -762,9 +762,9 @@ static void cris_cc_mask(DisasContext *dc, unsigned int mask)
if (!mask) {
dc->update_cc = 0;
return;
- }
+ }
- /* Check if we need to evaluate the condition codes due to
+ /* Check if we need to evaluate the condition codes due to
CC overlaying. */
ovl = (dc->cc_mask ^ mask) & ~mask;
if (ovl) {
@@ -798,7 +798,7 @@ static inline void cris_update_cc_x(DisasContext *dc)
}
/* Update cc prior to executing ALU op. Needs source operands untouched. */
-static void cris_pre_alu_update_cc(DisasContext *dc, int op,
+static void cris_pre_alu_update_cc(DisasContext *dc, int op,
TCGv dst, TCGv src, int size)
{
if (dc->update_cc) {
@@ -822,7 +822,7 @@ static void cris_pre_alu_update_cc(DisasContext *dc, int op,
static inline void cris_update_result(DisasContext *dc, TCGv res)
{
if (dc->update_cc) {
- if (dc->cc_size == 4 &&
+ if (dc->cc_size == 4 &&
(dc->cc_op == CC_OP_SUB
|| dc->cc_op == CC_OP_ADD))
return;
@@ -831,7 +831,7 @@ static inline void cris_update_result(DisasContext *dc, TCGv res)
}
/* Returns one if the write back stage should execute. */
-static void cris_alu_op_exec(DisasContext *dc, int op,
+static void cris_alu_op_exec(DisasContext *dc, int op,
TCGv dst, TCGv a, TCGv b, int size)
{
/* Emit the ALU insns. */
@@ -1003,19 +1003,19 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
switch (cond) {
case CC_EQ:
if (arith_opt || move_opt) {
- /* If cc_result is zero, T0 should be
+ /* If cc_result is zero, T0 should be
non-zero otherwise T0 should be zero. */
int l1;
l1 = gen_new_label();
tcg_gen_movi_tl(cc, 0);
- tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
+ tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
0, l1);
tcg_gen_movi_tl(cc, 1);
gen_set_label(l1);
}
else {
cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc,
+ tcg_gen_andi_tl(cc,
cpu_PR[PR_CCS], Z_FLAG);
}
break;
@@ -1055,7 +1055,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
if (dc->cc_size == 1)
bits = 7;
else if (dc->cc_size == 2)
- bits = 15;
+ bits = 15;
tcg_gen_shri_tl(cc, cc_result, bits);
tcg_gen_xori_tl(cc, cc, 1);
@@ -1073,7 +1073,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
if (dc->cc_size == 1)
bits = 7;
else if (dc->cc_size == 2)
- bits = 15;
+ bits = 15;
tcg_gen_shri_tl(cc, cc_result, 31);
}
@@ -1188,7 +1188,7 @@ static void cris_store_direct_jmp(DisasContext *dc)
}
}
-static void cris_prepare_cc_branch (DisasContext *dc,
+static void cris_prepare_cc_branch (DisasContext *dc,
int offset, int cond)
{
/* This helps us re-schedule the micro-code to insns in delay-slots
@@ -1232,7 +1232,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
tcg_gen_qemu_ld64(dst, addr, mem_index);
}
-static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
+static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign)
{
int mem_index = cpu_mmu_index(dc->env);
@@ -1407,7 +1407,7 @@ static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
}
} else
imm = ldl_code(dc->pc + 2);
-
+
tcg_gen_movi_tl(dst, imm);
dc->postinc = 0;
} else {
@@ -2703,7 +2703,7 @@ static unsigned int dec_move_pm(DisasContext *dc)
memsize = preg_sizes[dc->op2];
DIS(fprintf (logfile, "move.%c $p%u, [$r%u%s\n",
- memsize_char(memsize),
+ memsize_char(memsize),
dc->op2, dc->op1, dc->postinc ? "+]" : "]"));
/* prepare store. Address in T0, value in T1. */
@@ -2993,7 +2993,7 @@ static unsigned int dec_rfe_etc(DisasContext *dc)
tcg_gen_movi_tl(env_pc, dc->pc + 2);
/* Breaks start at 16 in the exception vector. */
- t_gen_mov_env_TN(trap_vector,
+ t_gen_mov_env_TN(trap_vector,
tcg_const_tl(dc->op1 + 16));
t_gen_raise_exception(EXCP_BREAK);
dc->is_jmp = DISAS_UPDATE;
@@ -3189,8 +3189,8 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
{
CPUBreakpoint *bp;
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
cris_evaluate_flags (dc);
tcg_gen_movi_tl(env_pc, dc->pc);
@@ -3210,27 +3210,27 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
* to give SW a hint that the exception actually hit on the dslot.
*
* CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
- * the core and any jmp to an odd addresses will mask off that lsb. It is
+ * the core and any jmp to an odd addresses will mask off that lsb. It is
* simply there to let sw know there was an exception on a dslot.
*
* When the software returns from an exception, the branch will re-execute.
* On QEMU care needs to be taken when a branch+delayslot sequence is broken
* and the branch and delayslot dont share pages.
*
- * The TB contaning the branch insn will set up env->btarget and evaluate
- * env->btaken. When the translation loop exits we will note that the branch
+ * The TB contaning the branch insn will set up env->btarget and evaluate
+ * env->btaken. When the translation loop exits we will note that the branch
* sequence is broken and let env->dslot be the size of the branch insn (those
* vary in length).
*
* The TB contaning the delayslot will have the PC of its real insn (i.e no lsb
- * set). It will also expect to have env->dslot setup with the size of the
- * delay slot so that env->pc - env->dslot point to the branch insn. This TB
- * will execute the dslot and take the branch, either to btarget or just one
+ * set). It will also expect to have env->dslot setup with the size of the
+ * delay slot so that env->pc - env->dslot point to the branch insn. This TB
+ * will execute the dslot and take the branch, either to btarget or just one
* insn ahead.
*
- * When exceptions occur, we check for env->dslot in do_interrupt to detect
+ * When exceptions occur, we check for env->dslot in do_interrupt to detect
* broken branch sequences and setup $erp accordingly (i.e let it point to the
- * branch and set lsb). Then env->dslot gets cleared so that the exception
+ * branch and set lsb). Then env->dslot gets cleared so that the exception
* handler can enter. When returning from exceptions (jump $erp) the lsb gets
* masked off and we will reexecute the branch insn.
*
@@ -3299,7 +3299,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
search_pc, dc->pc, dc->ppc,
(unsigned long long)tb->flags,
env->btarget, (unsigned)tb->flags & 7,
- env->pregs[PR_CCS],
+ env->pregs[PR_CCS],
env->pregs[PR_PID], env->pregs[PR_USP],
env->regs[0], env->regs[1], env->regs[2], env->regs[3],
env->regs[4], env->regs[5], env->regs[6], env->regs[7],
@@ -3345,7 +3345,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
gen_io_start();
dc->clear_x = 1;
- insn_len = cris_decoder(dc);
+ insn_len = cris_decoder(dc);
dc->ppc = dc->pc;
dc->pc += insn_len;
if (dc->clear_x)
@@ -3360,12 +3360,12 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
if (dc->delayed_branch == 0)
{
if (tb->flags & 7)
- t_gen_mov_env_TN(dslot,
+ t_gen_mov_env_TN(dslot,
tcg_const_tl(0));
if (dc->jmp == JMP_DIRECT) {
dc->is_jmp = DISAS_NEXT;
} else {
- t_gen_cc_jmp(env_btarget,
+ t_gen_cc_jmp(env_btarget,
tcg_const_tl(dc->pc));
dc->is_jmp = DISAS_JUMP;
}
@@ -3390,7 +3390,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
- && (dc->cpustate_changed || !dc->flagx_known
+ && (dc->cpustate_changed || !dc->flagx_known
|| (dc->flags_x != (tb->flags & X_FLAG)))) {
dc->is_jmp = DISAS_UPDATE;
tcg_gen_movi_tl(env_pc, npc);
@@ -3539,7 +3539,7 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
offsetof(CPUState, cc_mask),
"cc_mask");
- env_pc = tcg_global_mem_new(TCG_AREG0,
+ env_pc = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, pc),
"pc");
env_btarget = tcg_global_mem_new(TCG_AREG0,
diff --git a/target-i386/helper.c b/target-i386/helper.c
index f2d91df..103bad2 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -34,14 +34,14 @@
//#define DEBUG_MMU
-static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
- uint32_t *ext_features,
- uint32_t *ext2_features,
+static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
+ uint32_t *ext_features,
+ uint32_t *ext2_features,
uint32_t *ext3_features)
{
int i;
/* feature flags taken from "Intel Processor Identification and the CPUID
- * Instruction" and AMD's "CPUID Specification". In cases of disagreement
+ * Instruction" and AMD's "CPUID Specification". In cases of disagreement
* about feature names, the Linux name is used. */
static const char *feature_name[] = {
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
@@ -68,22 +68,22 @@ static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
*features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
*ext_features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
*ext2_features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
*ext3_features |= 1 << i;
return;
@@ -125,13 +125,13 @@ static x86_def_t x86_defs[] = {
.family = 6,
.model = 2,
.stepping = 3,
- .features = PPRO_FEATURES |
+ .features = PPRO_FEATURES |
/* these features are needed for Win64 and aren't fully implemented */
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
/* this feature is needed for Solaris and isn't fully implemented */
CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3,
- .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
+ .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.ext3_features = CPUID_EXT3_SVM,
@@ -1174,7 +1174,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
@@ -1364,7 +1364,7 @@ static void breakpoint_handler(CPUState *env)
cpu_resume_from_signal(env, NULL);
}
} else {
- TAILQ_FOREACH(bp, &env->breakpoints, entry)
+ TAILQ_FOREACH(bp, env->breakpoints, entry)
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, 1);
@@ -1575,7 +1575,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
-/* XXX: This value must match the one used in the MMU code. */
+/* XXX: This value must match the one used in the MMU code. */
if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
/* 64 bit processor */
#if defined(USE_KQEMU)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 423fca3..2ecf029 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -381,7 +381,7 @@ static inline void gen_op_addq_A0_im(int64_t val)
tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif
-
+
static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
@@ -462,7 +462,7 @@ static inline void gen_op_set_cc_op(int32_t val)
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
- if (shift != 0)
+ if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
@@ -504,7 +504,7 @@ static inline void gen_op_movq_A0_reg(int reg)
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
- if (shift != 0)
+ if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
@@ -661,7 +661,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
}
}
-static inline void gen_op_movl_T0_Dshift(int ot)
+static inline void gen_op_movl_T0_Dshift(int ot)
{
tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
@@ -953,7 +953,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
case CC_OP_SUBW:
case CC_OP_SUBL:
case CC_OP_SUBQ:
-
+
size = cc_op - CC_OP_SUBB;
switch(jcc_op) {
case JCC_Z:
@@ -984,28 +984,28 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
switch(size) {
case 0:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
case 1:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
#ifdef TARGET_X86_64
case 2:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
#endif
default:
- tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
0, l1);
break;
}
break;
-
+
case JCC_B:
cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
goto fast_jcc_b;
@@ -1037,7 +1037,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
}
tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
break;
-
+
case JCC_L:
cond = inv ? TCG_COND_GE : TCG_COND_LT;
goto fast_jcc_l;
@@ -1069,48 +1069,48 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
}
tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
break;
-
+
default:
goto slow_jcc;
}
break;
-
+
/* some jumps are easy to compute */
case CC_OP_ADDB:
case CC_OP_ADDW:
case CC_OP_ADDL:
case CC_OP_ADDQ:
-
+
case CC_OP_ADCB:
case CC_OP_ADCW:
case CC_OP_ADCL:
case CC_OP_ADCQ:
-
+
case CC_OP_SBBB:
case CC_OP_SBBW:
case CC_OP_SBBL:
case CC_OP_SBBQ:
-
+
case CC_OP_LOGICB:
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
-
+
case CC_OP_INCB:
case CC_OP_INCW:
case CC_OP_INCL:
case CC_OP_INCQ:
-
+
case CC_OP_DECB:
case CC_OP_DECW:
case CC_OP_DECL:
case CC_OP_DECQ:
-
+
case CC_OP_SHLB:
case CC_OP_SHLW:
case CC_OP_SHLL:
case CC_OP_SHLQ:
-
+
case CC_OP_SARB:
case CC_OP_SARW:
case CC_OP_SARL:
@@ -1129,7 +1129,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
default:
slow_jcc:
gen_setcc_slow_T0(s, jcc_op);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
cpu_T[0], 0, l1);
break;
}
@@ -1421,7 +1421,7 @@ static void gen_inc(DisasContext *s1, int ot, int d, int c)
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
-static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
int is_right, int is_arith)
{
target_ulong mask;
@@ -1463,7 +1463,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, op1);
-
+
/* update eflags if non zero shift */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -1484,7 +1484,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
else
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
-
+
gen_set_label(shift_label);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
@@ -1496,7 +1496,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
int is_right, int is_arith)
{
int mask;
-
+
if (ot == OT_QUAD)
mask = 0x3f;
else
@@ -1531,7 +1531,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, op1);
-
+
/* update eflags if non zero shift */
if (op2 != 0) {
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
@@ -1552,7 +1552,7 @@ static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
}
/* XXX: add faster immediate case */
-static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
int is_right)
{
target_ulong mask;
@@ -1586,12 +1586,12 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
shifts. */
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
-
+
if (ot <= OT_WORD)
tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
else
tcg_gen_mov_tl(cpu_tmp0, t1);
-
+
gen_extu(ot, t0);
tcg_gen_mov_tl(t2, t0);
@@ -1616,7 +1616,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
} else {
gen_op_mov_reg_v(ot, op1, t0);
}
-
+
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -1635,10 +1635,10 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
}
tcg_gen_andi_tl(t0, t0, CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
-
+
tcg_gen_discard_tl(cpu_cc_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
-
+
gen_set_label(label2);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
@@ -1649,7 +1649,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
}
/* XXX: add faster immediate = 1 case */
-static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
int is_right)
{
int label1;
@@ -1662,7 +1662,7 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
gen_op_ld_T0_A0(ot + s->mem_index);
else
gen_op_mov_TN_reg(ot, 0, op1);
-
+
if (is_right) {
switch (ot) {
case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
@@ -1695,13 +1695,13 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
tcg_gen_discard_tl(cpu_cc_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
-
+
gen_set_label(label1);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
-static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
+static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
int is_right)
{
int label1, label2, data_bits;
@@ -1735,7 +1735,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
shifts. */
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
-
+
tcg_gen_addi_tl(cpu_tmp5, t2, -1);
if (ot == OT_WORD) {
/* Note: we implement the Intel behaviour for shift count > 16 */
@@ -1746,7 +1746,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
-
+
/* only needed if count > 16, but a test would complicate */
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
@@ -1760,7 +1760,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
tcg_gen_shli_tl(t1, t1, 16);
tcg_gen_or_tl(t1, t1, t0);
tcg_gen_ext32u_tl(t1, t1);
-
+
tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
@@ -1783,13 +1783,13 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
tcg_gen_shl_tl(t1, t1, cpu_tmp5);
tcg_gen_or_tl(t0, t0, t1);
-
+
} else {
if (ot == OT_LONG)
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
-
+
tcg_gen_shl_tl(t0, t0, t2);
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
tcg_gen_shr_tl(t1, t1, cpu_tmp5);
@@ -1805,7 +1805,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
} else {
gen_op_mov_reg_v(ot, op1, t0);
}
-
+
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -2234,7 +2234,7 @@ static inline void gen_jcc(DisasContext *s, int b,
if (s->jmp_opt) {
l1 = gen_new_label();
gen_jcc1(s, cc_op, b, l1);
-
+
gen_goto_tb(s, 0, next_eip);
gen_set_label(l1);
@@ -2287,17 +2287,17 @@ static void gen_setcc(DisasContext *s, int b)
static inline void gen_op_movl_T0_seg(int seg_reg)
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
}
static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ tcg_gen_st32_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
- tcg_gen_st_tl(cpu_T[0], cpu_env,
+ tcg_gen_st_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].base));
}
@@ -2600,7 +2600,7 @@ static void gen_interrupt(DisasContext *s, int intno,
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(cur_eip);
- gen_helper_raise_interrupt(tcg_const_i32(intno),
+ gen_helper_raise_interrupt(tcg_const_i32(intno),
tcg_const_i32(next_eip - cur_eip));
s->is_jmp = 3;
}
@@ -3091,7 +3091,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
#endif
{
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
@@ -3101,14 +3101,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
#ifdef TARGET_X86_64
if (s->dflag == 2) {
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
} else
#endif
{
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
@@ -3240,13 +3240,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
}
@@ -3254,13 +3254,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
}
@@ -3376,7 +3376,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x050: /* movmskps */
rm = (modrm & 7) | REX_B(s);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
@@ -3384,7 +3384,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x150: /* movmskpd */
rm = (modrm & 7) | REX_B(s);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
@@ -4521,12 +4521,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - pc_start));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - s->cs_base));
}
gen_eob(s);
@@ -4793,7 +4793,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
gen_helper_cmpxchg16b(cpu_A0);
} else
-#endif
+#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
@@ -5368,7 +5368,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_fildl_FT0(cpu_tmp2_i32);
break;
case 2:
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_FT0(cpu_tmp1_i64);
break;
@@ -5407,7 +5407,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_fildl_ST0(cpu_tmp2_i32);
break;
case 2:
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_ST0(cpu_tmp1_i64);
break;
@@ -5429,7 +5429,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
@@ -5455,7 +5455,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
@@ -5537,13 +5537,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_fpop();
break;
case 0x3d: /* fildll */
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fildll_ST0(cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fpop();
break;
@@ -5931,7 +5931,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
- gen_check_io(s, ot, pc_start - s->cs_base,
+ gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
@@ -6122,7 +6122,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_iret_protected(tcg_const_i32(s->dflag),
+ gen_helper_iret_protected(tcg_const_i32(s->dflag),
tcg_const_i32(s->pc - s->cs_base));
s->cc_op = CC_OP_EFLAGS;
}
@@ -6644,7 +6644,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
TCGv_i32 tmp0;
gen_op_mov_TN_reg(OT_LONG, 0, reg);
-
+
tmp0 = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
tcg_gen_bswap_i32(tmp0, tmp0);
@@ -7014,7 +7014,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 4: /* STGI */
if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
+ !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
goto illegal_op;
if (s->cpl != 0) {
@@ -7035,8 +7035,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 6: /* SKINIT */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
+ if ((!(s->flags & HF_SVME_MASK) &&
+ !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
goto illegal_op;
gen_helper_skinit();
@@ -7608,8 +7608,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
gen_icount_start();
for(;;) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == pc_ptr) {
gen_debug(dc, pc_ptr - dc->cs_base);
break;
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index bc2fe2b..634f3d8 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2999,8 +2999,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 418b9ef..9ae1e35 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -8286,8 +8286,8 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
#endif
gen_icount_start();
while (ctx.bstate == BS_NONE) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index aa85ba7..f6b7eed 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7765,8 +7765,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
gen_icount_start();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.nip) {
gen_update_nip(&ctx, ctx.nip);
gen_helper_raise_debug();
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 287b4a3..e67ebba 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1798,8 +1798,9 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ fprintf (stderr, "\n\nin translate\n\n");
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 07b2624..fbe0ded 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -4816,8 +4816,8 @@ static inline void gen_intermediate_code_internal(TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
do {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (dc->pc != pc_start)
save_state(dc, cpu_cond);
--
1.5.6.5
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub
2008-12-12 23:52 [Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub Lionel Landwerlin
@ 2008-12-13 0:00 ` Lionel Landwerlin
2008-12-13 8:26 ` [Qemu-devel] " Jan Kiszka
2008-12-13 10:16 ` Jan Kiszka
0 siblings, 2 replies; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-13 0:00 UTC (permalink / raw)
To: qemu-devel
I just forgot to remove two leftover printf calls...
Here is the corrected patch:
>From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
Date: Sat, 13 Dec 2008 00:32:04 +0100
Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
When using gdb with qemu (via gdbstub), if your emulated
application is multithreaded and does a segfault then qemu
crashes.
Qemu crashes because the break/watch points are shared between
cpus. The TAILQ structure which handles the list of break/watch
points is copied inside each CPUState structure. When the last
breakpoint is removed (this happens on a segfault), it is
removed across all cpus but because of the copied TAILQ
structure a same breakpoint can be freed N times with N the
current number of cpus.
Signed-off-by: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
---
cpu-defs.h | 10 ++--
cpu-exec.c | 12 ++--
exec.c | 59 ++++++++++---------
target-alpha/translate.c | 4 +-
target-arm/translate.c | 4 +-
target-cris/translate.c | 80 +++++++++++++-------------
target-i386/helper.c | 26 ++++----
target-i386/translate.c | 146 +++++++++++++++++++++++-----------------------
target-m68k/translate.c | 4 +-
target-mips/translate.c | 4 +-
target-ppc/translate.c | 4 +-
target-sh4/translate.c | 5 +-
target-sparc/translate.c | 4 +-
13 files changed, 182 insertions(+), 180 deletions(-)
diff --git a/cpu-defs.h b/cpu-defs.h
index ed8c001..17732fa 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -123,9 +123,9 @@ typedef struct CPUTLBEntry {
target_phys_addr_t addend;
#endif
/* padding to get a power of two size */
- uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
- (sizeof(target_ulong) * 3 +
- ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
sizeof(target_phys_addr_t))];
} CPUTLBEntry;
@@ -189,10 +189,10 @@ typedef struct CPUWatchpoint {
\
/* from this point: preserved by CPU reset */ \
/* ice debug support */ \
- TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
+ TAILQ_HEAD(breakpoints_head, CPUBreakpoint) *breakpoints; \
int singlestep_enabled; \
\
- TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
+ TAILQ_HEAD(watchpoints_head, CPUWatchpoint) *watchpoints; \
CPUWatchpoint *watchpoint_hit; \
\
struct GDBRegisterState *gdb_regs; \
diff --git a/cpu-exec.c b/cpu-exec.c
index 9a35a59..8950a0a 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -198,7 +198,7 @@ static void cpu_handle_debug_exception(CPUState *env)
CPUWatchpoint *wp;
if (!env->watchpoint_hit)
- TAILQ_FOREACH(wp, &env->watchpoints, entry)
+ TAILQ_FOREACH(wp, env->watchpoints, entry)
wp->flags &= ~BP_WATCHPOINT_HIT;
if (debug_excp_handler)
@@ -378,10 +378,10 @@ int cpu_exec(CPUState *env1)
do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (((env->hflags2 & HF2_VINTR_MASK) &&
+ (((env->hflags2 & HF2_VINTR_MASK) &&
(env->hflags2 & HF2_HIF_MASK)) ||
- (!(env->hflags2 & HF2_VINTR_MASK) &&
- (env->eflags & IF_MASK &&
+ (!(env->hflags2 & HF2_VINTR_MASK) &&
+ (env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
int intno;
svm_check_intercept(SVM_EXIT_INTR);
@@ -396,7 +396,7 @@ int cpu_exec(CPUState *env1)
next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
} else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
- (env->eflags & IF_MASK) &&
+ (env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
/* FIXME: this should respect TPR */
@@ -1485,7 +1485,7 @@ int cpu_signal_handler(int host_signum, void *pinfo,
pc = uc->uc_mcontext.sc_iaoq[0];
/* FIXME: compute is_write */
is_write = 0;
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
+ return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write,
&uc->uc_sigmask, puc);
}
diff --git a/exec.c b/exec.c
index 105812f..4c7219a 100644
--- a/exec.c
+++ b/exec.c
@@ -209,21 +209,21 @@ static void map_exec(void *addr, long size)
DWORD old_protect;
VirtualProtect(addr, size,
PAGE_EXECUTE_READWRITE, &old_protect);
-
+
}
#else
static void map_exec(void *addr, long size)
{
unsigned long start, end, page_size;
-
+
page_size = getpagesize();
start = (unsigned long)addr;
start &= ~(page_size - 1);
-
+
end = (unsigned long)addr + size;
end += page_size - 1;
end &= ~(page_size - 1);
-
+
mprotect((void *)start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC);
}
@@ -273,7 +273,7 @@ static void page_init(void)
(1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
page_set_flags(startaddr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(endaddr),
- PAGE_RESERVED);
+ PAGE_RESERVED);
}
} while (!feof(f));
fclose(f);
@@ -314,7 +314,7 @@ static inline PageDesc *page_find_alloc(target_ulong index)
unsigned long addr = h2g(p);
page_set_flags(addr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(addr + len),
- PAGE_RESERVED);
+ PAGE_RESERVED);
}
#else
p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
@@ -420,7 +420,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
/* The code gen buffer location may have constraints depending on
the host cpu and OS */
-#if defined(__linux__)
+#if defined(__linux__)
{
int flags;
void *start = NULL;
@@ -467,7 +467,7 @@ static void code_gen_alloc(unsigned long tb_size)
code_gen_buffer_size = (800 * 1024 * 1024);
#endif
code_gen_buffer = mmap(addr, code_gen_buffer_size,
- PROT_WRITE | PROT_READ | PROT_EXEC,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
flags, -1, 0);
if (code_gen_buffer == MAP_FAILED) {
fprintf(stderr, "Could not allocate dynamic translator buffer\n");
@@ -484,7 +484,7 @@ static void code_gen_alloc(unsigned long tb_size)
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
map_exec(code_gen_prologue, sizeof(code_gen_prologue));
- code_gen_buffer_max_size = code_gen_buffer_size -
+ code_gen_buffer_max_size = code_gen_buffer_size -
code_gen_max_block_size();
code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
@@ -544,8 +545,10 @@ void cpu_exec_init(CPUState *env)
cpu_index++;
}
env->cpu_index = cpu_index;
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ env->breakpoints = malloc (sizeof (*env->breakpoints));
+ env->watchpoints = malloc (sizeof (*env->watchpoints));
+ TAILQ_INIT(env->breakpoints);
+ TAILQ_INIT(env->watchpoints);
*penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
@@ -1329,9 +1332,9 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ TAILQ_INSERT_HEAD(env->watchpoints, wp, entry);
else
- TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+ TAILQ_INSERT_TAIL(env->watchpoints, wp, entry);
tlb_flush_page(env, addr);
@@ -1347,7 +1350,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
target_ulong len_mask = ~(len - 1);
CPUWatchpoint *wp;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(env, wp);
@@ -1360,7 +1363,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
- TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+ TAILQ_REMOVE(env->watchpoints, watchpoint, entry);
tlb_flush_page(env, watchpoint->vaddr);
@@ -1372,7 +1375,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
CPUWatchpoint *wp, *next;
- TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+ TAILQ_FOREACH_SAFE(wp, env->watchpoints, entry, next) {
if (wp->flags & mask)
cpu_watchpoint_remove_by_ref(env, wp);
}
@@ -1394,9 +1397,9 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ TAILQ_INSERT_HEAD(env->breakpoints, bp, entry);
else
- TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+ TAILQ_INSERT_TAIL(env->breakpoints, bp, entry);
breakpoint_invalidate(env, pc);
@@ -1414,7 +1417,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(env, bp);
return 0;
@@ -1430,10 +1433,8 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
-
+ TAILQ_REMOVE(env->breakpoints, breakpoint, entry);
breakpoint_invalidate(env, breakpoint->pc);
-
qemu_free(breakpoint);
#endif
}
@@ -1444,7 +1445,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
- TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+ TAILQ_FOREACH_SAFE(bp, env->breakpoints, entry, next) {
if (bp->flags & mask)
cpu_breakpoint_remove_by_ref(env, bp);
}
@@ -1672,11 +1673,11 @@ static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset (&env->tb_jmp_cache[i], 0,
+ memset (&env->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
- memset (&env->tb_jmp_cache[i], 0,
+ memset (&env->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
@@ -1981,7 +1982,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
@@ -2542,7 +2543,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ TAILQ_FOREACH(wp, env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
@@ -3267,7 +3268,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
tb = tb_find_pc((unsigned long)retaddr);
if (!tb) {
- cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+ cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
retaddr);
}
n = env->icount_decr.u16.low + tb->icount;
@@ -3345,7 +3346,7 @@ void dump_exec_info(FILE *f,
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %ld/%ld\n",
code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
- cpu_fprintf(f, "TB count %d/%d\n",
+ cpu_fprintf(f, "TB count %d/%d\n",
nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 7e8e644..62aa5f0 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2363,8 +2363,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
gen_icount_start();
for (ret = 0; ret == 0;) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 0650bc3..8ac1f6b 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -8651,8 +8651,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
}
#endif
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_set_condexec(dc);
gen_set_pc_im(dc->pc);
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 242ef9c..ae976b1 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -116,7 +116,7 @@ typedef struct DisasContext {
#define JMP_NOJMP 0
#define JMP_DIRECT 1
#define JMP_INDIRECT 2
- int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
+ int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
uint32_t jmp_pc;
int delayed_branch;
@@ -214,9 +214,9 @@ static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
else if (r == PR_SRS)
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
else {
- if (r == PR_PID)
+ if (r == PR_PID)
gen_helper_tlb_flush_pid(tn);
- if (dc->tb_flags & S_FLAG && r == PR_SPC)
+ if (dc->tb_flags & S_FLAG && r == PR_SPC)
gen_helper_spc_write(tn);
else if (r == PR_CCS)
dc->cpustate_changed = 1;
@@ -452,7 +452,7 @@ static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
l1 = gen_new_label();
- /*
+ /*
* d <<= 1
* if (d >= s)
* d -= s;
@@ -483,7 +483,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
-
+
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
@@ -505,7 +505,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
tcg_gen_shri_tl(x, x, 4);
tcg_gen_and_tl(x, x, c);
- tcg_gen_add_tl(d, d, x);
+ tcg_gen_add_tl(d, d, x);
tcg_temp_free(x);
tcg_temp_free(c);
}
@@ -516,7 +516,7 @@ static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
if (dc->flagx_known) {
if (dc->flags_x) {
TCGv c;
-
+
c = tcg_temp_new();
t_gen_mov_TN_preg(c, PR_CCS);
/* C flag is already at bit 0. */
@@ -744,10 +744,10 @@ static void cris_evaluate_flags(DisasContext *dc)
}
if (dc->flagx_known) {
if (dc->flags_x)
- tcg_gen_ori_tl(cpu_PR[PR_CCS],
+ tcg_gen_ori_tl(cpu_PR[PR_CCS],
cpu_PR[PR_CCS], X_FLAG);
else
- tcg_gen_andi_tl(cpu_PR[PR_CCS],
+ tcg_gen_andi_tl(cpu_PR[PR_CCS],
cpu_PR[PR_CCS], ~X_FLAG);
}
@@ -762,9 +762,9 @@ static void cris_cc_mask(DisasContext *dc, unsigned int mask)
if (!mask) {
dc->update_cc = 0;
return;
- }
+ }
- /* Check if we need to evaluate the condition codes due to
+ /* Check if we need to evaluate the condition codes due to
CC overlaying. */
ovl = (dc->cc_mask ^ mask) & ~mask;
if (ovl) {
@@ -798,7 +798,7 @@ static inline void cris_update_cc_x(DisasContext *dc)
}
/* Update cc prior to executing ALU op. Needs source operands untouched. */
-static void cris_pre_alu_update_cc(DisasContext *dc, int op,
+static void cris_pre_alu_update_cc(DisasContext *dc, int op,
TCGv dst, TCGv src, int size)
{
if (dc->update_cc) {
@@ -822,7 +822,7 @@ static void cris_pre_alu_update_cc(DisasContext *dc, int op,
static inline void cris_update_result(DisasContext *dc, TCGv res)
{
if (dc->update_cc) {
- if (dc->cc_size == 4 &&
+ if (dc->cc_size == 4 &&
(dc->cc_op == CC_OP_SUB
|| dc->cc_op == CC_OP_ADD))
return;
@@ -831,7 +831,7 @@ static inline void cris_update_result(DisasContext *dc, TCGv res)
}
/* Returns one if the write back stage should execute. */
-static void cris_alu_op_exec(DisasContext *dc, int op,
+static void cris_alu_op_exec(DisasContext *dc, int op,
TCGv dst, TCGv a, TCGv b, int size)
{
/* Emit the ALU insns. */
@@ -1003,19 +1003,19 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
switch (cond) {
case CC_EQ:
if (arith_opt || move_opt) {
- /* If cc_result is zero, T0 should be
+ /* If cc_result is zero, T0 should be
non-zero otherwise T0 should be zero. */
int l1;
l1 = gen_new_label();
tcg_gen_movi_tl(cc, 0);
- tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
+ tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
0, l1);
tcg_gen_movi_tl(cc, 1);
gen_set_label(l1);
}
else {
cris_evaluate_flags(dc);
- tcg_gen_andi_tl(cc,
+ tcg_gen_andi_tl(cc,
cpu_PR[PR_CCS], Z_FLAG);
}
break;
@@ -1055,7 +1055,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
if (dc->cc_size == 1)
bits = 7;
else if (dc->cc_size == 2)
- bits = 15;
+ bits = 15;
tcg_gen_shri_tl(cc, cc_result, bits);
tcg_gen_xori_tl(cc, cc, 1);
@@ -1073,7 +1073,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
if (dc->cc_size == 1)
bits = 7;
else if (dc->cc_size == 2)
- bits = 15;
+ bits = 15;
tcg_gen_shri_tl(cc, cc_result, 31);
}
@@ -1188,7 +1188,7 @@ static void cris_store_direct_jmp(DisasContext *dc)
}
}
-static void cris_prepare_cc_branch (DisasContext *dc,
+static void cris_prepare_cc_branch (DisasContext *dc,
int offset, int cond)
{
/* This helps us re-schedule the micro-code to insns in delay-slots
@@ -1232,7 +1232,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
tcg_gen_qemu_ld64(dst, addr, mem_index);
}
-static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
+static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign)
{
int mem_index = cpu_mmu_index(dc->env);
@@ -1407,7 +1407,7 @@ static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
}
} else
imm = ldl_code(dc->pc + 2);
-
+
tcg_gen_movi_tl(dst, imm);
dc->postinc = 0;
} else {
@@ -2703,7 +2703,7 @@ static unsigned int dec_move_pm(DisasContext *dc)
memsize = preg_sizes[dc->op2];
DIS(fprintf (logfile, "move.%c $p%u, [$r%u%s\n",
- memsize_char(memsize),
+ memsize_char(memsize),
dc->op2, dc->op1, dc->postinc ? "+]" : "]"));
/* prepare store. Address in T0, value in T1. */
@@ -2993,7 +2993,7 @@ static unsigned int dec_rfe_etc(DisasContext *dc)
tcg_gen_movi_tl(env_pc, dc->pc + 2);
/* Breaks start at 16 in the exception vector. */
- t_gen_mov_env_TN(trap_vector,
+ t_gen_mov_env_TN(trap_vector,
tcg_const_tl(dc->op1 + 16));
t_gen_raise_exception(EXCP_BREAK);
dc->is_jmp = DISAS_UPDATE;
@@ -3189,8 +3189,8 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
{
CPUBreakpoint *bp;
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
cris_evaluate_flags (dc);
tcg_gen_movi_tl(env_pc, dc->pc);
@@ -3210,27 +3210,27 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
* to give SW a hint that the exception actually hit on the dslot.
*
* CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
- * the core and any jmp to an odd addresses will mask off that lsb. It is
+ * the core and any jmp to an odd addresses will mask off that lsb. It is
* simply there to let sw know there was an exception on a dslot.
*
* When the software returns from an exception, the branch will re-execute.
* On QEMU care needs to be taken when a branch+delayslot sequence is broken
* and the branch and delayslot dont share pages.
*
- * The TB contaning the branch insn will set up env->btarget and evaluate
- * env->btaken. When the translation loop exits we will note that the branch
+ * The TB contaning the branch insn will set up env->btarget and evaluate
+ * env->btaken. When the translation loop exits we will note that the branch
* sequence is broken and let env->dslot be the size of the branch insn (those
* vary in length).
*
* The TB contaning the delayslot will have the PC of its real insn (i.e no lsb
- * set). It will also expect to have env->dslot setup with the size of the
- * delay slot so that env->pc - env->dslot point to the branch insn. This TB
- * will execute the dslot and take the branch, either to btarget or just one
+ * set). It will also expect to have env->dslot setup with the size of the
+ * delay slot so that env->pc - env->dslot point to the branch insn. This TB
+ * will execute the dslot and take the branch, either to btarget or just one
* insn ahead.
*
- * When exceptions occur, we check for env->dslot in do_interrupt to detect
+ * When exceptions occur, we check for env->dslot in do_interrupt to detect
* broken branch sequences and setup $erp accordingly (i.e let it point to the
- * branch and set lsb). Then env->dslot gets cleared so that the exception
+ * branch and set lsb). Then env->dslot gets cleared so that the exception
* handler can enter. When returning from exceptions (jump $erp) the lsb gets
* masked off and we will reexecute the branch insn.
*
@@ -3299,7 +3299,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
search_pc, dc->pc, dc->ppc,
(unsigned long long)tb->flags,
env->btarget, (unsigned)tb->flags & 7,
- env->pregs[PR_CCS],
+ env->pregs[PR_CCS],
env->pregs[PR_PID], env->pregs[PR_USP],
env->regs[0], env->regs[1], env->regs[2], env->regs[3],
env->regs[4], env->regs[5], env->regs[6], env->regs[7],
@@ -3345,7 +3345,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
gen_io_start();
dc->clear_x = 1;
- insn_len = cris_decoder(dc);
+ insn_len = cris_decoder(dc);
dc->ppc = dc->pc;
dc->pc += insn_len;
if (dc->clear_x)
@@ -3360,12 +3360,12 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
if (dc->delayed_branch == 0)
{
if (tb->flags & 7)
- t_gen_mov_env_TN(dslot,
+ t_gen_mov_env_TN(dslot,
tcg_const_tl(0));
if (dc->jmp == JMP_DIRECT) {
dc->is_jmp = DISAS_NEXT;
} else {
- t_gen_cc_jmp(env_btarget,
+ t_gen_cc_jmp(env_btarget,
tcg_const_tl(dc->pc));
dc->is_jmp = DISAS_JUMP;
}
@@ -3390,7 +3390,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
gen_io_end();
/* Force an update if the per-tb cpu state has changed. */
if (dc->is_jmp == DISAS_NEXT
- && (dc->cpustate_changed || !dc->flagx_known
+ && (dc->cpustate_changed || !dc->flagx_known
|| (dc->flags_x != (tb->flags & X_FLAG)))) {
dc->is_jmp = DISAS_UPDATE;
tcg_gen_movi_tl(env_pc, npc);
@@ -3539,7 +3539,7 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
offsetof(CPUState, cc_mask),
"cc_mask");
- env_pc = tcg_global_mem_new(TCG_AREG0,
+ env_pc = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUState, pc),
"pc");
env_btarget = tcg_global_mem_new(TCG_AREG0,
diff --git a/target-i386/helper.c b/target-i386/helper.c
index f2d91df..103bad2 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -34,14 +34,14 @@
//#define DEBUG_MMU
-static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
- uint32_t *ext_features,
- uint32_t *ext2_features,
+static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
+ uint32_t *ext_features,
+ uint32_t *ext2_features,
uint32_t *ext3_features)
{
int i;
/* feature flags taken from "Intel Processor Identification and the CPUID
- * Instruction" and AMD's "CPUID Specification". In cases of disagreement
+ * Instruction" and AMD's "CPUID Specification". In cases of disagreement
* about feature names, the Linux name is used. */
static const char *feature_name[] = {
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
@@ -68,22 +68,22 @@ static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
*features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
*ext_features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
*ext2_features |= 1 << i;
return;
}
- for ( i = 0 ; i < 32 ; i++ )
+ for ( i = 0 ; i < 32 ; i++ )
if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
*ext3_features |= 1 << i;
return;
@@ -125,13 +125,13 @@ static x86_def_t x86_defs[] = {
.family = 6,
.model = 2,
.stepping = 3,
- .features = PPRO_FEATURES |
+ .features = PPRO_FEATURES |
/* these features are needed for Win64 and aren't fully implemented */
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
/* this feature is needed for Solaris and isn't fully implemented */
CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3,
- .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
+ .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.ext3_features = CPUID_EXT3_SVM,
@@ -1174,7 +1174,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
@@ -1364,7 +1364,7 @@ static void breakpoint_handler(CPUState *env)
cpu_resume_from_signal(env, NULL);
}
} else {
- TAILQ_FOREACH(bp, &env->breakpoints, entry)
+ TAILQ_FOREACH(bp, env->breakpoints, entry)
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, 1);
@@ -1575,7 +1575,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
-/* XXX: This value must match the one used in the MMU code. */
+/* XXX: This value must match the one used in the MMU code. */
if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
/* 64 bit processor */
#if defined(USE_KQEMU)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 423fca3..2ecf029 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -381,7 +381,7 @@ static inline void gen_op_addq_A0_im(int64_t val)
tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif
-
+
static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
@@ -462,7 +462,7 @@ static inline void gen_op_set_cc_op(int32_t val)
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
- if (shift != 0)
+ if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
@@ -504,7 +504,7 @@ static inline void gen_op_movq_A0_reg(int reg)
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
- if (shift != 0)
+ if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
@@ -661,7 +661,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
}
}
-static inline void gen_op_movl_T0_Dshift(int ot)
+static inline void gen_op_movl_T0_Dshift(int ot)
{
tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
@@ -953,7 +953,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
case CC_OP_SUBW:
case CC_OP_SUBL:
case CC_OP_SUBQ:
-
+
size = cc_op - CC_OP_SUBB;
switch(jcc_op) {
case JCC_Z:
@@ -984,28 +984,28 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
switch(size) {
case 0:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
case 1:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
#ifdef TARGET_X86_64
case 2:
tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
0, l1);
break;
#endif
default:
- tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
0, l1);
break;
}
break;
-
+
case JCC_B:
cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
goto fast_jcc_b;
@@ -1037,7 +1037,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
}
tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
break;
-
+
case JCC_L:
cond = inv ? TCG_COND_GE : TCG_COND_LT;
goto fast_jcc_l;
@@ -1069,48 +1069,48 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
}
tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
break;
-
+
default:
goto slow_jcc;
}
break;
-
+
/* some jumps are easy to compute */
case CC_OP_ADDB:
case CC_OP_ADDW:
case CC_OP_ADDL:
case CC_OP_ADDQ:
-
+
case CC_OP_ADCB:
case CC_OP_ADCW:
case CC_OP_ADCL:
case CC_OP_ADCQ:
-
+
case CC_OP_SBBB:
case CC_OP_SBBW:
case CC_OP_SBBL:
case CC_OP_SBBQ:
-
+
case CC_OP_LOGICB:
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
-
+
case CC_OP_INCB:
case CC_OP_INCW:
case CC_OP_INCL:
case CC_OP_INCQ:
-
+
case CC_OP_DECB:
case CC_OP_DECW:
case CC_OP_DECL:
case CC_OP_DECQ:
-
+
case CC_OP_SHLB:
case CC_OP_SHLW:
case CC_OP_SHLL:
case CC_OP_SHLQ:
-
+
case CC_OP_SARB:
case CC_OP_SARW:
case CC_OP_SARL:
@@ -1129,7 +1129,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
default:
slow_jcc:
gen_setcc_slow_T0(s, jcc_op);
- tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
+ tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
cpu_T[0], 0, l1);
break;
}
@@ -1421,7 +1421,7 @@ static void gen_inc(DisasContext *s1, int ot, int d, int c)
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
-static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
int is_right, int is_arith)
{
target_ulong mask;
@@ -1463,7 +1463,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, op1);
-
+
/* update eflags if non zero shift */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -1484,7 +1484,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
else
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
-
+
gen_set_label(shift_label);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
@@ -1496,7 +1496,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
int is_right, int is_arith)
{
int mask;
-
+
if (ot == OT_QUAD)
mask = 0x3f;
else
@@ -1531,7 +1531,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, op1);
-
+
/* update eflags if non zero shift */
if (op2 != 0) {
tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
@@ -1552,7 +1552,7 @@ static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
}
/* XXX: add faster immediate case */
-static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
int is_right)
{
target_ulong mask;
@@ -1586,12 +1586,12 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
shifts. */
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
-
+
if (ot <= OT_WORD)
tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
else
tcg_gen_mov_tl(cpu_tmp0, t1);
-
+
gen_extu(ot, t0);
tcg_gen_mov_tl(t2, t0);
@@ -1616,7 +1616,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
} else {
gen_op_mov_reg_v(ot, op1, t0);
}
-
+
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -1635,10 +1635,10 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
}
tcg_gen_andi_tl(t0, t0, CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
-
+
tcg_gen_discard_tl(cpu_cc_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
-
+
gen_set_label(label2);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
@@ -1649,7 +1649,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
}
/* XXX: add faster immediate = 1 case */
-static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
+static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
int is_right)
{
int label1;
@@ -1662,7 +1662,7 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
gen_op_ld_T0_A0(ot + s->mem_index);
else
gen_op_mov_TN_reg(ot, 0, op1);
-
+
if (is_right) {
switch (ot) {
case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
@@ -1695,13 +1695,13 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
tcg_gen_discard_tl(cpu_cc_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
-
+
gen_set_label(label1);
s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
-static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
+static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
int is_right)
{
int label1, label2, data_bits;
@@ -1735,7 +1735,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
shifts. */
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
-
+
tcg_gen_addi_tl(cpu_tmp5, t2, -1);
if (ot == OT_WORD) {
/* Note: we implement the Intel behaviour for shift count > 16 */
@@ -1746,7 +1746,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
tcg_gen_ext32u_tl(t0, t0);
tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
-
+
/* only needed if count > 16, but a test would complicate */
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
@@ -1760,7 +1760,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
tcg_gen_shli_tl(t1, t1, 16);
tcg_gen_or_tl(t1, t1, t0);
tcg_gen_ext32u_tl(t1, t1);
-
+
tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
@@ -1783,13 +1783,13 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
tcg_gen_shl_tl(t1, t1, cpu_tmp5);
tcg_gen_or_tl(t0, t0, t1);
-
+
} else {
if (ot == OT_LONG)
tcg_gen_ext32u_tl(t1, t1);
tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
-
+
tcg_gen_shl_tl(t0, t0, t2);
tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
tcg_gen_shr_tl(t1, t1, cpu_tmp5);
@@ -1805,7 +1805,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
} else {
gen_op_mov_reg_v(ot, op1, t0);
}
-
+
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -2234,7 +2234,7 @@ static inline void gen_jcc(DisasContext *s, int b,
if (s->jmp_opt) {
l1 = gen_new_label();
gen_jcc1(s, cc_op, b, l1);
-
+
gen_goto_tb(s, 0, next_eip);
gen_set_label(l1);
@@ -2287,17 +2287,17 @@ static void gen_setcc(DisasContext *s, int b)
static inline void gen_op_movl_T0_seg(int seg_reg)
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
}
static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
- tcg_gen_st32_tl(cpu_T[0], cpu_env,
+ tcg_gen_st32_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].selector));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
- tcg_gen_st_tl(cpu_T[0], cpu_env,
+ tcg_gen_st_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[seg_reg].base));
}
@@ -2600,7 +2600,7 @@ static void gen_interrupt(DisasContext *s, int intno,
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(cur_eip);
- gen_helper_raise_interrupt(tcg_const_i32(intno),
+ gen_helper_raise_interrupt(tcg_const_i32(intno),
tcg_const_i32(next_eip - cur_eip));
s->is_jmp = 3;
}
@@ -3091,7 +3091,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
#endif
{
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
@@ -3101,14 +3101,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
#ifdef TARGET_X86_64
if (s->dflag == 2) {
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
} else
#endif
{
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
@@ -3240,13 +3240,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
}
@@ -3254,13 +3254,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- tcg_gen_ld_i64(cpu_T[0], cpu_env,
+ tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
- tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
+ tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
}
@@ -3376,7 +3376,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x050: /* movmskps */
rm = (modrm & 7) | REX_B(s);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
@@ -3384,7 +3384,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x150: /* movmskpd */
rm = (modrm & 7) | REX_B(s);
- tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
@@ -4521,12 +4521,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - pc_start));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
- tcg_const_i32(dflag),
+ tcg_const_i32(dflag),
tcg_const_i32(s->pc - s->cs_base));
}
gen_eob(s);
@@ -4793,7 +4793,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
gen_helper_cmpxchg16b(cpu_A0);
} else
-#endif
+#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
@@ -5368,7 +5368,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_fildl_FT0(cpu_tmp2_i32);
break;
case 2:
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_FT0(cpu_tmp1_i64);
break;
@@ -5407,7 +5407,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_fildl_ST0(cpu_tmp2_i32);
break;
case 2:
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_ST0(cpu_tmp1_i64);
break;
@@ -5429,7 +5429,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
@@ -5455,7 +5455,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
@@ -5537,13 +5537,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_fpop();
break;
case 0x3d: /* fildll */
- tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fildll_ST0(cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64);
- tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
+ tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fpop();
break;
@@ -5931,7 +5931,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
- gen_check_io(s, ot, pc_start - s->cs_base,
+ gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
@@ -6122,7 +6122,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_helper_iret_protected(tcg_const_i32(s->dflag),
+ gen_helper_iret_protected(tcg_const_i32(s->dflag),
tcg_const_i32(s->pc - s->cs_base));
s->cc_op = CC_OP_EFLAGS;
}
@@ -6644,7 +6644,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
TCGv_i32 tmp0;
gen_op_mov_TN_reg(OT_LONG, 0, reg);
-
+
tmp0 = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
tcg_gen_bswap_i32(tmp0, tmp0);
@@ -7014,7 +7014,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 4: /* STGI */
if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
+ !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
goto illegal_op;
if (s->cpl != 0) {
@@ -7035,8 +7035,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 6: /* SKINIT */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
+ if ((!(s->flags & HF_SVME_MASK) &&
+ !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
goto illegal_op;
gen_helper_skinit();
@@ -7608,8 +7608,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
gen_icount_start();
for(;;) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == pc_ptr) {
gen_debug(dc, pc_ptr - dc->cs_base);
break;
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index bc2fe2b..634f3d8 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2999,8 +2999,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 418b9ef..9ae1e35 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -8286,8 +8286,8 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
#endif
gen_icount_start();
while (ctx.bstate == BS_NONE) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index aa85ba7..f6b7eed 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -7765,8 +7765,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
gen_icount_start();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == ctx.nip) {
gen_update_nip(&ctx, ctx.nip);
gen_helper_raise_debug();
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 287b4a3..e67ebba 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1798,8 +1798,8 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 07b2624..fbe0ded 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -4816,8 +4816,8 @@ static inline void gen_intermediate_code_internal(TranslationBlock * tb,
max_insns = CF_COUNT_MASK;
gen_icount_start();
do {
- if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
+ TAILQ_FOREACH(bp, env->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (dc->pc != pc_start)
save_state(dc, cpu_cond);
--
1.5.6.5
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 0:00 ` Lionel Landwerlin
@ 2008-12-13 8:26 ` Jan Kiszka
2008-12-13 10:16 ` Jan Kiszka
1 sibling, 0 replies; 12+ messages in thread
From: Jan Kiszka @ 2008-12-13 8:26 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 61266 bytes --]
Lionel Landwerlin wrote:
> I just forgot to remove 2 printf ...
> Here the good patch :
>
The patch still contains tons of unrelated changes. Please don't do
this, it makes reviewing very hard!
>
>
>
>>From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
> From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> Date: Sat, 13 Dec 2008 00:32:04 +0100
> Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
>
> When using gdb with qemu (via gdbstub), if your emulated
> application is multithreaded and does a segfault then qemu
> crashes.
>
> Qemu crashes because the break/watch points are shared between
> cpus. The TAILQ structure which handles the list of break/watch
> points is copied inside each CPUState structure. When the last
> breakpoint is removed (this happens on a segfault), it is
> removed across all cpus but because of the copied TAILQ
> structure a same breakpoint can be freed N times with N the
> current number of cpus.
I need to dig into this issue a bit further. I'm not yet sure what
happens here, but my gut feeling is that you are fixing a symptom, not
the core reason (watch/breakpoint structures were not designed to be
registered multiple times). Also, note that we need per-vcpu
breakpoint/watchpoint lists as they are also used to emulate per-vcpu
hardware watch/breakpoints! I think your patch breaks this.
Jan
>
> Signed-off-by: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> ---
> cpu-defs.h | 10 ++--
> cpu-exec.c | 12 ++--
> exec.c | 59 ++++++++++---------
> target-alpha/translate.c | 4 +-
> target-arm/translate.c | 4 +-
> target-cris/translate.c | 80 +++++++++++++-------------
> target-i386/helper.c | 26 ++++----
> target-i386/translate.c | 146 +++++++++++++++++++++++-----------------------
> target-m68k/translate.c | 4 +-
> target-mips/translate.c | 4 +-
> target-ppc/translate.c | 4 +-
> target-sh4/translate.c | 5 +-
> target-sparc/translate.c | 4 +-
> 13 files changed, 182 insertions(+), 180 deletions(-)
>
> diff --git a/cpu-defs.h b/cpu-defs.h
> index ed8c001..17732fa 100644
> --- a/cpu-defs.h
> +++ b/cpu-defs.h
> @@ -123,9 +123,9 @@ typedef struct CPUTLBEntry {
> target_phys_addr_t addend;
> #endif
> /* padding to get a power of two size */
> - uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
> - (sizeof(target_ulong) * 3 +
> - ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
> + uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
> + (sizeof(target_ulong) * 3 +
> + ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
> sizeof(target_phys_addr_t))];
> } CPUTLBEntry;
>
> @@ -189,10 +189,10 @@ typedef struct CPUWatchpoint {
> \
> /* from this point: preserved by CPU reset */ \
> /* ice debug support */ \
> - TAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
> + TAILQ_HEAD(breakpoints_head, CPUBreakpoint) *breakpoints; \
> int singlestep_enabled; \
> \
> - TAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
> + TAILQ_HEAD(watchpoints_head, CPUWatchpoint) *watchpoints; \
> CPUWatchpoint *watchpoint_hit; \
> \
> struct GDBRegisterState *gdb_regs; \
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 9a35a59..8950a0a 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -198,7 +198,7 @@ static void cpu_handle_debug_exception(CPUState *env)
> CPUWatchpoint *wp;
>
> if (!env->watchpoint_hit)
> - TAILQ_FOREACH(wp, &env->watchpoints, entry)
> + TAILQ_FOREACH(wp, env->watchpoints, entry)
> wp->flags &= ~BP_WATCHPOINT_HIT;
>
> if (debug_excp_handler)
> @@ -378,10 +378,10 @@ int cpu_exec(CPUState *env1)
> do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
> next_tb = 0;
> } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
> - (((env->hflags2 & HF2_VINTR_MASK) &&
> + (((env->hflags2 & HF2_VINTR_MASK) &&
> (env->hflags2 & HF2_HIF_MASK)) ||
> - (!(env->hflags2 & HF2_VINTR_MASK) &&
> - (env->eflags & IF_MASK &&
> + (!(env->hflags2 & HF2_VINTR_MASK) &&
> + (env->eflags & IF_MASK &&
> !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
> int intno;
> svm_check_intercept(SVM_EXIT_INTR);
> @@ -396,7 +396,7 @@ int cpu_exec(CPUState *env1)
> next_tb = 0;
> #if !defined(CONFIG_USER_ONLY)
> } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
> - (env->eflags & IF_MASK) &&
> + (env->eflags & IF_MASK) &&
> !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
> int intno;
> /* FIXME: this should respect TPR */
> @@ -1485,7 +1485,7 @@ int cpu_signal_handler(int host_signum, void *pinfo,
> pc = uc->uc_mcontext.sc_iaoq[0];
> /* FIXME: compute is_write */
> is_write = 0;
> - return handle_cpu_signal(pc, (unsigned long)info->si_addr,
> + return handle_cpu_signal(pc, (unsigned long)info->si_addr,
> is_write,
> &uc->uc_sigmask, puc);
> }
> diff --git a/exec.c b/exec.c
> index 105812f..4c7219a 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -209,21 +209,21 @@ static void map_exec(void *addr, long size)
> DWORD old_protect;
> VirtualProtect(addr, size,
> PAGE_EXECUTE_READWRITE, &old_protect);
> -
> +
> }
> #else
> static void map_exec(void *addr, long size)
> {
> unsigned long start, end, page_size;
> -
> +
> page_size = getpagesize();
> start = (unsigned long)addr;
> start &= ~(page_size - 1);
> -
> +
> end = (unsigned long)addr + size;
> end += page_size - 1;
> end &= ~(page_size - 1);
> -
> +
> mprotect((void *)start, end - start,
> PROT_READ | PROT_WRITE | PROT_EXEC);
> }
> @@ -273,7 +273,7 @@ static void page_init(void)
> (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
> page_set_flags(startaddr & TARGET_PAGE_MASK,
> TARGET_PAGE_ALIGN(endaddr),
> - PAGE_RESERVED);
> + PAGE_RESERVED);
> }
> } while (!feof(f));
> fclose(f);
> @@ -314,7 +314,7 @@ static inline PageDesc *page_find_alloc(target_ulong index)
> unsigned long addr = h2g(p);
> page_set_flags(addr & TARGET_PAGE_MASK,
> TARGET_PAGE_ALIGN(addr + len),
> - PAGE_RESERVED);
> + PAGE_RESERVED);
> }
> #else
> p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
> @@ -420,7 +420,7 @@ static void code_gen_alloc(unsigned long tb_size)
> code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
> /* The code gen buffer location may have constraints depending on
> the host cpu and OS */
> -#if defined(__linux__)
> +#if defined(__linux__)
> {
> int flags;
> void *start = NULL;
> @@ -467,7 +467,7 @@ static void code_gen_alloc(unsigned long tb_size)
> code_gen_buffer_size = (800 * 1024 * 1024);
> #endif
> code_gen_buffer = mmap(addr, code_gen_buffer_size,
> - PROT_WRITE | PROT_READ | PROT_EXEC,
> + PROT_WRITE | PROT_READ | PROT_EXEC,
> flags, -1, 0);
> if (code_gen_buffer == MAP_FAILED) {
> fprintf(stderr, "Could not allocate dynamic translator buffer\n");
> @@ -484,7 +484,7 @@ static void code_gen_alloc(unsigned long tb_size)
> #endif
> #endif /* !USE_STATIC_CODE_GEN_BUFFER */
> map_exec(code_gen_prologue, sizeof(code_gen_prologue));
> - code_gen_buffer_max_size = code_gen_buffer_size -
> + code_gen_buffer_max_size = code_gen_buffer_size -
> code_gen_max_block_size();
> code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
> tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
> @@ -544,8 +545,10 @@ void cpu_exec_init(CPUState *env)
> cpu_index++;
> }
> env->cpu_index = cpu_index;
> - TAILQ_INIT(&env->breakpoints);
> - TAILQ_INIT(&env->watchpoints);
> + env->breakpoints = malloc (sizeof (*env->breakpoints));
> + env->watchpoints = malloc (sizeof (*env->watchpoints));
> + TAILQ_INIT(env->breakpoints);
> + TAILQ_INIT(env->watchpoints);
> *penv = env;
> #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
> register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
> @@ -1329,9 +1332,9 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
>
> /* keep all GDB-injected watchpoints in front */
> if (flags & BP_GDB)
> - TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
> + TAILQ_INSERT_HEAD(env->watchpoints, wp, entry);
> else
> - TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
> + TAILQ_INSERT_TAIL(env->watchpoints, wp, entry);
>
> tlb_flush_page(env, addr);
>
> @@ -1347,7 +1350,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
> target_ulong len_mask = ~(len - 1);
> CPUWatchpoint *wp;
>
> - TAILQ_FOREACH(wp, &env->watchpoints, entry) {
> + TAILQ_FOREACH(wp, env->watchpoints, entry) {
> if (addr == wp->vaddr && len_mask == wp->len_mask
> && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
> cpu_watchpoint_remove_by_ref(env, wp);
> @@ -1360,7 +1363,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
> /* Remove a specific watchpoint by reference. */
> void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
> {
> - TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
> + TAILQ_REMOVE(env->watchpoints, watchpoint, entry);
>
> tlb_flush_page(env, watchpoint->vaddr);
>
> @@ -1372,7 +1375,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
> {
> CPUWatchpoint *wp, *next;
>
> - TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
> + TAILQ_FOREACH_SAFE(wp, env->watchpoints, entry, next) {
> if (wp->flags & mask)
> cpu_watchpoint_remove_by_ref(env, wp);
> }
> @@ -1394,9 +1397,9 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
>
> /* keep all GDB-injected breakpoints in front */
> if (flags & BP_GDB)
> - TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
> + TAILQ_INSERT_HEAD(env->breakpoints, bp, entry);
> else
> - TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
> + TAILQ_INSERT_TAIL(env->breakpoints, bp, entry);
>
> breakpoint_invalidate(env, pc);
>
> @@ -1414,7 +1417,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
> #if defined(TARGET_HAS_ICE)
> CPUBreakpoint *bp;
>
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == pc && bp->flags == flags) {
> cpu_breakpoint_remove_by_ref(env, bp);
> return 0;
> @@ -1430,10 +1433,8 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
> void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
> {
> #if defined(TARGET_HAS_ICE)
> - TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
> -
> + TAILQ_REMOVE(env->breakpoints, breakpoint, entry);
> breakpoint_invalidate(env, breakpoint->pc);
> -
> qemu_free(breakpoint);
> #endif
> }
> @@ -1444,7 +1445,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
> #if defined(TARGET_HAS_ICE)
> CPUBreakpoint *bp, *next;
>
> - TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
> + TAILQ_FOREACH_SAFE(bp, env->breakpoints, entry, next) {
> if (bp->flags & mask)
> cpu_breakpoint_remove_by_ref(env, bp);
> }
> @@ -1672,11 +1673,11 @@ static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
> /* Discard jump cache entries for any tb which might potentially
> overlap the flushed page. */
> i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
> - memset (&env->tb_jmp_cache[i], 0,
> + memset (&env->tb_jmp_cache[i], 0,
> TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
>
> i = tb_jmp_cache_hash_page(addr);
> - memset (&env->tb_jmp_cache[i], 0,
> + memset (&env->tb_jmp_cache[i], 0,
> TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
> }
>
> @@ -1981,7 +1982,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
> code_address = address;
> /* Make accesses to pages with watchpoints go via the
> watchpoint trap routines. */
> - TAILQ_FOREACH(wp, &env->watchpoints, entry) {
> + TAILQ_FOREACH(wp, env->watchpoints, entry) {
> if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
> iotlb = io_mem_watch + paddr;
> /* TODO: The memory case can be optimized by not trapping
> @@ -2542,7 +2543,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
> return;
> }
> vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
> - TAILQ_FOREACH(wp, &env->watchpoints, entry) {
> + TAILQ_FOREACH(wp, env->watchpoints, entry) {
> if ((vaddr == (wp->vaddr & len_mask) ||
> (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
> wp->flags |= BP_WATCHPOINT_HIT;
> @@ -3267,7 +3268,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
>
> tb = tb_find_pc((unsigned long)retaddr);
> if (!tb) {
> - cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
> + cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
> retaddr);
> }
> n = env->icount_decr.u16.low + tb->icount;
> @@ -3345,7 +3346,7 @@ void dump_exec_info(FILE *f,
> cpu_fprintf(f, "Translation buffer state:\n");
> cpu_fprintf(f, "gen code size %ld/%ld\n",
> code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
> - cpu_fprintf(f, "TB count %d/%d\n",
> + cpu_fprintf(f, "TB count %d/%d\n",
> nb_tbs, code_gen_max_blocks);
> cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
> nb_tbs ? target_code_size / nb_tbs : 0,
> diff --git a/target-alpha/translate.c b/target-alpha/translate.c
> index 7e8e644..62aa5f0 100644
> --- a/target-alpha/translate.c
> +++ b/target-alpha/translate.c
> @@ -2363,8 +2363,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
>
> gen_icount_start();
> for (ret = 0; ret == 0;) {
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == ctx.pc) {
> gen_excp(&ctx, EXCP_DEBUG, 0);
> break;
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 0650bc3..8ac1f6b 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -8651,8 +8651,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
> }
> #endif
>
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == dc->pc) {
> gen_set_condexec(dc);
> gen_set_pc_im(dc->pc);
> diff --git a/target-cris/translate.c b/target-cris/translate.c
> index 242ef9c..ae976b1 100644
> --- a/target-cris/translate.c
> +++ b/target-cris/translate.c
> @@ -116,7 +116,7 @@ typedef struct DisasContext {
> #define JMP_NOJMP 0
> #define JMP_DIRECT 1
> #define JMP_INDIRECT 2
> - int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
> + int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
> uint32_t jmp_pc;
>
> int delayed_branch;
> @@ -214,9 +214,9 @@ static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
> else if (r == PR_SRS)
> tcg_gen_andi_tl(cpu_PR[r], tn, 3);
> else {
> - if (r == PR_PID)
> + if (r == PR_PID)
> gen_helper_tlb_flush_pid(tn);
> - if (dc->tb_flags & S_FLAG && r == PR_SPC)
> + if (dc->tb_flags & S_FLAG && r == PR_SPC)
> gen_helper_spc_write(tn);
> else if (r == PR_CCS)
> dc->cpustate_changed = 1;
> @@ -452,7 +452,7 @@ static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
>
> l1 = gen_new_label();
>
> - /*
> + /*
> * d <<= 1
> * if (d >= s)
> * d -= s;
> @@ -483,7 +483,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
> if (dc->flagx_known) {
> if (dc->flags_x) {
> TCGv c;
> -
> +
> c = tcg_temp_new();
> t_gen_mov_TN_preg(c, PR_CCS);
> /* C flag is already at bit 0. */
> @@ -505,7 +505,7 @@ static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
> tcg_gen_shri_tl(x, x, 4);
>
> tcg_gen_and_tl(x, x, c);
> - tcg_gen_add_tl(d, d, x);
> + tcg_gen_add_tl(d, d, x);
> tcg_temp_free(x);
> tcg_temp_free(c);
> }
> @@ -516,7 +516,7 @@ static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
> if (dc->flagx_known) {
> if (dc->flags_x) {
> TCGv c;
> -
> +
> c = tcg_temp_new();
> t_gen_mov_TN_preg(c, PR_CCS);
> /* C flag is already at bit 0. */
> @@ -744,10 +744,10 @@ static void cris_evaluate_flags(DisasContext *dc)
> }
> if (dc->flagx_known) {
> if (dc->flags_x)
> - tcg_gen_ori_tl(cpu_PR[PR_CCS],
> + tcg_gen_ori_tl(cpu_PR[PR_CCS],
> cpu_PR[PR_CCS], X_FLAG);
> else
> - tcg_gen_andi_tl(cpu_PR[PR_CCS],
> + tcg_gen_andi_tl(cpu_PR[PR_CCS],
> cpu_PR[PR_CCS], ~X_FLAG);
> }
>
> @@ -762,9 +762,9 @@ static void cris_cc_mask(DisasContext *dc, unsigned int mask)
> if (!mask) {
> dc->update_cc = 0;
> return;
> - }
> + }
>
> - /* Check if we need to evaluate the condition codes due to
> + /* Check if we need to evaluate the condition codes due to
> CC overlaying. */
> ovl = (dc->cc_mask ^ mask) & ~mask;
> if (ovl) {
> @@ -798,7 +798,7 @@ static inline void cris_update_cc_x(DisasContext *dc)
> }
>
> /* Update cc prior to executing ALU op. Needs source operands untouched. */
> -static void cris_pre_alu_update_cc(DisasContext *dc, int op,
> +static void cris_pre_alu_update_cc(DisasContext *dc, int op,
> TCGv dst, TCGv src, int size)
> {
> if (dc->update_cc) {
> @@ -822,7 +822,7 @@ static void cris_pre_alu_update_cc(DisasContext *dc, int op,
> static inline void cris_update_result(DisasContext *dc, TCGv res)
> {
> if (dc->update_cc) {
> - if (dc->cc_size == 4 &&
> + if (dc->cc_size == 4 &&
> (dc->cc_op == CC_OP_SUB
> || dc->cc_op == CC_OP_ADD))
> return;
> @@ -831,7 +831,7 @@ static inline void cris_update_result(DisasContext *dc, TCGv res)
> }
>
> /* Returns one if the write back stage should execute. */
> -static void cris_alu_op_exec(DisasContext *dc, int op,
> +static void cris_alu_op_exec(DisasContext *dc, int op,
> TCGv dst, TCGv a, TCGv b, int size)
> {
> /* Emit the ALU insns. */
> @@ -1003,19 +1003,19 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
> switch (cond) {
> case CC_EQ:
> if (arith_opt || move_opt) {
> - /* If cc_result is zero, T0 should be
> + /* If cc_result is zero, T0 should be
> non-zero otherwise T0 should be zero. */
> int l1;
> l1 = gen_new_label();
> tcg_gen_movi_tl(cc, 0);
> - tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
> + tcg_gen_brcondi_tl(TCG_COND_NE, cc_result,
> 0, l1);
> tcg_gen_movi_tl(cc, 1);
> gen_set_label(l1);
> }
> else {
> cris_evaluate_flags(dc);
> - tcg_gen_andi_tl(cc,
> + tcg_gen_andi_tl(cc,
> cpu_PR[PR_CCS], Z_FLAG);
> }
> break;
> @@ -1055,7 +1055,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
> if (dc->cc_size == 1)
> bits = 7;
> else if (dc->cc_size == 2)
> - bits = 15;
> + bits = 15;
>
> tcg_gen_shri_tl(cc, cc_result, bits);
> tcg_gen_xori_tl(cc, cc, 1);
> @@ -1073,7 +1073,7 @@ static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
> if (dc->cc_size == 1)
> bits = 7;
> else if (dc->cc_size == 2)
> - bits = 15;
> + bits = 15;
>
> tcg_gen_shri_tl(cc, cc_result, 31);
> }
> @@ -1188,7 +1188,7 @@ static void cris_store_direct_jmp(DisasContext *dc)
> }
> }
>
> -static void cris_prepare_cc_branch (DisasContext *dc,
> +static void cris_prepare_cc_branch (DisasContext *dc,
> int offset, int cond)
> {
> /* This helps us re-schedule the micro-code to insns in delay-slots
> @@ -1232,7 +1232,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
> tcg_gen_qemu_ld64(dst, addr, mem_index);
> }
>
> -static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
> +static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
> unsigned int size, int sign)
> {
> int mem_index = cpu_mmu_index(dc->env);
> @@ -1407,7 +1407,7 @@ static int dec_prep_move_m(DisasContext *dc, int s_ext, int memsize,
> }
> } else
> imm = ldl_code(dc->pc + 2);
> -
> +
> tcg_gen_movi_tl(dst, imm);
> dc->postinc = 0;
> } else {
> @@ -2703,7 +2703,7 @@ static unsigned int dec_move_pm(DisasContext *dc)
> memsize = preg_sizes[dc->op2];
>
> DIS(fprintf (logfile, "move.%c $p%u, [$r%u%s\n",
> - memsize_char(memsize),
> + memsize_char(memsize),
> dc->op2, dc->op1, dc->postinc ? "+]" : "]"));
>
> /* prepare store. Address in T0, value in T1. */
> @@ -2993,7 +2993,7 @@ static unsigned int dec_rfe_etc(DisasContext *dc)
> tcg_gen_movi_tl(env_pc, dc->pc + 2);
>
> /* Breaks start at 16 in the exception vector. */
> - t_gen_mov_env_TN(trap_vector,
> + t_gen_mov_env_TN(trap_vector,
> tcg_const_tl(dc->op1 + 16));
> t_gen_raise_exception(EXCP_BREAK);
> dc->is_jmp = DISAS_UPDATE;
> @@ -3189,8 +3189,8 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
> {
> CPUBreakpoint *bp;
>
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == dc->pc) {
> cris_evaluate_flags (dc);
> tcg_gen_movi_tl(env_pc, dc->pc);
> @@ -3210,27 +3210,27 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
> * to give SW a hint that the exception actually hit on the dslot.
> *
> * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
> - * the core and any jmp to an odd addresses will mask off that lsb. It is
> + * the core and any jmp to an odd addresses will mask off that lsb. It is
> * simply there to let sw know there was an exception on a dslot.
> *
> * When the software returns from an exception, the branch will re-execute.
> * On QEMU care needs to be taken when a branch+delayslot sequence is broken
> * and the branch and delayslot dont share pages.
> *
> - * The TB contaning the branch insn will set up env->btarget and evaluate
> - * env->btaken. When the translation loop exits we will note that the branch
> + * The TB contaning the branch insn will set up env->btarget and evaluate
> + * env->btaken. When the translation loop exits we will note that the branch
> * sequence is broken and let env->dslot be the size of the branch insn (those
> * vary in length).
> *
> * The TB contaning the delayslot will have the PC of its real insn (i.e no lsb
> - * set). It will also expect to have env->dslot setup with the size of the
> - * delay slot so that env->pc - env->dslot point to the branch insn. This TB
> - * will execute the dslot and take the branch, either to btarget or just one
> + * set). It will also expect to have env->dslot setup with the size of the
> + * delay slot so that env->pc - env->dslot point to the branch insn. This TB
> + * will execute the dslot and take the branch, either to btarget or just one
> * insn ahead.
> *
> - * When exceptions occur, we check for env->dslot in do_interrupt to detect
> + * When exceptions occur, we check for env->dslot in do_interrupt to detect
> * broken branch sequences and setup $erp accordingly (i.e let it point to the
> - * branch and set lsb). Then env->dslot gets cleared so that the exception
> + * branch and set lsb). Then env->dslot gets cleared so that the exception
> * handler can enter. When returning from exceptions (jump $erp) the lsb gets
> * masked off and we will reexecute the branch insn.
> *
> @@ -3299,7 +3299,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
> search_pc, dc->pc, dc->ppc,
> (unsigned long long)tb->flags,
> env->btarget, (unsigned)tb->flags & 7,
> - env->pregs[PR_CCS],
> + env->pregs[PR_CCS],
> env->pregs[PR_PID], env->pregs[PR_USP],
> env->regs[0], env->regs[1], env->regs[2], env->regs[3],
> env->regs[4], env->regs[5], env->regs[6], env->regs[7],
> @@ -3345,7 +3345,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
> gen_io_start();
> dc->clear_x = 1;
>
> - insn_len = cris_decoder(dc);
> + insn_len = cris_decoder(dc);
> dc->ppc = dc->pc;
> dc->pc += insn_len;
> if (dc->clear_x)
> @@ -3360,12 +3360,12 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
> if (dc->delayed_branch == 0)
> {
> if (tb->flags & 7)
> - t_gen_mov_env_TN(dslot,
> + t_gen_mov_env_TN(dslot,
> tcg_const_tl(0));
> if (dc->jmp == JMP_DIRECT) {
> dc->is_jmp = DISAS_NEXT;
> } else {
> - t_gen_cc_jmp(env_btarget,
> + t_gen_cc_jmp(env_btarget,
> tcg_const_tl(dc->pc));
> dc->is_jmp = DISAS_JUMP;
> }
> @@ -3390,7 +3390,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
> gen_io_end();
> /* Force an update if the per-tb cpu state has changed. */
> if (dc->is_jmp == DISAS_NEXT
> - && (dc->cpustate_changed || !dc->flagx_known
> + && (dc->cpustate_changed || !dc->flagx_known
> || (dc->flags_x != (tb->flags & X_FLAG)))) {
> dc->is_jmp = DISAS_UPDATE;
> tcg_gen_movi_tl(env_pc, npc);
> @@ -3539,7 +3539,7 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
> offsetof(CPUState, cc_mask),
> "cc_mask");
>
> - env_pc = tcg_global_mem_new(TCG_AREG0,
> + env_pc = tcg_global_mem_new(TCG_AREG0,
> offsetof(CPUState, pc),
> "pc");
> env_btarget = tcg_global_mem_new(TCG_AREG0,
> diff --git a/target-i386/helper.c b/target-i386/helper.c
> index f2d91df..103bad2 100644
> --- a/target-i386/helper.c
> +++ b/target-i386/helper.c
> @@ -34,14 +34,14 @@
>
> //#define DEBUG_MMU
>
> -static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
> - uint32_t *ext_features,
> - uint32_t *ext2_features,
> +static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
> + uint32_t *ext_features,
> + uint32_t *ext2_features,
> uint32_t *ext3_features)
> {
> int i;
> /* feature flags taken from "Intel Processor Identification and the CPUID
> - * Instruction" and AMD's "CPUID Specification". In cases of disagreement
> + * Instruction" and AMD's "CPUID Specification". In cases of disagreement
> * about feature names, the Linux name is used. */
> static const char *feature_name[] = {
> "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
> @@ -68,22 +68,22 @@ static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
> NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
> };
>
> - for ( i = 0 ; i < 32 ; i++ )
> + for ( i = 0 ; i < 32 ; i++ )
> if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
> *features |= 1 << i;
> return;
> }
> - for ( i = 0 ; i < 32 ; i++ )
> + for ( i = 0 ; i < 32 ; i++ )
> if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
> *ext_features |= 1 << i;
> return;
> }
> - for ( i = 0 ; i < 32 ; i++ )
> + for ( i = 0 ; i < 32 ; i++ )
> if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
> *ext2_features |= 1 << i;
> return;
> }
> - for ( i = 0 ; i < 32 ; i++ )
> + for ( i = 0 ; i < 32 ; i++ )
> if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
> *ext3_features |= 1 << i;
> return;
> @@ -125,13 +125,13 @@ static x86_def_t x86_defs[] = {
> .family = 6,
> .model = 2,
> .stepping = 3,
> - .features = PPRO_FEATURES |
> + .features = PPRO_FEATURES |
> /* these features are needed for Win64 and aren't fully implemented */
> CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
> /* this feature is needed for Solaris and isn't fully implemented */
> CPUID_PSE36,
> .ext_features = CPUID_EXT_SSE3,
> - .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
> + .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
> CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
> CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
> .ext3_features = CPUID_EXT3_SVM,
> @@ -1174,7 +1174,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
> error_code |= PG_ERROR_I_D_MASK;
> if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
> /* cr2 is not modified in case of exceptions */
> - stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
> + stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
> addr);
> } else {
> env->cr[2] = addr;
> @@ -1364,7 +1364,7 @@ static void breakpoint_handler(CPUState *env)
> cpu_resume_from_signal(env, NULL);
> }
> } else {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry)
> + TAILQ_FOREACH(bp, env->breakpoints, entry)
> if (bp->pc == env->eip) {
> if (bp->flags & BP_CPU) {
> check_hw_breakpoints(env, 1);
> @@ -1575,7 +1575,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
> break;
> case 0x80000008:
> /* virtual & phys address size in low 2 bytes. */
> -/* XXX: This value must match the one used in the MMU code. */
> +/* XXX: This value must match the one used in the MMU code. */
> if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
> /* 64 bit processor */
> #if defined(USE_KQEMU)
> diff --git a/target-i386/translate.c b/target-i386/translate.c
> index 423fca3..2ecf029 100644
> --- a/target-i386/translate.c
> +++ b/target-i386/translate.c
> @@ -381,7 +381,7 @@ static inline void gen_op_addq_A0_im(int64_t val)
> tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
> }
> #endif
> -
> +
> static void gen_add_A0_im(DisasContext *s, int val)
> {
> #ifdef TARGET_X86_64
> @@ -462,7 +462,7 @@ static inline void gen_op_set_cc_op(int32_t val)
> static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
> {
> tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
> - if (shift != 0)
> + if (shift != 0)
> tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
> tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
> #ifdef TARGET_X86_64
> @@ -504,7 +504,7 @@ static inline void gen_op_movq_A0_reg(int reg)
> static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
> {
> tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
> - if (shift != 0)
> + if (shift != 0)
> tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
> tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
> }
> @@ -661,7 +661,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
> }
> }
>
> -static inline void gen_op_movl_T0_Dshift(int ot)
> +static inline void gen_op_movl_T0_Dshift(int ot)
> {
> tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
> tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
> @@ -953,7 +953,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
> case CC_OP_SUBW:
> case CC_OP_SUBL:
> case CC_OP_SUBQ:
> -
> +
> size = cc_op - CC_OP_SUBB;
> switch(jcc_op) {
> case JCC_Z:
> @@ -984,28 +984,28 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
> switch(size) {
> case 0:
> tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
> - tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
> + tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
> 0, l1);
> break;
> case 1:
> tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
> - tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
> + tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
> 0, l1);
> break;
> #ifdef TARGET_X86_64
> case 2:
> tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
> - tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
> + tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
> 0, l1);
> break;
> #endif
> default:
> - tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
> + tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
> 0, l1);
> break;
> }
> break;
> -
> +
> case JCC_B:
> cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
> goto fast_jcc_b;
> @@ -1037,7 +1037,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
> }
> tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
> break;
> -
> +
> case JCC_L:
> cond = inv ? TCG_COND_GE : TCG_COND_LT;
> goto fast_jcc_l;
> @@ -1069,48 +1069,48 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
> }
> tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
> break;
> -
> +
> default:
> goto slow_jcc;
> }
> break;
> -
> +
> /* some jumps are easy to compute */
> case CC_OP_ADDB:
> case CC_OP_ADDW:
> case CC_OP_ADDL:
> case CC_OP_ADDQ:
> -
> +
> case CC_OP_ADCB:
> case CC_OP_ADCW:
> case CC_OP_ADCL:
> case CC_OP_ADCQ:
> -
> +
> case CC_OP_SBBB:
> case CC_OP_SBBW:
> case CC_OP_SBBL:
> case CC_OP_SBBQ:
> -
> +
> case CC_OP_LOGICB:
> case CC_OP_LOGICW:
> case CC_OP_LOGICL:
> case CC_OP_LOGICQ:
> -
> +
> case CC_OP_INCB:
> case CC_OP_INCW:
> case CC_OP_INCL:
> case CC_OP_INCQ:
> -
> +
> case CC_OP_DECB:
> case CC_OP_DECW:
> case CC_OP_DECL:
> case CC_OP_DECQ:
> -
> +
> case CC_OP_SHLB:
> case CC_OP_SHLW:
> case CC_OP_SHLL:
> case CC_OP_SHLQ:
> -
> +
> case CC_OP_SARB:
> case CC_OP_SARW:
> case CC_OP_SARL:
> @@ -1129,7 +1129,7 @@ static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
> default:
> slow_jcc:
> gen_setcc_slow_T0(s, jcc_op);
> - tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
> + tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
> cpu_T[0], 0, l1);
> break;
> }
> @@ -1421,7 +1421,7 @@ static void gen_inc(DisasContext *s1, int ot, int d, int c)
> tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
> }
>
> -static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
> +static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
> int is_right, int is_arith)
> {
> target_ulong mask;
> @@ -1463,7 +1463,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
> gen_op_st_T0_A0(ot + s->mem_index);
> else
> gen_op_mov_reg_T0(ot, op1);
> -
> +
> /* update eflags if non zero shift */
> if (s->cc_op != CC_OP_DYNAMIC)
> gen_op_set_cc_op(s->cc_op);
> @@ -1484,7 +1484,7 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
> tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
> else
> tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
> -
> +
> gen_set_label(shift_label);
> s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
>
> @@ -1496,7 +1496,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
> int is_right, int is_arith)
> {
> int mask;
> -
> +
> if (ot == OT_QUAD)
> mask = 0x3f;
> else
> @@ -1531,7 +1531,7 @@ static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
> gen_op_st_T0_A0(ot + s->mem_index);
> else
> gen_op_mov_reg_T0(ot, op1);
> -
> +
> /* update eflags if non zero shift */
> if (op2 != 0) {
> tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
> @@ -1552,7 +1552,7 @@ static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
> }
>
> /* XXX: add faster immediate case */
> -static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
> +static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
> int is_right)
> {
> target_ulong mask;
> @@ -1586,12 +1586,12 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
> shifts. */
> label1 = gen_new_label();
> tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
> -
> +
> if (ot <= OT_WORD)
> tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
> else
> tcg_gen_mov_tl(cpu_tmp0, t1);
> -
> +
> gen_extu(ot, t0);
> tcg_gen_mov_tl(t2, t0);
>
> @@ -1616,7 +1616,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
> } else {
> gen_op_mov_reg_v(ot, op1, t0);
> }
> -
> +
> /* update eflags */
> if (s->cc_op != CC_OP_DYNAMIC)
> gen_op_set_cc_op(s->cc_op);
> @@ -1635,10 +1635,10 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
> }
> tcg_gen_andi_tl(t0, t0, CC_C);
> tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
> -
> +
> tcg_gen_discard_tl(cpu_cc_dst);
> tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
> -
> +
> gen_set_label(label2);
> s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
>
> @@ -1649,7 +1649,7 @@ static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
> }
>
> /* XXX: add faster immediate = 1 case */
> -static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
> +static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
> int is_right)
> {
> int label1;
> @@ -1662,7 +1662,7 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
> gen_op_ld_T0_A0(ot + s->mem_index);
> else
> gen_op_mov_TN_reg(ot, 0, op1);
> -
> +
> if (is_right) {
> switch (ot) {
> case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
> @@ -1695,13 +1695,13 @@ static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
> tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
> tcg_gen_discard_tl(cpu_cc_dst);
> tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
> -
> +
> gen_set_label(label1);
> s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
> }
>
> /* XXX: add faster immediate case */
> -static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> +static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> int is_right)
> {
> int label1, label2, data_bits;
> @@ -1735,7 +1735,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> shifts. */
> label1 = gen_new_label();
> tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
> -
> +
> tcg_gen_addi_tl(cpu_tmp5, t2, -1);
> if (ot == OT_WORD) {
> /* Note: we implement the Intel behaviour for shift count > 16 */
> @@ -1746,7 +1746,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> tcg_gen_ext32u_tl(t0, t0);
>
> tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
> -
> +
> /* only needed if count > 16, but a test would complicate */
> tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
> tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
> @@ -1760,7 +1760,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> tcg_gen_shli_tl(t1, t1, 16);
> tcg_gen_or_tl(t1, t1, t0);
> tcg_gen_ext32u_tl(t1, t1);
> -
> +
> tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
> tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
> tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
> @@ -1783,13 +1783,13 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
> tcg_gen_shl_tl(t1, t1, cpu_tmp5);
> tcg_gen_or_tl(t0, t0, t1);
> -
> +
> } else {
> if (ot == OT_LONG)
> tcg_gen_ext32u_tl(t1, t1);
>
> tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
> -
> +
> tcg_gen_shl_tl(t0, t0, t2);
> tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
> tcg_gen_shr_tl(t1, t1, cpu_tmp5);
> @@ -1805,7 +1805,7 @@ static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
> } else {
> gen_op_mov_reg_v(ot, op1, t0);
> }
> -
> +
> /* update eflags */
> if (s->cc_op != CC_OP_DYNAMIC)
> gen_op_set_cc_op(s->cc_op);
> @@ -2234,7 +2234,7 @@ static inline void gen_jcc(DisasContext *s, int b,
> if (s->jmp_opt) {
> l1 = gen_new_label();
> gen_jcc1(s, cc_op, b, l1);
> -
> +
> gen_goto_tb(s, 0, next_eip);
>
> gen_set_label(l1);
> @@ -2287,17 +2287,17 @@ static void gen_setcc(DisasContext *s, int b)
>
> static inline void gen_op_movl_T0_seg(int seg_reg)
> {
> - tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
> + tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
> offsetof(CPUX86State,segs[seg_reg].selector));
> }
>
> static inline void gen_op_movl_seg_T0_vm(int seg_reg)
> {
> tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
> - tcg_gen_st32_tl(cpu_T[0], cpu_env,
> + tcg_gen_st32_tl(cpu_T[0], cpu_env,
> offsetof(CPUX86State,segs[seg_reg].selector));
> tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
> - tcg_gen_st_tl(cpu_T[0], cpu_env,
> + tcg_gen_st_tl(cpu_T[0], cpu_env,
> offsetof(CPUX86State,segs[seg_reg].base));
> }
>
> @@ -2600,7 +2600,7 @@ static void gen_interrupt(DisasContext *s, int intno,
> if (s->cc_op != CC_OP_DYNAMIC)
> gen_op_set_cc_op(s->cc_op);
> gen_jmp_im(cur_eip);
> - gen_helper_raise_interrupt(tcg_const_i32(intno),
> + gen_helper_raise_interrupt(tcg_const_i32(intno),
> tcg_const_i32(next_eip - cur_eip));
> s->is_jmp = 3;
> }
> @@ -3091,7 +3091,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
> #endif
> {
> gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
> - tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> + tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> offsetof(CPUX86State,fpregs[reg].mmx));
> tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
> gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
> @@ -3101,14 +3101,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
> #ifdef TARGET_X86_64
> if (s->dflag == 2) {
> gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
> - tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> + tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> offsetof(CPUX86State,xmm_regs[reg]));
> gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
> } else
> #endif
> {
> gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
> - tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> + tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> offsetof(CPUX86State,xmm_regs[reg]));
> tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
> gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
> @@ -3240,13 +3240,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
> case 0x7e: /* movd ea, mm */
> #ifdef TARGET_X86_64
> if (s->dflag == 2) {
> - tcg_gen_ld_i64(cpu_T[0], cpu_env,
> + tcg_gen_ld_i64(cpu_T[0], cpu_env,
> offsetof(CPUX86State,fpregs[reg].mmx));
> gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
> } else
> #endif
> {
> - tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
> + tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
> offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
> gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
> }
> @@ -3254,13 +3254,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
> case 0x17e: /* movd ea, xmm */
> #ifdef TARGET_X86_64
> if (s->dflag == 2) {
> - tcg_gen_ld_i64(cpu_T[0], cpu_env,
> + tcg_gen_ld_i64(cpu_T[0], cpu_env,
> offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
> gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
> } else
> #endif
> {
> - tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
> + tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
> offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
> gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
> }
> @@ -3376,7 +3376,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
> break;
> case 0x050: /* movmskps */
> rm = (modrm & 7) | REX_B(s);
> - tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> + tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> offsetof(CPUX86State,xmm_regs[rm]));
> gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
> tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
> @@ -3384,7 +3384,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
> break;
> case 0x150: /* movmskpd */
> rm = (modrm & 7) | REX_B(s);
> - tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> + tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
> offsetof(CPUX86State,xmm_regs[rm]));
> gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
> tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
> @@ -4521,12 +4521,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> gen_jmp_im(pc_start - s->cs_base);
> tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
> gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
> - tcg_const_i32(dflag),
> + tcg_const_i32(dflag),
> tcg_const_i32(s->pc - pc_start));
> } else {
> tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
> gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
> - tcg_const_i32(dflag),
> + tcg_const_i32(dflag),
> tcg_const_i32(s->pc - s->cs_base));
> }
> gen_eob(s);
> @@ -4793,7 +4793,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> gen_lea_modrm(s, modrm, ®_addr, &offset_addr);
> gen_helper_cmpxchg16b(cpu_A0);
> } else
> -#endif
> +#endif
> {
> if (!(s->cpuid_features & CPUID_CX8))
> goto illegal_op;
> @@ -5368,7 +5368,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> gen_helper_fildl_FT0(cpu_tmp2_i32);
> break;
> case 2:
> - tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
> + tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
> (s->mem_index >> 2) - 1);
> gen_helper_fldl_FT0(cpu_tmp1_i64);
> break;
> @@ -5407,7 +5407,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> gen_helper_fildl_ST0(cpu_tmp2_i32);
> break;
> case 2:
> - tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
> + tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
> (s->mem_index >> 2) - 1);
> gen_helper_fldl_ST0(cpu_tmp1_i64);
> break;
> @@ -5429,7 +5429,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> break;
> case 2:
> gen_helper_fisttll_ST0(cpu_tmp1_i64);
> - tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
> + tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
> (s->mem_index >> 2) - 1);
> break;
> case 3:
> @@ -5455,7 +5455,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> break;
> case 2:
> gen_helper_fstl_ST0(cpu_tmp1_i64);
> - tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
> + tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
> (s->mem_index >> 2) - 1);
> break;
> case 3:
> @@ -5537,13 +5537,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> gen_helper_fpop();
> break;
> case 0x3d: /* fildll */
> - tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
> + tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
> (s->mem_index >> 2) - 1);
> gen_helper_fildll_ST0(cpu_tmp1_i64);
> break;
> case 0x3f: /* fistpll */
> gen_helper_fistll_ST0(cpu_tmp1_i64);
> - tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
> + tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
> (s->mem_index >> 2) - 1);
> gen_helper_fpop();
> break;
> @@ -5931,7 +5931,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> ot = dflag ? OT_LONG : OT_WORD;
> gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
> gen_op_andl_T0_ffff();
> - gen_check_io(s, ot, pc_start - s->cs_base,
> + gen_check_io(s, ot, pc_start - s->cs_base,
> SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
> if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
> gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
> @@ -6122,7 +6122,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> if (s->cc_op != CC_OP_DYNAMIC)
> gen_op_set_cc_op(s->cc_op);
> gen_jmp_im(pc_start - s->cs_base);
> - gen_helper_iret_protected(tcg_const_i32(s->dflag),
> + gen_helper_iret_protected(tcg_const_i32(s->dflag),
> tcg_const_i32(s->pc - s->cs_base));
> s->cc_op = CC_OP_EFLAGS;
> }
> @@ -6644,7 +6644,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> {
> TCGv_i32 tmp0;
> gen_op_mov_TN_reg(OT_LONG, 0, reg);
> -
> +
> tmp0 = tcg_temp_new_i32();
> tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
> tcg_gen_bswap_i32(tmp0, tmp0);
> @@ -7014,7 +7014,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> break;
> case 4: /* STGI */
> if ((!(s->flags & HF_SVME_MASK) &&
> - !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
> + !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
> !s->pe)
> goto illegal_op;
> if (s->cpl != 0) {
> @@ -7035,8 +7035,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
> }
> break;
> case 6: /* SKINIT */
> - if ((!(s->flags & HF_SVME_MASK) &&
> - !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
> + if ((!(s->flags & HF_SVME_MASK) &&
> + !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
> !s->pe)
> goto illegal_op;
> gen_helper_skinit();
> @@ -7608,8 +7608,8 @@ static inline void gen_intermediate_code_internal(CPUState *env,
>
> gen_icount_start();
> for(;;) {
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == pc_ptr) {
> gen_debug(dc, pc_ptr - dc->cs_base);
> break;
> diff --git a/target-m68k/translate.c b/target-m68k/translate.c
> index bc2fe2b..634f3d8 100644
> --- a/target-m68k/translate.c
> +++ b/target-m68k/translate.c
> @@ -2999,8 +2999,8 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
> do {
> pc_offset = dc->pc - pc_start;
> gen_throws_exception = NULL;
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == dc->pc) {
> gen_exception(dc, dc->pc, EXCP_DEBUG);
> dc->is_jmp = DISAS_JUMP;
> diff --git a/target-mips/translate.c b/target-mips/translate.c
> index 418b9ef..9ae1e35 100644
> --- a/target-mips/translate.c
> +++ b/target-mips/translate.c
> @@ -8286,8 +8286,8 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
> #endif
> gen_icount_start();
> while (ctx.bstate == BS_NONE) {
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == ctx.pc) {
> save_cpu_state(&ctx, 1);
> ctx.bstate = BS_BRANCH;
> diff --git a/target-ppc/translate.c b/target-ppc/translate.c
> index aa85ba7..f6b7eed 100644
> --- a/target-ppc/translate.c
> +++ b/target-ppc/translate.c
> @@ -7765,8 +7765,8 @@ static always_inline void gen_intermediate_code_internal (CPUState *env,
> gen_icount_start();
> /* Set env in case of segfault during code fetch */
> while (ctx.exception == POWERPC_EXCP_NONE && gen_opc_ptr < gen_opc_end) {
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == ctx.nip) {
> gen_update_nip(&ctx, ctx.nip);
> gen_helper_raise_debug();
> diff --git a/target-sh4/translate.c b/target-sh4/translate.c
> index 287b4a3..e67ebba 100644
> --- a/target-sh4/translate.c
> +++ b/target-sh4/translate.c
> @@ -1798,8 +1798,8 @@ gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
> max_insns = CF_COUNT_MASK;
> gen_icount_start();
> while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (ctx.pc == bp->pc) {
> /* We have hit a breakpoint - make sure PC is up-to-date */
> tcg_gen_movi_i32(cpu_pc, ctx.pc);
> diff --git a/target-sparc/translate.c b/target-sparc/translate.c
> index 07b2624..fbe0ded 100644
> --- a/target-sparc/translate.c
> +++ b/target-sparc/translate.c
> @@ -4816,8 +4816,8 @@ static inline void gen_intermediate_code_internal(TranslationBlock * tb,
> max_insns = CF_COUNT_MASK;
> gen_icount_start();
> do {
> - if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
> - TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + if (unlikely(!TAILQ_EMPTY(env->breakpoints))) {
> + TAILQ_FOREACH(bp, env->breakpoints, entry) {
> if (bp->pc == dc->pc) {
> if (dc->pc != pc_start)
> save_state(dc, cpu_cond);
> --
> 1.5.6.5
>
>
>
>
>
>
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 0:00 ` Lionel Landwerlin
2008-12-13 8:26 ` [Qemu-devel] " Jan Kiszka
@ 2008-12-13 10:16 ` Jan Kiszka
2008-12-13 12:31 ` Lionel Landwerlin
1 sibling, 1 reply; 12+ messages in thread
From: Jan Kiszka @ 2008-12-13 10:16 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 1847 bytes --]
Lionel Landwerlin wrote:
> I just forgot to remove 2 printf ...
> Here the good patch :
>
>
>
>
>>From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
> From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> Date: Sat, 13 Dec 2008 00:32:04 +0100
> Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
>
> When using gdb with qemu (via gdbstub), if your emulated
> application is multithreaded and does a segfault then qemu
> crashes.
>
> Qemu crashes because the break/watch points are shared between
> cpus. The TAILQ structure which handles the list of break/watch
> points is copied inside each CPUState structure. When the last
> breakpoint is removed (this happens on a segfault), it is
> removed across all cpus but because of the copied TAILQ
> structure a same breakpoint can be freed N times with N the
> current number of cpus.
OK, now I got the problem: user space emulation spawns additional VCPUs
to emulate fork. Those VCPUs are cloned via cpu_copy which simply
duplicates the CPUState of the parent, including the breakpoint and
watchpoint TAILQ headers. This is doomed to fail.
But your approach to let the cloned VCPU point to the same TAILQ header
as its parent is not correct as well. It will cause troubles to gdbstub
which manages breakpoints on all VCPUs by adding duplicate instances on
a per-VCPU base. If you inject a breakpoint before a fork and then
remove it afterwards, gdbstub will report an error because it will only
find the breakpoint once, not n times (n = number of VCPUs).
What you have to do is to cleanly duplicate the breakpoint and
watchpoint lists on cpu_copy (filter out BP_CPU types for cleanness
reasons, although they do not occur in user emulation ATM).
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 10:16 ` Jan Kiszka
@ 2008-12-13 12:31 ` Lionel Landwerlin
2008-12-13 12:59 ` Jan Kiszka
0 siblings, 1 reply; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-13 12:31 UTC (permalink / raw)
To: qemu-devel
Le samedi 13 décembre 2008 à 11:16 +0100, Jan Kiszka a écrit :
> Lionel Landwerlin wrote:
> > I just forgot to remove 2 printf ...
> > Here the good patch :
> >
> >
> >
> >
> >>From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
> > From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> > Date: Sat, 13 Dec 2008 00:32:04 +0100
> > Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
> >
> > When using gdb with qemu (via gdbstub), if your emulated
> > application is multithreaded and does a segfault then qemu
> > crashes.
> >
> > Qemu crashes because the break/watch points are shared between
> > cpus. The TAILQ structure which handles the list of break/watch
> > points is copied inside each CPUState structure. When the last
> > breakpoint is removed (this happens on a segfault), it is
> > removed across all cpus but because of the copied TAILQ
> > structure a same breakpoint can be freed N times with N the
> > current number of cpus.
>
> OK, now I got the problem: user space emulation spawns additional VCPUs
> to emulate fork. Those VCPUs are cloned via cpu_copy which simply
> duplicates the CPUState of the parent, including the breakpoint and
> watchpoint TAILQ headers. This is doomed to fail.
>
> But your approach to let the cloned VCPU point to the same TAILQ header
> as its parent is not correct as well. It will cause troubles to gdbstub
> which manages breakpoints on all VCPUs by adding duplicate instances on
> a per-VCPU base. If you inject a breakpoint before a fork and then
> remove it afterwards, gdbstub will report an error because it will only
> find the breakpoint once, not n times (n = number of VCPUs).
>
> What you have to do is to cleanly duplicate the breakpoint and
> watchpoint lists on cpu_copy (filter out BP_CPU types for cleanness
> reasons, although they do not occur in user emulation ATM).
Hello Jan,
Thanks for reviewing my patch.
Duplication of all break/watchpoints will make the patch bigger,
because it will require break/watchpoint_copy functions etc...
Another problem is that threads are also emulated by vcpus in user
emulation. But we also need to share break/watchpoints between threads.
This explains the way my patch does things.
Finally, this makes the modification a lot more complicated than what I
expected, because breakpoints on emulated forks should not apply.
--
Lionel Landwerlin
O p e n W i d e 14, rue Gaillon 75002 Paris
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 12:31 ` Lionel Landwerlin
@ 2008-12-13 12:59 ` Jan Kiszka
2008-12-13 13:21 ` Lionel Landwerlin
0 siblings, 1 reply; 12+ messages in thread
From: Jan Kiszka @ 2008-12-13 12:59 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 3009 bytes --]
Lionel Landwerlin wrote:
> Le samedi 13 décembre 2008 à 11:16 +0100, Jan Kiszka a écrit :
>> Lionel Landwerlin wrote:
>>> I just forgot to remove 2 printf ...
>>> Here the good patch :
>>>
>>>
>>>
>>>
>>> >From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
>>> From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
>>> Date: Sat, 13 Dec 2008 00:32:04 +0100
>>> Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
>>>
>>> When using gdb with qemu (via gdbstub), if your emulated
>>> application is multithreaded and does a segfault then qemu
>>> crashes.
>>>
>>> Qemu crashes because the break/watch points are shared between
>>> cpus. The TAILQ structure which handles the list of break/watch
>>> points is copied inside each CPUState structure. When the last
>>> breakpoint is removed (this happens on a segfault), it is
>>> removed across all cpus but because of the copied TAILQ
>>> structure a same breakpoint can be freed N times with N the
>>> current number of cpus.
>> OK, now I got the problem: user space emulation spawns additional VCPUs
>> to emulate fork. Those VCPUs are cloned via cpu_copy which simply
>> duplicates the CPUState of the parent, including the breakpoint and
>> watchpoint TAILQ headers. This is doomed to fail.
>>
>> But your approach to let the cloned VCPU point to the same TAILQ header
>> as its parent is not correct as well. It will cause troubles to gdbstub
>> which manages breakpoints on all VCPUs by adding duplicate instances on
>> a per-VCPU base. If you inject a breakpoint before a fork and then
>> remove it afterwards, gdbstub will report an error because it will only
>> find the breakpoint once, not n times (n = number of VCPUs).
>>
>> What you have to do is to cleanly duplicate the breakpoint and
>> watchpoint lists on cpu_copy (filter out BP_CPU types for cleanness
>> reasons, although they do not occur in user emulation ATM).
>
> Hello Jan,
>
> Thanks for reviewing my patch.
>
> Duplication of all break/watchpoints will makes the patch bigger,
> because it will required break/watchpoint_copy functions etc...
>
> Another problem is that threads are also emulated by vcpus in user
> emulation. But we also need to share break/watchpoints between threads.
> This explain the way my patch do the thing.
>
> Finally, this makes the modification a lot more complicated than what I
> expected, because breakpoints on emulated forks should not apply.
Sorry, but shouldn't we prefer correct solutions over simpler but broken
ones...?
Before my gdbstub changes, break/watchpoints were per-VCPU and
automatically duplicated on cpu_copy (as they were stored in a static
array inside CPUState). Now they are kept in lists, but still per-VCPU.
All that has to be done now is to fix cpu_copy to take this into
account. If that takes additional simple helpers to clone breakpoints,
so what?
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 12:59 ` Jan Kiszka
@ 2008-12-13 13:21 ` Lionel Landwerlin
2008-12-13 13:49 ` Jan Kiszka
0 siblings, 1 reply; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-13 13:21 UTC (permalink / raw)
To: qemu-devel
Le samedi 13 décembre 2008 à 13:59 +0100, Jan Kiszka a écrit :
> Lionel Landwerlin wrote:
> > Le samedi 13 décembre 2008 à 11:16 +0100, Jan Kiszka a écrit :
> >> Lionel Landwerlin wrote:
> >>> I just forgot to remove 2 printf ...
> >>> Here the good patch :
> >>>
> >>>
> >>>
> >>>
> >>> >From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
> >>> From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> >>> Date: Sat, 13 Dec 2008 00:32:04 +0100
> >>> Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
> >>>
> >>> When using gdb with qemu (via gdbstub), if your emulated
> >>> application is multithreaded and does a segfault then qemu
> >>> crashes.
> >>>
> >>> Qemu crashes because the break/watch points are shared between
> >>> cpus. The TAILQ structure which handles the list of break/watch
> >>> points is copied inside each CPUState structure. When the last
> >>> breakpoint is removed (this happens on a segfault), it is
> >>> removed across all cpus but because of the copied TAILQ
> >>> structure a same breakpoint can be freed N times with N the
> >>> current number of cpus.
> >> OK, now I got the problem: user space emulation spawns additional VCPUs
> >> to emulate fork. Those VCPUs are cloned via cpu_copy which simply
> >> duplicates the CPUState of the parent, including the breakpoint and
> >> watchpoint TAILQ headers. This is doomed to fail.
> >>
> >> But your approach to let the cloned VCPU point to the same TAILQ header
> >> as its parent is not correct as well. It will cause troubles to gdbstub
> >> which manages breakpoints on all VCPUs by adding duplicate instances on
> >> a per-VCPU base. If you inject a breakpoint before a fork and then
> >> remove it afterwards, gdbstub will report an error because it will only
> >> find the breakpoint once, not n times (n = number of VCPUs).
> >>
> >> What you have to do is to cleanly duplicate the breakpoint and
> >> watchpoint lists on cpu_copy (filter out BP_CPU types for cleanness
> >> reasons, although they do not occur in user emulation ATM).
> >
> > Hello Jan,
> >
> > Thanks for reviewing my patch.
> >
> > Duplication of all break/watchpoints will makes the patch bigger,
> > because it will required break/watchpoint_copy functions etc...
> >
> > Another problem is that threads are also emulated by vcpus in user
> > emulation. But we also need to share break/watchpoints between threads.
> > This explain the way my patch do the thing.
> >
> > Finally, this makes the modification a lot more complicated than what I
> > expected, because breakpoints on emulated forks should not apply.
>
> Sorry, but shouldn't we prefer correct solutions over simpler but broken
> ones...?
>
> Before my gdbstub changes, break/watchpoints were per-VCPU and
> automatically duplicated on cpu_copy (as they were stored in a static
> array inside CPUState). Now they are kept in lists, but still per-VCPU.
> All that has to be done now is to fix cpu_copy to take this into
> account. If that takes additional simple helpers to clone breakpoints,
> so what?
>
> Jan
>
Ok, I will do that
Regards,
--
Lionel Landwerlin
O p e n W i d e 14, rue Gaillon 75002 Paris
^ permalink raw reply [flat|nested] 12+ messages in thread
* [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 13:21 ` Lionel Landwerlin
@ 2008-12-13 13:49 ` Jan Kiszka
2008-12-13 17:37 ` Lionel Landwerlin
2008-12-28 22:21 ` Lionel Landwerlin
0 siblings, 2 replies; 12+ messages in thread
From: Jan Kiszka @ 2008-12-13 13:49 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 5027 bytes --]
Lionel Landwerlin wrote:
> Le samedi 13 décembre 2008 à 13:59 +0100, Jan Kiszka a écrit :
>> Lionel Landwerlin wrote:
>>> Le samedi 13 décembre 2008 à 11:16 +0100, Jan Kiszka a écrit :
>>>> Lionel Landwerlin wrote:
>>>>> I just forgot to remove 2 printf ...
>>>>> Here the good patch :
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> >From 2b3fe65ea3f2ee8dd3efbb52b66a2f4e53b788ea Mon Sep 17 00:00:00 2001
>>>>> From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
>>>>> Date: Sat, 13 Dec 2008 00:32:04 +0100
>>>>> Subject: [PATCH] [linux-user] Fixed Qemu crash using Gdbstub
>>>>>
>>>>> When using gdb with qemu (via gdbstub), if your emulated
>>>>> application is multithreaded and does a segfault then qemu
>>>>> crashes.
>>>>>
>>>>> Qemu crashes because the break/watch points are shared between
>>>>> cpus. The TAILQ structure which handles the list of break/watch
>>>>> points is copied inside each CPUState structure. When the last
>>>>> breakpoint is removed (this happens on a segfault), it is
>>>>> removed across all cpus but because of the copied TAILQ
>>>>> structure a same breakpoint can be freed N times with N the
>>>>> current number of cpus.
>>>> OK, now I got the problem: user space emulation spawns additional VCPUs
>>>> to emulate fork. Those VCPUs are cloned via cpu_copy which simply
>>>> duplicates the CPUState of the parent, including the breakpoint and
>>>> watchpoint TAILQ headers. This is doomed to fail.
>>>>
>>>> But your approach to let the cloned VCPU point to the same TAILQ header
>>>> as its parent is not correct as well. It will cause troubles to gdbstub
>>>> which manages breakpoints on all VCPUs by adding duplicate instances on
>>>> a per-VCPU base. If you inject a breakpoint before a fork and then
>>>> remove it afterwards, gdbstub will report an error because it will only
>>>> find the breakpoint once, not n times (n = number of VCPUs).
>>>>
>>>> What you have to do is to cleanly duplicate the breakpoint and
>>>> watchpoint lists on cpu_copy (filter out BP_CPU types for cleanness
>>>> reasons, although they do not occur in user emulation ATM).
>>> Hello Jan,
>>>
>>> Thanks for reviewing my patch.
>>>
>>> Duplication of all break/watchpoints will makes the patch bigger,
>>> because it will required break/watchpoint_copy functions etc...
>>>
>>> Another problem is that threads are also emulated by vcpus in user
>>> emulation. But we also need to share break/watchpoints between threads.
>>> This explain the way my patch do the thing.
>>>
>>> Finally, this makes the modification a lot more complicated than what I
>>> expected, because breakpoints on emulated forks should not apply.
>> Sorry, but shouldn't we prefer correct solutions over simpler but broken
>> ones...?
>>
>> Before my gdbstub changes, break/watchpoints were per-VCPU and
>> automatically duplicated on cpu_copy (as they were stored in a static
>> array inside CPUState). Now they are kept in lists, but still per-VCPU.
>> All that has to be done now is to fix cpu_copy to take this into
>> account. If that takes additional simple helpers to clone breakpoints,
>> so what?
>>
>> Jan
>>
>
> Ok, I will do that
Guess I was too fast - could you try this one:
-------->
Subject: [PATCH] Adapt cpu_copy to new breakpoint API
Latest changes to the cpu_breakpoint/watchpoint API broke cpu_copy. This
patch fixes it by cloning the breakpoint and watchpoint lists
appropriately.
Thanks to Lionel Landwerlin for pointing out.
Signed-off-by: Jan Kiszka <jan.kiszka@web.de>
---
exec.c | 24 +++++++++++++++++++++++-
1 files changed, 23 insertions(+), 1 deletions(-)
diff --git a/exec.c b/exec.c
index 44f6a42..193a43c 100644
--- a/exec.c
+++ b/exec.c
@@ -1654,12 +1654,34 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
CPUState *cpu_copy(CPUState *env)
{
CPUState *new_env = cpu_init(env->cpu_model_str);
- /* preserve chaining and index */
CPUState *next_cpu = new_env->next_cpu;
int cpu_index = new_env->cpu_index;
+#if defined(TARGET_HAS_ICE)
+ CPUBreakpoint *bp;
+ CPUWatchpoint *wp;
+#endif
+
memcpy(new_env, env, sizeof(CPUState));
+
+ /* Preserve chaining and index. */
new_env->next_cpu = next_cpu;
new_env->cpu_index = cpu_index;
+
+ /* Clone all break/watchpoints.
+ Note: Once we support ptrace with hw-debug register access, make sure
+ BP_CPU break/watchpoints are handled correctly on clone. */
+ TAILQ_INIT(&env->breakpoints);
+ TAILQ_INIT(&env->watchpoints);
+#if defined(TARGET_HAS_ICE)
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
+ }
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
+ wp->flags, NULL);
+ }
+#endif
+
return new_env;
}
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 257 bytes --]
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 13:49 ` Jan Kiszka
@ 2008-12-13 17:37 ` Lionel Landwerlin
2008-12-14 14:17 ` Jan Kiszka
2008-12-28 22:21 ` Lionel Landwerlin
1 sibling, 1 reply; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-13 17:37 UTC (permalink / raw)
To: qemu-devel
Le samedi 13 décembre 2008 à 14:49 +0100, Jan Kiszka a écrit :
> Lionel Landwerlin wrote:
> Subject: [PATCH] Adopt cpu_copy to new breakpoint API
>
> Latest changes to the cpu_breakpoint/watchpoint API broke cpu_copy. This
> patch fixes it by cloning the breakpoint and watchpoint lists
> appropriately.
>
> Thanks to Lionel Landwerlin for pointing out.
>
> Signed-off-by: Jan Kiszka <jan.kiszka@web.de>
> ---
>
> exec.c | 24 +++++++++++++++++++++++-
> 1 files changed, 23 insertions(+), 1 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index 44f6a42..193a43c 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1654,12 +1654,34 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
> CPUState *cpu_copy(CPUState *env)
> {
> CPUState *new_env = cpu_init(env->cpu_model_str);
> - /* preserve chaining and index */
> CPUState *next_cpu = new_env->next_cpu;
> int cpu_index = new_env->cpu_index;
> +#if defined(TARGET_HAS_ICE)
> + CPUBreakpoint *bp;
> + CPUWatchpoint *wp;
> +#endif
> +
> memcpy(new_env, env, sizeof(CPUState));
> +
> + /* Preserve chaining and index. */
> new_env->next_cpu = next_cpu;
> new_env->cpu_index = cpu_index;
> +
> + /* Clone all break/watchpoints.
> + Note: Once we support ptrace with hw-debug register access, make sure
> + BP_CPU break/watchpoints are handled correctly on clone. */
> + TAILQ_INIT(&env->breakpoints);
> + TAILQ_INIT(&env->watchpoints);
> +#if defined(TARGET_HAS_ICE)
> + TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
> + }
> + TAILQ_FOREACH(wp, &env->watchpoints, entry) {
> + cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
> + wp->flags, NULL);
> + }
> +#endif
> +
> return new_env;
> }
>
>
Jan,
Well the patch seems pretty better as qemu does not crash anymore :)
There might be other problems, because gdbstub doesn't stop where I know
it should. I'm investigating...
You might want to add this patch too, there is something strange with
TAILQ 'first' structure member. It's not updated on deletion of
all/first elements.
Regards,
>From 78ba0dbf0c9e5d73022fecdbf1869274b8224949 Mon Sep 17 00:00:00 2001
From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
Date: Sat, 13 Dec 2008 14:05:18 +0100
Subject: [PATCH] Fix suspicious TAILQ management
TAILQ first pointer is not updated when the last element is
removed.
---
sys-queue.h | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)
diff --git a/sys-queue.h b/sys-queue.h
index ad5c8fb..37bedde 100644
--- a/sys-queue.h
+++ b/sys-queue.h
@@ -202,7 +202,8 @@ struct { \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
- *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+ if ((head)->tqh_first == (elm)) \
+ (head)->tqh_first = (elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define TAILQ_FOREACH(var, head, field) \
--
1.5.6.5
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 17:37 ` Lionel Landwerlin
@ 2008-12-14 14:17 ` Jan Kiszka
2008-12-14 19:34 ` Lionel Landwerlin
0 siblings, 1 reply; 12+ messages in thread
From: Jan Kiszka @ 2008-12-14 14:17 UTC (permalink / raw)
To: qemu-devel
[-- Attachment #1: Type: text/plain, Size: 3814 bytes --]
Lionel Landwerlin wrote:
> Le samedi 13 décembre 2008 à 14:49 +0100, Jan Kiszka a écrit :
>> Lionel Landwerlin wrote:
>> Subject: [PATCH] Adopt cpu_copy to new breakpoint API
>>
>> Latest changes to the cpu_breakpoint/watchpoint API broke cpu_copy. This
>> patch fixes it by cloning the breakpoint and watchpoint lists
>> appropriately.
>>
>> Thanks to Lionel Landwerlin for pointing out.
>>
>> Signed-off-by: Jan Kiszka <jan.kiszka@web.de>
>> ---
>>
>> exec.c | 24 +++++++++++++++++++++++-
>> 1 files changed, 23 insertions(+), 1 deletions(-)
>>
>> diff --git a/exec.c b/exec.c
>> index 44f6a42..193a43c 100644
>> --- a/exec.c
>> +++ b/exec.c
>> @@ -1654,12 +1654,34 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
>> CPUState *cpu_copy(CPUState *env)
>> {
>> CPUState *new_env = cpu_init(env->cpu_model_str);
>> - /* preserve chaining and index */
>> CPUState *next_cpu = new_env->next_cpu;
>> int cpu_index = new_env->cpu_index;
>> +#if defined(TARGET_HAS_ICE)
>> + CPUBreakpoint *bp;
>> + CPUWatchpoint *wp;
>> +#endif
>> +
>> memcpy(new_env, env, sizeof(CPUState));
>> +
>> + /* Preserve chaining and index. */
>> new_env->next_cpu = next_cpu;
>> new_env->cpu_index = cpu_index;
>> +
>> + /* Clone all break/watchpoints.
>> + Note: Once we support ptrace with hw-debug register access, make sure
>> + BP_CPU break/watchpoints are handled correctly on clone. */
>> + TAILQ_INIT(&env->breakpoints);
>> + TAILQ_INIT(&env->watchpoints);
>> +#if defined(TARGET_HAS_ICE)
>> + TAILQ_FOREACH(bp, &env->breakpoints, entry) {
>> + cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
>> + }
>> + TAILQ_FOREACH(wp, &env->watchpoints, entry) {
>> + cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
>> + wp->flags, NULL);
>> + }
>> +#endif
>> +
>> return new_env;
>> }
>>
>>
>
> Jan,
>
> Well the patch seems pretty better as qemu does not crash anymore :)
> There might be other problems, because gdbstub doesn't stop where I know
> it should. I'm investigating...
OK. If you have a testcase, I would also look into this next week.
>
> You might want to add this patch too, there is something strange with
> TAILQ 'first' structure member. It's not updated on deletion of
> all/first elements.
>
> Regards,
>
>>From 78ba0dbf0c9e5d73022fecdbf1869274b8224949 Mon Sep 17 00:00:00 2001
> From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> Date: Sat, 13 Dec 2008 14:05:18 +0100
> Subject: [PATCH] Fix suspicious TAILQ management
>
> TAILQ first pointer is not updated when the last element is
> removed.
> ---
> sys-queue.h | 3 ++-
> 1 files changed, 2 insertions(+), 1 deletions(-)
>
> diff --git a/sys-queue.h b/sys-queue.h
> index ad5c8fb..37bedde 100644
> --- a/sys-queue.h
> +++ b/sys-queue.h
> @@ -202,7 +202,8 @@ struct { \
> (elm)->field.tqe_prev; \
> else \
> (head)->tqh_last = (elm)->field.tqe_prev; \
> - *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
> + if ((head)->tqh_first == (elm)) \
> + (head)->tqh_first = (elm)->field.tqe_next; \
That's fishy. The elm's prev field should point to the head, thus the
head should be updated to elm's next (ie. NULL). Could you dig deeper
what the state of all involved structures are and maybe track down when
they become inconsistent? Alternatively, please provide a testcase.
Thanks,
Jan
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 258 bytes --]
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-14 14:17 ` Jan Kiszka
@ 2008-12-14 19:34 ` Lionel Landwerlin
0 siblings, 0 replies; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-14 19:34 UTC (permalink / raw)
To: qemu-devel
Le dimanche 14 décembre 2008 à 15:17 +0100, Jan Kiszka a écrit :
> Lionel Landwerlin wrote:
> > Le samedi 13 décembre 2008 à 14:49 +0100, Jan Kiszka a écrit :
> >> Lionel Landwerlin wrote:
> >> Subject: [PATCH] Adopt cpu_copy to new breakpoint API
> >>
> >> Latest changes to the cpu_breakpoint/watchpoint API broke cpu_copy. This
> >> patch fixes it by cloning the breakpoint and watchpoint lists
> >> appropriately.
> >>
> >> Thanks to Lionel Landwerlin for pointing out.
> >>
> >> Signed-off-by: Jan Kiszka <jan.kiszka@web.de>
> >> ---
> >>
> >> exec.c | 24 +++++++++++++++++++++++-
> >> 1 files changed, 23 insertions(+), 1 deletions(-)
> >>
> >> diff --git a/exec.c b/exec.c
> >> index 44f6a42..193a43c 100644
> >> --- a/exec.c
> >> +++ b/exec.c
> >> @@ -1654,12 +1654,34 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
> >> CPUState *cpu_copy(CPUState *env)
> >> {
> >> CPUState *new_env = cpu_init(env->cpu_model_str);
> >> - /* preserve chaining and index */
> >> CPUState *next_cpu = new_env->next_cpu;
> >> int cpu_index = new_env->cpu_index;
> >> +#if defined(TARGET_HAS_ICE)
> >> + CPUBreakpoint *bp;
> >> + CPUWatchpoint *wp;
> >> +#endif
> >> +
> >> memcpy(new_env, env, sizeof(CPUState));
> >> +
> >> + /* Preserve chaining and index. */
> >> new_env->next_cpu = next_cpu;
> >> new_env->cpu_index = cpu_index;
> >> +
> >> + /* Clone all break/watchpoints.
> >> + Note: Once we support ptrace with hw-debug register access, make sure
> >> + BP_CPU break/watchpoints are handled correctly on clone. */
> >> + TAILQ_INIT(&env->breakpoints);
> >> + TAILQ_INIT(&env->watchpoints);
> >> +#if defined(TARGET_HAS_ICE)
> >> + TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> >> + cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
> >> + }
> >> + TAILQ_FOREACH(wp, &env->watchpoints, entry) {
> >> + cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
> >> + wp->flags, NULL);
> >> + }
> >> +#endif
> >> +
> >> return new_env;
> >> }
> >>
> >>
> >
> > Jan,
> >
> > Well the patch seems pretty better as qemu does not crash anymore :)
> > There might be other problems, because gdbstub doesn't stop where I know
> > it should. I'm investigating...
>
> OK. If you have a testcase, I would also look into this next week.
>
> >
> > You might want to add this patch too, there is something strange with
> > TAILQ 'first' structure member. It's not updated on deletion of
> > all/first elements.
> >
> > Regards,
> >
> >>From 78ba0dbf0c9e5d73022fecdbf1869274b8224949 Mon Sep 17 00:00:00 2001
> > From: Lionel Landwerlin <lionel.landwerlin@openwide.fr>
> > Date: Sat, 13 Dec 2008 14:05:18 +0100
> > Subject: [PATCH] Fix suspicious TAILQ management
> >
> > TAILQ first pointer is not updated when the last element is
> > removed.
> > ---
> > sys-queue.h | 3 ++-
> > 1 files changed, 2 insertions(+), 1 deletions(-)
> >
> > diff --git a/sys-queue.h b/sys-queue.h
> > index ad5c8fb..37bedde 100644
> > --- a/sys-queue.h
> > +++ b/sys-queue.h
> > @@ -202,7 +202,8 @@ struct { \
> > (elm)->field.tqe_prev; \
> > else \
> > (head)->tqh_last = (elm)->field.tqe_prev; \
> > - *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
> > + if ((head)->tqh_first == (elm)) \
> > + (head)->tqh_first = (elm)->field.tqe_next; \
>
> That's fishy. The elm's prev field should point to the head, thus the
> head should be updated to elm's next (ie. NULL). Could you dig deeper
> what the state of all involved structures are and maybe track down when
> they become inconsistent? Alternatively, please provide a testcase.
In fact when you're not using gdbstub, there is no break/watch points.
So this problem never appears. And when you're using gdbstub, this
problem only appears, when all break/watch points are removed (ie when a
SIGSEGV is raised). In this case, you almost always restart qemu soon
afterwards, so break/watch points (and the first member of the TAILQ
lists) are not used anymore.
My current test case is a piece of proprietary software, so I don't think
I will be able to give it to you, but I can probably rewrite something
simple using ~10/15 threads. I'm testing an sh4 binary, do you think you
need an sh4 root filesystem to reproduce the problem ?
--
Lionel Landwerlin
O p e n W i d e 14, rue Gaillon 75002 Paris
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [Qemu-devel] Re: [linux-user] Fixed Qemu crash using Gdbstub
2008-12-13 13:49 ` Jan Kiszka
2008-12-13 17:37 ` Lionel Landwerlin
@ 2008-12-28 22:21 ` Lionel Landwerlin
1 sibling, 0 replies; 12+ messages in thread
From: Lionel Landwerlin @ 2008-12-28 22:21 UTC (permalink / raw)
To: qemu-devel
Le samedi 13 décembre 2008 à 14:49 +0100, Jan Kiszka a écrit :
> -------->
>
> Subject: [PATCH] Adopt cpu_copy to new breakpoint API
>
> Latest changes to the cpu_breakpoint/watchpoint API broke cpu_copy. This
> patch fixes it by cloning the breakpoint and watchpoint lists
> appropriately.
>
> Thanks to Lionel Landwerlin for pointing out.
>
> Signed-off-by: Jan Kiszka <jan.kiszka@web.de>
> ---
>
> exec.c | 24 +++++++++++++++++++++++-
> 1 files changed, 23 insertions(+), 1 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index 44f6a42..193a43c 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1654,12 +1654,34 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
> CPUState *cpu_copy(CPUState *env)
> {
> CPUState *new_env = cpu_init(env->cpu_model_str);
> - /* preserve chaining and index */
> CPUState *next_cpu = new_env->next_cpu;
> int cpu_index = new_env->cpu_index;
> +#if defined(TARGET_HAS_ICE)
> + CPUBreakpoint *bp;
> + CPUWatchpoint *wp;
> +#endif
> +
> memcpy(new_env, env, sizeof(CPUState));
> +
> + /* Preserve chaining and index. */
> new_env->next_cpu = next_cpu;
> new_env->cpu_index = cpu_index;
> +
> + /* Clone all break/watchpoints.
> + Note: Once we support ptrace with hw-debug register access, make sure
> + BP_CPU break/watchpoints are handled correctly on clone. */
> + TAILQ_INIT(&env->breakpoints);
> + TAILQ_INIT(&env->watchpoints);
> +#if defined(TARGET_HAS_ICE)
> + TAILQ_FOREACH(bp, &env->breakpoints, entry) {
> + cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
> + }
> + TAILQ_FOREACH(wp, &env->watchpoints, entry) {
> + cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
> + wp->flags, NULL);
> + }
> +#endif
> +
> return new_env;
> }
>
>
Is this patch going to be integrated anytime soon ?
--
Lionel Landwerlin
O p e n W i d e 14, rue Gaillon 75002 Paris
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2008-12-28 22:21 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-12-12 23:52 [Qemu-devel] [linux-user] Fixed Qemu crash using Gdbstub Lionel Landwerlin
2008-12-13 0:00 ` Lionel Landwerlin
2008-12-13 8:26 ` [Qemu-devel] " Jan Kiszka
2008-12-13 10:16 ` Jan Kiszka
2008-12-13 12:31 ` Lionel Landwerlin
2008-12-13 12:59 ` Jan Kiszka
2008-12-13 13:21 ` Lionel Landwerlin
2008-12-13 13:49 ` Jan Kiszka
2008-12-13 17:37 ` Lionel Landwerlin
2008-12-14 14:17 ` Jan Kiszka
2008-12-14 19:34 ` Lionel Landwerlin
2008-12-28 22:21 ` Lionel Landwerlin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).