From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from mailman by lists.gnu.org with tmda-scanned (Exim 4.43) id 1KGcaq-00074x-JM for qemu-devel@nongnu.org; Wed, 09 Jul 2008 12:32:36 -0400 Received: from exim by lists.gnu.org with spam-scanned (Exim 4.43) id 1KGcao-00073p-OG for qemu-devel@nongnu.org; Wed, 09 Jul 2008 12:32:36 -0400 Received: from [199.232.76.173] (port=57668 helo=monty-python.gnu.org) by lists.gnu.org with esmtp (Exim 4.43) id 1KGcao-00073k-Iu for qemu-devel@nongnu.org; Wed, 09 Jul 2008 12:32:34 -0400 Received: from mx20.gnu.org ([199.232.41.8]:48510) by monty-python.gnu.org with esmtps (TLS-1.0:RSA_AES_256_CBC_SHA1:32) (Exim 4.60) (envelope-from ) id 1KGblm-0003hR-IR for qemu-devel@nongnu.org; Wed, 09 Jul 2008 11:39:51 -0400 Received: from gecko.sbs.de ([194.138.37.40]) by mx20.gnu.org with esmtp (Exim 4.60) (envelope-from ) id 1KGYXb-0007WW-SA for qemu-devel@nongnu.org; Wed, 09 Jul 2008 08:13:00 -0400 Received: from mail1.sbs.de (localhost [127.0.0.1]) by gecko.sbs.de (8.12.11.20060308/8.12.11) with ESMTP id m69CCuYZ022963 for ; Wed, 9 Jul 2008 14:12:56 +0200 Received: from [139.25.109.167] (mchn012c.mchp.siemens.de [139.25.109.167] (may be forged)) by mail1.sbs.de (8.12.11.20060308/8.12.11) with ESMTP id m69CCuDE003128 for ; Wed, 9 Jul 2008 14:12:56 +0200 Message-ID: <4874AB47.9090208@siemens.com> Date: Wed, 09 Jul 2008 14:12:55 +0200 From: Jan Kiszka MIME-Version: 1.0 Content-Type: text/plain; charset=ISO-8859-15 Content-Transfer-Encoding: 7bit Subject: [Qemu-devel] [RFC][PATCH] x86: Optional segment type and limit checks Reply-To: qemu-devel@nongnu.org List-Id: qemu-devel.nongnu.org List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-devel@nongnu.org As announced earlier, I developed an add-on feature for x86 emulation to support segment type and limit checks. Now I finally completed porting it over to latest SVN and added (hopefully) all the cases that were missing so far (as the customer didn't need them). 
The idea of this patch is to generate calls to a check helper only in case the user requested this support via "-seg-checks". This feature remains off by default as most x86 OSes do not care about protection via segmentation anymore (and it was even removed from 64-bit modes by the CPU vendors). Moreover, checking the segment type and limit on every memory access is nothing that makes QEMU faster, so you will only want this if you are looking for very accurate emulation. The attached patch passed our own test cases, and it boots standard Linux into X, both scenarios with -seg-checks enabled. However, though things have been checked twice, I wouldn't be surprised if there are a few typos or other errors in the check instrumentations. But at least, those won't have any impact on the disabled, standard case. Signed-off-by: Jan Kiszka --- target-i386/cpu.h | 8 target-i386/helper.h | 5 target-i386/op_helper.c | 59 ++++++ target-i386/translate.c | 409 +++++++++++++++++++++++++++++++++++++++--------- vl.c | 7 5 files changed, 417 insertions(+), 71 deletions(-) Index: b/target-i386/cpu.h =================================================================== --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -94,6 +94,10 @@ #define DESC_TSS_BUSY_MASK (1 << 9) +/* flags for segment access checks */ +#define ACC_READ 0x0000 +#define ACC_WRITE 0x0100 + /* eflags masks */ #define CC_C 0x0001 #define CC_P 0x0004 @@ -751,6 +755,10 @@ static inline void cpu_clone_regs(CPUSta env->regs[R_ESP] = newsp; env->regs[R_EAX] = 0; } + +#define seg_checks 0 +#else +extern int seg_checks; #endif #define CPU_PC_FROM_TB(env, tb) env->eip = tb->pc - tb->cs_base Index: b/target-i386/translate.c =================================================================== --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -591,7 +591,7 @@ static inline void gen_jmp_im(target_ulo tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip)); } -static inline void gen_string_movl_A0_ESI(DisasContext *s) 
+static inline int gen_string_movl_A0_ESI(DisasContext *s) { int override; @@ -604,6 +604,7 @@ static inline void gen_string_movl_A0_ES } else { gen_op_movq_A0_reg(R_ESI); } + return -1; } else #endif if (s->aflag) { @@ -615,6 +616,7 @@ static inline void gen_string_movl_A0_ES gen_op_addl_A0_reg_sN(0, R_ESI); } else { gen_op_movl_A0_reg(R_ESI); + return R_DS; } } else { /* 16 address, always override */ @@ -624,13 +626,15 @@ static inline void gen_string_movl_A0_ES gen_op_andl_A0_ffff(); gen_op_addl_A0_seg(override); } + return override; } -static inline void gen_string_movl_A0_EDI(DisasContext *s) +static inline int gen_string_movl_A0_EDI(DisasContext *s) { #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_movq_A0_reg(R_EDI); + return -1; } else #endif if (s->aflag) { @@ -645,6 +649,7 @@ static inline void gen_string_movl_A0_ED gen_op_andl_A0_ffff(); gen_op_addl_A0_seg(R_ES); } + return R_ES; } static inline void gen_op_movl_T0_Dshift(int ot) @@ -752,11 +757,31 @@ static void gen_check_io(DisasContext *s } } +static inline void gen_check_segmented_access(int seg_reg, int type) +{ + tcg_gen_helper_0_3(helper_check_segmented_access, cpu_A0, + tcg_const_i32(seg_reg), tcg_const_i32(type)); +} + +static inline void gen_check_segmented_access_size(int seg_reg, int type, + int size) +{ + tcg_gen_helper_0_4(helper_check_segmented_access_size, cpu_A0, + tcg_const_i32(seg_reg), tcg_const_i32(type), + tcg_const_i32(size)); +} + static inline void gen_movs(DisasContext *s, int ot) { - gen_string_movl_A0_ESI(s); + int seg_reg; + + seg_reg = gen_string_movl_A0_ESI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T0_A0(ot + s->mem_index); - gen_string_movl_A0_EDI(s); + seg_reg = gen_string_movl_A0_EDI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_st_T0_A0(ot + s->mem_index); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_ESI); @@ -1165,8 +1190,12 @@ static int 
gen_jz_ecx_string(DisasContex static inline void gen_stos(DisasContext *s, int ot) { + int seg_reg; + gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); - gen_string_movl_A0_EDI(s); + seg_reg = gen_string_movl_A0_EDI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_st_T0_A0(ot + s->mem_index); gen_op_movl_T0_Dshift(ot); gen_op_add_reg_T0(s->aflag, R_EDI); @@ -1174,7 +1203,11 @@ static inline void gen_stos(DisasContext static inline void gen_lods(DisasContext *s, int ot) { - gen_string_movl_A0_ESI(s); + int seg_reg; + + seg_reg = gen_string_movl_A0_ESI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T0_A0(ot + s->mem_index); gen_op_mov_reg_T0(ot, R_EAX); gen_op_movl_T0_Dshift(ot); @@ -1183,8 +1216,12 @@ static inline void gen_lods(DisasContext static inline void gen_scas(DisasContext *s, int ot) { + int seg_reg; + gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); - gen_string_movl_A0_EDI(s); + seg_reg = gen_string_movl_A0_EDI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_cmpl_T0_T1_cc(); gen_op_movl_T0_Dshift(ot); @@ -1193,9 +1230,15 @@ static inline void gen_scas(DisasContext static inline void gen_cmps(DisasContext *s, int ot) { - gen_string_movl_A0_ESI(s); + int seg_reg; + + seg_reg = gen_string_movl_A0_ESI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T0_A0(ot + s->mem_index); - gen_string_movl_A0_EDI(s); + seg_reg = gen_string_movl_A0_EDI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_cmpl_T0_T1_cc(); gen_op_movl_T0_Dshift(ot); @@ -1205,9 +1248,13 @@ static inline void gen_cmps(DisasContext static inline void gen_ins(DisasContext *s, int ot) { + int seg_reg; + if (use_icount) gen_io_start(); - gen_string_movl_A0_EDI(s); + seg_reg = 
gen_string_movl_A0_EDI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); /* Note: we must do this dummy write first to be restartable in case of page fault. */ gen_op_movl_T0_0(); @@ -1225,9 +1272,13 @@ static inline void gen_ins(DisasContext static inline void gen_outs(DisasContext *s, int ot) { + int seg_reg; + if (use_icount) gen_io_start(); - gen_string_movl_A0_ESI(s); + seg_reg = gen_string_movl_A0_ESI(s); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T0_A0(ot + s->mem_index); gen_op_mov_TN_reg(OT_WORD, 1, R_EDX); @@ -1882,7 +1933,7 @@ static void gen_shifti(DisasContext *s1, } } -static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr) +static int gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr) { target_long disp; int havesib; @@ -2064,6 +2115,7 @@ static void gen_lea_modrm(DisasContext * disp = 0; *reg_ptr = opreg; *offset_ptr = disp; + return override; } static void gen_nop_modrm(DisasContext *s, int modrm) @@ -2117,11 +2169,10 @@ static void gen_nop_modrm(DisasContext * } /* used for LEA and MOV AX, mem */ -static void gen_add_A0_ds_seg(DisasContext *s) +static int gen_add_A0_ds_seg(DisasContext *s) { int override, must_add_seg; must_add_seg = s->addseg; - override = R_DS; if (s->override >= 0) { override = s->override; must_add_seg = 1; @@ -2138,13 +2189,14 @@ static void gen_add_A0_ds_seg(DisasConte gen_op_addl_A0_seg(override); } } + return override; } /* generate modrm memory load or store of 'reg'. 
TMP0 is used if reg != OR_TMP0 */ static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store) { - int mod, rm, opreg, disp; + int mod, rm, opreg, disp, seg_reg; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); @@ -2159,12 +2211,16 @@ static void gen_ldst_modrm(DisasContext gen_op_mov_reg_T0(ot, reg); } } else { - gen_lea_modrm(s, modrm, &opreg, &disp); + seg_reg = gen_lea_modrm(s, modrm, &opreg, &disp); if (is_store) { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); if (reg != OR_TMP0) gen_op_mov_TN_reg(ot, 0, reg); gen_op_st_T0_A0(ot + s->mem_index); } else { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T0_A0(ot + s->mem_index); if (reg != OR_TMP0) gen_op_mov_reg_T0(ot, reg); @@ -2398,6 +2454,8 @@ static void gen_push_T0(DisasContext *s) tcg_gen_mov_tl(cpu_T[1], cpu_A0); gen_op_addl_A0_seg(R_SS); } + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_WRITE | (OT_WORD + s->dflag)); gen_op_st_T0_A0(s->dflag + 1 + s->mem_index); if (s->ss32 && !s->addseg) gen_op_mov_reg_A0(1, R_ESP); @@ -2437,6 +2495,8 @@ static void gen_push_T1(DisasContext *s) gen_op_andl_A0_ffff(); gen_op_addl_A0_seg(R_SS); } + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_WRITE | (OT_WORD + s->dflag)); gen_op_st_T1_A0(s->dflag + 1 + s->mem_index); if (s->ss32 && !s->addseg) @@ -2464,6 +2524,8 @@ static void gen_pop_T0(DisasContext *s) gen_op_andl_A0_ffff(); gen_op_addl_A0_seg(R_SS); } + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_READ | (OT_WORD + s->dflag)); gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index); } } @@ -2501,6 +2563,8 @@ static void gen_pusha(DisasContext *s) tcg_gen_mov_tl(cpu_T[1], cpu_A0); if (s->addseg) gen_op_addl_A0_seg(R_SS); + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_WRITE | (OT_WORD + s->dflag + 3)); for(i = 0;i < 8; i++) { gen_op_mov_TN_reg(OT_LONG, 0, 7 - i); gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index); @@ 
-2520,6 +2584,8 @@ static void gen_popa(DisasContext *s) tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag); if (s->addseg) gen_op_addl_A0_seg(R_SS); + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_READ | (OT_WORD + s->dflag + 3)); for(i = 0;i < 8; i++) { /* ESP is not reloaded */ if (i != 3) { @@ -2571,6 +2637,8 @@ static void gen_enter(DisasContext *s, i tcg_gen_mov_tl(cpu_T[1], cpu_A0); if (s->addseg) gen_op_addl_A0_seg(R_SS); + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_WRITE | ot); /* push bp */ gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); gen_op_st_T0_A0(ot + s->mem_index); @@ -2924,7 +2992,7 @@ static void *sse_op_table5[256] = { static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) { int b1, op1_offset, op2_offset, is_xmm, val, ot; - int modrm, mod, rm, reg, reg_addr, offset_addr; + int seg_reg, modrm, mod, rm, reg, reg_addr, offset_addr; void *sse_op2; b &= 0xff; @@ -2990,7 +3058,9 @@ static void gen_sse(DisasContext *s, int case 0x0e7: /* movntq */ if (mod == 3) goto illegal_op; - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); break; case 0x1e7: /* movntdq */ @@ -2999,7 +3069,9 @@ static void gen_sse(DisasContext *s, int case 0x3f0: /* lddqu */ if (mod == 3) goto illegal_op; - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | (OT_QUAD + 1)); gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); break; case 0x6e: /* movd mm, ea */ @@ -3035,7 +3107,9 @@ static void gen_sse(DisasContext *s, int break; case 0x6f: /* movq mm, ea */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); 
+ if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); } else { rm = (modrm & 7); @@ -3052,7 +3126,9 @@ static void gen_sse(DisasContext *s, int case 0x16f: /* movdqa xmm, ea */ case 0x26f: /* movdqu xmm, ea */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | (OT_QUAD + 1)); gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); @@ -3062,7 +3138,9 @@ static void gen_sse(DisasContext *s, int break; case 0x210: /* movss xmm, ea */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_movl_T0_0(); @@ -3077,7 +3155,9 @@ static void gen_sse(DisasContext *s, int break; case 0x310: /* movsd xmm, ea */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); gen_op_movl_T0_0(); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); @@ -3091,7 +3171,9 @@ static void gen_sse(DisasContext *s, int case 0x012: /* movlps */ case 0x112: /* movlpd */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, 
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { /* movhlps */ @@ -3102,7 +3184,9 @@ static void gen_sse(DisasContext *s, int break; case 0x212: /* movsldup */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | (OT_QUAD + 1)); gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); @@ -3118,7 +3202,9 @@ static void gen_sse(DisasContext *s, int break; case 0x312: /* movddup */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); @@ -3131,7 +3217,9 @@ static void gen_sse(DisasContext *s, int case 0x016: /* movhps */ case 0x116: /* movhpd */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); } else { /* movlhps */ @@ -3142,7 +3230,9 @@ static void gen_sse(DisasContext *s, int break; case 0x216: /* movshdup */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | (OT_QUAD + 1)); gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); @@ -3186,7 +3276,9 @@ static void gen_sse(DisasContext *s, int break; case 0x27e: /* movq xmm, ea */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, 
&offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); @@ -3197,7 +3289,9 @@ static void gen_sse(DisasContext *s, int break; case 0x7f: /* movq ea, mm */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); } else { rm = (modrm & 7); @@ -3212,7 +3306,9 @@ static void gen_sse(DisasContext *s, int case 0x17f: /* movdqa ea, xmm */ case 0x27f: /* movdqu ea, xmm */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | (OT_QUAD + 1)); gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); @@ -3222,7 +3318,9 @@ static void gen_sse(DisasContext *s, int break; case 0x211: /* movss ea, xmm */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_LONG); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_st_T0_A0(OT_LONG + s->mem_index); } else { @@ -3233,7 +3331,9 @@ static void gen_sse(DisasContext *s, int break; case 0x311: /* movsd ea, xmm */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); @@ -3244,7 +3344,9 
@@ static void gen_sse(DisasContext *s, int case 0x013: /* movlps */ case 0x113: /* movlpd */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { goto illegal_op; @@ -3253,7 +3355,9 @@ static void gen_sse(DisasContext *s, int case 0x017: /* movhps */ case 0x117: /* movhpd */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); } else { goto illegal_op; @@ -3313,7 +3417,9 @@ static void gen_sse(DisasContext *s, int case 0x12a: /* cvtpi2pd */ tcg_gen_helper_0_0(helper_enter_mmx); if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s->mem_index, op2_offset); } else { @@ -3353,7 +3459,9 @@ static void gen_sse(DisasContext *s, int case 0x12d: /* cvtpd2pi */ tcg_gen_helper_0_0(helper_enter_mmx); if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | (OT_QUAD + 1)); op2_offset = offsetof(CPUX86State,xmm_t0); gen_ldo_env_A0(s->mem_index, op2_offset); } else { @@ -3384,10 +3492,14 @@ static void gen_sse(DisasContext *s, int case 0x32d: /* cvtsd2si */ ot = (s->dflag == 2) ? 
OT_QUAD : OT_LONG; if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); if ((b >> 8) & 1) { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0))); } else { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); } @@ -3443,7 +3555,9 @@ static void gen_sse(DisasContext *s, int break; case 0x1d6: /* movq ea, xmm */ if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); @@ -3499,20 +3613,26 @@ static void gen_sse(DisasContext *s, int if (is_xmm) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); op2_offset = offsetof(CPUX86State,xmm_t0); if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) || b == 0xc2)) { /* specific case for SSE single instructions */ if (b1 == 2) { /* 32 bit access */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); } else { /* 64 bit access */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0))); } } else { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | (OT_QUAD + 1)); gen_ldo_env_A0(s->mem_index, op2_offset); } } else { @@ 
-3522,7 +3642,9 @@ static void gen_sse(DisasContext *s, int } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s->mem_index, op2_offset); } else { @@ -3597,7 +3719,7 @@ static target_ulong disas_insn(DisasCont { int b, prefixes, aflag, dflag; int shift, ot; - int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val; + int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val, seg_reg; target_ulong next_eip, tval; int rex_w, rex_r; @@ -3762,7 +3884,10 @@ static target_ulong disas_insn(DisasCont mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ((op == OP_CMPL) ? ACC_READ : ACC_WRITE) | ot); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: @@ -3784,7 +3909,9 @@ static target_ulong disas_insn(DisasCont reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T1_A0(ot + s->mem_index); } else if (op == OP_XORL && rm == reg) { goto xor_zero; @@ -3826,7 +3953,10 @@ static target_ulong disas_insn(DisasCont s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ((op == OP_CMPL) ? 
ACC_READ : ACC_WRITE) | ot); opreg = OR_TMP0; } else { opreg = rm; @@ -3872,7 +4002,10 @@ static target_ulong disas_insn(DisasCont if (mod != 3) { if (op == 0) s->rip_offset = insn_const_size(ot); - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ((op == 2 || op == 3) ? ACC_WRITE : ACC_READ) | ot); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); @@ -4118,7 +4251,10 @@ static target_ulong disas_insn(DisasCont } } if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ((op < 2) ? ACC_WRITE : ACC_READ) | ot); if (op >= 2 && op != 3 && op != 5) gen_op_ld_T0_A0(ot + s->mem_index); } else { @@ -4357,7 +4493,9 @@ static target_ulong disas_insn(DisasCont gen_op_mov_reg_T1(ot, reg); gen_op_mov_reg_T0(ot, rm); } else { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_mov_TN_reg(ot, 0, reg); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_addl_T0_T1(); @@ -4389,7 +4527,9 @@ static target_ulong disas_insn(DisasCont rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, t0, rm); } else { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); tcg_gen_mov_tl(a0, cpu_A0); gen_op_ld_v(ot + s->mem_index, t0, a0); rm = 0; /* avoid warning */ @@ -4444,7 +4584,9 @@ static target_ulong disas_insn(DisasCont gen_jmp_im(pc_start - s->cs_base); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if 
(seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_BYTE); tcg_gen_helper_0_1(helper_cmpxchg8b, cpu_A0); } s->cc_op = CC_OP_EFLAGS; @@ -4614,7 +4756,9 @@ static target_ulong disas_insn(DisasCont mod = (modrm >> 6) & 3; if (mod != 3) { s->rip_offset = insn_const_size(ot); - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); } val = insn_get(s, ot); gen_op_movl_T0_im(val); @@ -4703,7 +4847,9 @@ static target_ulong disas_insn(DisasCont } gen_op_mov_reg_T0(d_ot, reg); } else { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); if (b & 8) { gen_op_lds_T0_A0(ot + s->mem_index); } else { @@ -4725,7 +4871,9 @@ static target_ulong disas_insn(DisasCont s->override = -1; val = s->addseg; s->addseg = 0; - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); s->addseg = val; gen_op_mov_reg_A0(ot - OT_WORD, reg); break; @@ -4756,11 +4904,15 @@ static target_ulong disas_insn(DisasCont } gen_op_movl_A0_im(offset_addr); } - gen_add_A0_ds_seg(s); + seg_reg = gen_add_A0_ds_seg(s); if ((b & 2) == 0) { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | ot); gen_op_ld_T0_A0(ot + s->mem_index); gen_op_mov_reg_T0(ot, R_EAX); } else { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_mov_TN_reg(ot, 0, R_EAX); gen_op_st_T0_A0(ot + s->mem_index); } @@ -4785,7 +4937,9 @@ static target_ulong disas_insn(DisasCont else tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); } - gen_add_A0_ds_seg(s); + seg_reg = gen_add_A0_ds_seg(s); + if (seg_checks && seg_reg >= 0) + 
gen_check_segmented_access(seg_reg, ACC_READ | OT_BYTE); gen_op_ldu_T0_A0(OT_BYTE + s->mem_index); gen_op_mov_reg_T0(OT_BYTE, R_EAX); break; @@ -4837,7 +4991,9 @@ static target_ulong disas_insn(DisasCont gen_op_mov_reg_T0(ot, rm); gen_op_mov_reg_T1(ot, reg); } else { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_mov_TN_reg(ot, 0, reg); /* for xchg, lock is implicit */ if (!(prefixes & PREFIX_LOCK)) @@ -4874,7 +5030,10 @@ static target_ulong disas_insn(DisasCont mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_READ, + (1 << ot) + 2); gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); /* load the segment first to handle exceptions properly */ @@ -4909,7 +5068,9 @@ static target_ulong disas_insn(DisasCont if (shift == 2) { s->rip_offset = 1; } - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); @@ -4959,7 +5120,9 @@ static target_ulong disas_insn(DisasCont rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); opreg = OR_TMP0; } else { opreg = rm; @@ -4990,7 +5153,7 @@ static target_ulong disas_insn(DisasCont op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ - gen_lea_modrm(s, modrm, ®_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, ®_addr, 
&offset_addr); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ @@ -5002,22 +5165,34 @@ static target_ulong disas_insn(DisasCont switch(op >> 4) { case 0: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_flds_FT0, cpu_tmp2_i32); break; case 1: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32); break; case 2: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_QUAD); tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_helper_0_1(helper_fldl_FT0, cpu_tmp1_i64); break; case 3: default: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_WORD); gen_op_lds_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_fildl_FT0, cpu_tmp2_i32); @@ -5041,22 +5216,34 @@ static target_ulong disas_insn(DisasCont case 0: switch(op >> 4) { case 0: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_flds_ST0, cpu_tmp2_i32); break; case 1: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32); break; case 2: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_QUAD); tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_helper_0_1(helper_fldl_ST0, cpu_tmp1_i64); break; case 3: default: + if 
(seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_READ | OT_WORD); gen_op_lds_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_fildl_ST0, cpu_tmp2_i32); @@ -5067,17 +5254,26 @@ static target_ulong disas_insn(DisasCont /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_LONG); tcg_gen_helper_1_0(helper_fisttl_ST0, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 2: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_QUAD); tcg_gen_helper_1_0(helper_fisttll_ST0, cpu_tmp1_i64); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); break; case 3: default: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_WORD); tcg_gen_helper_1_0(helper_fistt_ST0, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); @@ -5088,22 +5284,34 @@ static target_ulong disas_insn(DisasCont default: switch(op >> 4) { case 0: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_LONG); tcg_gen_helper_1_0(helper_fsts_ST0, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 1: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_LONG); tcg_gen_helper_1_0(helper_fistl_ST0, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 2: + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_QUAD); tcg_gen_helper_1_0(helper_fstl_ST0, cpu_tmp1_i64); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); break; case 3: default: + if (seg_checks && seg_reg >= 0) + 
gen_check_segmented_access(seg_reg, + ACC_WRITE | OT_WORD); tcg_gen_helper_1_0(helper_fist_ST0, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); @@ -5115,6 +5323,9 @@ static target_ulong disas_insn(DisasCont } break; case 0x0c: /* fldenv mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_READ, + (s->dflag) ? 28 : 14); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -5122,11 +5333,16 @@ static target_ulong disas_insn(DisasCont cpu_A0, tcg_const_i32(s->dflag)); break; case 0x0d: /* fldcw mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_WORD); gen_op_ld_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_1(helper_fldcw, cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, + (s->dflag) ? 
28 : 14); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -5134,17 +5350,23 @@ static target_ulong disas_insn(DisasCont cpu_A0, tcg_const_i32(s->dflag)); break; case 0x0f: /* fnstcw mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_WORD); tcg_gen_helper_1_0(helper_fnstcw, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; case 0x1d: /* fldt mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_READ, 10); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); tcg_gen_helper_0_1(helper_fldt_ST0, cpu_A0); break; case 0x1f: /* fstpt mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, 10); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -5152,6 +5374,9 @@ static target_ulong disas_insn(DisasCont tcg_gen_helper_0_0(helper_fpop); break; case 0x2c: /* frstor mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, + (s->dflag) ? 108 : 94); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -5159,6 +5384,9 @@ static target_ulong disas_insn(DisasCont cpu_A0, tcg_const_i32(s->dflag)); break; case 0x2e: /* fnsave mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, + (s->dflag) ? 
108 : 94); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -5166,17 +5394,23 @@ static target_ulong disas_insn(DisasCont cpu_A0, tcg_const_i32(s->dflag)); break; case 0x2f: /* fnstsw mem */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_WORD); tcg_gen_helper_1_0(helper_fnstsw, cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; case 0x3c: /* fbld */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_READ, 10); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); tcg_gen_helper_0_1(helper_fbld_ST0, cpu_A0); break; case 0x3e: /* fbstp */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, 10); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -5184,11 +5418,15 @@ static target_ulong disas_insn(DisasCont tcg_gen_helper_0_0(helper_fpop); break; case 0x3d: /* fildll */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_QUAD); tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_helper_0_1(helper_fildll_ST0, cpu_tmp1_i64); break; case 0x3f: /* fistpll */ + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_QUAD); tcg_gen_helper_1_0(helper_fistll_ST0, cpu_tmp1_i64); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); @@ -5734,6 +5972,8 @@ static target_ulong disas_insn(DisasCont tcg_const_i32(val)); } else { gen_stack_A0(s); + if (seg_checks) + gen_check_segmented_access(R_SS, ACC_READ | (OT_LONG + s->dflag)); /* pop offset */ gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); if (s->dflag == 0) @@ -5870,7 +6110,9 @@ static target_ulong disas_insn(DisasCont mod = (modrm >> 6) & 3; t0 = tcg_temp_local_new(TCG_TYPE_TL); if (mod != 3) { - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); 
+ seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); } else { rm = (modrm & 7) | REX_B(s); @@ -6012,7 +6254,9 @@ static target_ulong disas_insn(DisasCont rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 1; - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); @@ -6043,7 +6287,9 @@ static target_ulong disas_insn(DisasCont rm = (modrm & 7) | REX_B(s); gen_op_mov_TN_reg(OT_LONG, 1, reg); if (mod != 3) { - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); /* specific case: we need to add a displacement */ gen_exts(ot, cpu_T[1]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot); @@ -6272,7 +6518,9 @@ static target_ulong disas_insn(DisasCont if (mod == 3) goto illegal_op; gen_op_mov_TN_reg(ot, 0, reg); - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | (ot + 1)); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); if (ot == OT_WORD) @@ -6553,7 +6801,10 @@ static target_ulong disas_insn(DisasCont if (mod == 3) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, + CODE64(s) ? 
10 : 6); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_T0_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); @@ -6602,7 +6853,10 @@ static target_ulong disas_insn(DisasCont } } else { /* sidt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, + CODE64(s) ? 10 : 6); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_T0_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); @@ -6708,7 +6962,10 @@ static target_ulong disas_insn(DisasCont } else { gen_svm_check_intercept(s, pc_start, op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE); - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_READ, + CODE64(s) ? 
10 : 6); gen_op_ld_T1_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index); @@ -6824,7 +7081,9 @@ static target_ulong disas_insn(DisasCont mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | ot); gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); } else { gen_op_mov_v_reg(ot, t0, rm); @@ -7013,7 +7272,9 @@ static target_ulong disas_insn(DisasCont gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_WRITE, 512); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -7028,7 +7289,9 @@ static target_ulong disas_insn(DisasCont gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access_size(seg_reg, ACC_READ, 512); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); @@ -7044,11 +7307,15 @@ static target_ulong disas_insn(DisasCont if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) || mod == 3) goto illegal_op; - gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); + seg_reg = gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (op == 2) { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_READ | OT_LONG); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); } else { + if (seg_checks && seg_reg >= 0) + gen_check_segmented_access(seg_reg, ACC_WRITE | OT_LONG); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, 
offsetof(CPUX86State, mxcsr)); gen_op_st_T0_A0(OT_LONG + s->mem_index); } Index: b/vl.c =================================================================== --- a/vl.c +++ b/vl.c @@ -198,6 +198,7 @@ CharDriverState *serial_hds[MAX_SERIAL_P CharDriverState *parallel_hds[MAX_PARALLEL_PORTS]; #ifdef TARGET_I386 int win2k_install_hack = 0; +int seg_checks = 0; #endif int usb_enabled = 0; static VLANState *first_vlan; @@ -7434,6 +7435,7 @@ static void help(int exitcode) "-std-vga simulate a standard VGA card with VESA Bochs Extensions\n" " (default is CL-GD5446 PCI VGA)\n" "-no-acpi disable ACPI\n" + "-seg-checks enable runtime segment access checks\n" #endif #ifdef CONFIG_CURSES "-curses use a curses/ncurses interface instead of SDL\n" @@ -7495,6 +7497,7 @@ enum { QEMU_OPTION_snapshot, #ifdef TARGET_I386 QEMU_OPTION_no_fd_bootchk, + QEMU_OPTION_seg_checks, #endif QEMU_OPTION_m, QEMU_OPTION_nographic, @@ -7588,6 +7591,7 @@ const QEMUOption qemu_options[] = { { "snapshot", 0, QEMU_OPTION_snapshot }, #ifdef TARGET_I386 { "no-fd-bootchk", 0, QEMU_OPTION_no_fd_bootchk }, + { "seg-checks", 0, QEMU_OPTION_seg_checks }, #endif { "m", HAS_ARG, QEMU_OPTION_m }, { "nographic", 0, QEMU_OPTION_nographic }, @@ -8383,6 +8387,9 @@ int main(int argc, char **argv) case QEMU_OPTION_kernel_kqemu: kqemu_allowed = 2; break; + case QEMU_OPTION_seg_checks: + seg_checks = 1; + break; #endif case QEMU_OPTION_usb: usb_enabled = 1; Index: b/target-i386/helper.h =================================================================== --- a/target-i386/helper.h +++ b/target-i386/helper.h @@ -218,4 +218,9 @@ DEF_HELPER(target_ulong, helper_rclq, (t DEF_HELPER(target_ulong, helper_rcrq, (target_ulong t0, target_ulong t1)) #endif +DEF_HELPER(void, helper_check_segmented_access, (target_ulong a0, + int seg_reg, int type)) +DEF_HELPER(void, helper_check_segmented_access_size, (target_ulong a0, + int seg_reg, int type, int size)) + #undef DEF_HELPER Index: b/target-i386/op_helper.c 
=================================================================== --- a/target-i386/op_helper.c +++ b/target-i386/op_helper.c @@ -2231,6 +2231,65 @@ void helper_load_seg(int seg_reg, int se } } +static void log_seg_violation(target_ulong a0, int seg_reg, int type, int size) +{ + static const char *seg_name[] = { "ES", "CS", "SS", "DS", "FS", "GS" }; + + fprintf(logfile, "segment violation: %s %d byte via %s from %x:" + TARGET_FMT_lx ", base " TARGET_FMT_lx ", limit %x, " + "flags %x\n", + (type & ACC_WRITE) ? "write" : "read", size, seg_name[seg_reg], + env->segs[seg_reg].selector, a0 - env->segs[seg_reg].base, + env->segs[seg_reg].base, env->segs[seg_reg].limit, + env->segs[seg_reg].flags); +} + +void helper_check_segmented_access_size(target_ulong a0, int seg_reg, + int type, int size) +{ + int seg_type = env->segs[seg_reg].flags & 0x1F00; + target_long addr = a0 - env->segs[seg_reg].base; + + if (!(env->cr[0] & CR0_PE_MASK) || (env->hflags & HF_CS64_MASK)) + return; + + if (!(seg_type & 0x1000)) { + if (loglevel & CPU_LOG_INT) + log_seg_violation(a0, seg_reg, type, size); + raise_exception(EXCP0D_GPF); + } + + if (type & ACC_WRITE) { + if (seg_type & 0x0800 || !(seg_type & 0x0200)) { + if (loglevel & CPU_LOG_INT) + log_seg_violation(a0, seg_reg, type, size); + raise_exception(EXCP0D_GPF); + } + } else { + if (seg_type & 0x0800 && !(seg_type & 0x0200)) { + if (loglevel & CPU_LOG_INT) + log_seg_violation(a0, seg_reg, type, size); + raise_exception(EXCP0D_GPF); + } + } + + if (((seg_type & 0x0C00) == 0x0400 + && (addr > (((env->segs[seg_reg].flags & 0x400000) ? 
+ 0xFFFFFFFF : 0xFFFF) - size) + || addr <= env->segs[seg_reg].limit)) + || addr > env->segs[seg_reg].limit - size) { + if (loglevel & CPU_LOG_INT) + log_seg_violation(a0, seg_reg, type, size); + raise_exception(EXCP0D_GPF); + } +} + +void helper_check_segmented_access(target_ulong a0, int seg_reg, int type) +{ + helper_check_segmented_access_size(a0, seg_reg, type, + (1 << (type & 0xFF)) - 1); +} + /* protected mode jump */ void helper_ljmp_protected(int new_cs, target_ulong new_eip, int next_eip_addend)