From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from eggs.gnu.org ([2001:4830:134:3::10]:42581)
	by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1VF9UW-0003r9-7K
	for qemu-devel@nongnu.org; Thu, 29 Aug 2013 17:11:02 -0400
Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71)
	(envelope-from ) id 1VF9UQ-0001x3-BT
	for qemu-devel@nongnu.org; Thu, 29 Aug 2013 17:10:56 -0400
Received: from mail-qc0-x230.google.com ([2607:f8b0:400d:c01::230]:39246)
	by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1VF9UQ-0001ws-77
	for qemu-devel@nongnu.org; Thu, 29 Aug 2013 17:10:50 -0400
Received: by mail-qc0-f176.google.com with SMTP id u20so468722qcx.35
	for ; Thu, 29 Aug 2013 14:10:49 -0700 (PDT)
Sender: Richard Henderson
From: Richard Henderson <rth@twiddle.net>
Date: Thu, 29 Aug 2013 14:09:45 -0700
Message-Id: <1377810586-19931-18-git-send-email-rth@twiddle.net>
In-Reply-To: <1377810586-19931-1-git-send-email-rth@twiddle.net>
References: <1377810586-19931-1-git-send-email-rth@twiddle.net>
Subject: [Qemu-devel] [PATCH v2 17/18] tcg-i386: Adjust tcg_out_tlb_load for x32
To: qemu-devel@nongnu.org
Cc: aurelien@aurel32.net

Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/i386/tcg-target.c | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)

diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 247c9d2..cde134f 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1085,33 +1085,46 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
     const int addrlo = args[addrlo_idx];
     const int r0 = TCG_REG_L0;
     const int r1 = TCG_REG_L1;
-    TCGType type = TCG_TYPE_I32;
-    int rexw = 0;
+    TCGType ttype = TCG_TYPE_I32;
+    TCGType htype = TCG_TYPE_I32;
+    int trexw = 0, hrexw = 0;
 
-    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
-        type = TCG_TYPE_I64;
-        rexw = P_REXW;
+    if (TCG_TARGET_REG_BITS == 64) {
+        if (TARGET_LONG_BITS == 64) {
+            ttype = TCG_TYPE_I64;
+            trexw = P_REXW;
+        }
+        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
+            htype = TCG_TYPE_I64;
+            hrexw = P_REXW;
+        }
     }
 
-    tcg_out_mov(s, type, r0, addrlo);
-    tcg_out_mov(s, type, r1, addrlo);
+    tcg_out_mov(s, htype, r0, addrlo);
+    tcg_out_mov(s, ttype, r1, addrlo);
 
-    tcg_out_shifti(s, SHIFT_SHR + rexw, r0,
+    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
-    tgen_arithi(s, ARITH_AND + rexw, r1,
+    tgen_arithi(s, ARITH_AND + trexw, r1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
-    tgen_arithi(s, ARITH_AND + rexw, r0,
+    tgen_arithi(s, ARITH_AND + hrexw, r0,
                 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
 
-    tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r0, TCG_AREG0, r0, 0,
+    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
                              offsetof(CPUArchState, tlb_table[mem_index][0])
                              + which);
 
     /* cmp 0(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r1, r0, 0);
+    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);
 
-    tcg_out_mov(s, type, r1, addrlo);
+    /* Prepare for both the fast path add of the tlb addend, and the slow
+       path function argument setup.  There are two cases worth noting:
+       for a 32-bit guest and x86_64 host, MOVL zero-extends the guest
+       address before the fastpath ADDQ below; for a 64-bit guest and x32
+       host, MOVQ copies the entire guest address for the slow path, while
+       truncation for the 32-bit host happens with the fastpath ADDL below.  */
+    tcg_out_mov(s, ttype, r1, addrlo);
 
     /* jne slow_path */
     tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
@@ -1131,7 +1144,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
     /* TLB Hit.  */
 
     /* add addend(r0), r1 */
-    tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r1, r0,
+    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                          offsetof(CPUTLBEntry, addend) - which);
 }
 #elif defined(__x86_64__) && defined(__linux__)
-- 
1.8.1.4
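
The x32 detail the new comment relies on is the x86_64 architectural rule
that writing a 32-bit register clears bits 63:32 of the full register, so a
32-bit guest address moved with MOVL is already zero-extended for the 64-bit
ADDQ of the TLB addend, with no explicit extension instruction. Below is a
minimal standalone sketch of that rule (illustrative C with GCC-style inline
assembly, not part of the patch or of QEMU; the file name demo.c and the
variable names are hypothetical):

/* Illustrative only: shows that a 32-bit MOV on x86_64 zero-extends
 * into the high half of the destination register.
 * Build on an x86_64 host: gcc -O2 demo.c */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t host_reg = 0xdeadbeefcafef00dULL; /* stale high bits */
    uint32_t guest_addr = 0x80001234u;

    /* movl writes the low 32 bits; the CPU clears bits 63:32. */
    __asm__("movl %k[src], %k[dst]"
            : [dst] "+r" (host_reg)
            : [src] "r" (guest_addr));

    /* Prints 0x80001234: the stale high bits are gone. */
    printf("0x%" PRIx64 "\n", host_reg);
    return 0;
}

The mirror case is why the patch splits the old type/rexw pair into guest
(ttype/trexw) and host (htype/hrexw) variants: on an x32 host the registers
are 64-bit (TCG_TARGET_REG_BITS == 64) but pointers are 32-bit, so for a
64-bit guest the fastpath ADDL both adds the addend and truncates the guest
address to the 32-bit host address, while the preceding MOVQ keeps the full
64-bit address available for the slow path.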