From: Joerg Roedel
Subject: [PATCH 11/31] x86/entry/32: Add PTI cr3 switches to NMI handler code
Date: Fri, 9 Feb 2018 10:25:20 +0100
Message-Id: <1518168340-9392-12-git-send-email-joro@8bytes.org>
In-Reply-To: <1518168340-9392-1-git-send-email-joro@8bytes.org>
References: <1518168340-9392-1-git-send-email-joro@8bytes.org>
To: Thomas Gleixner, Ingo Molnar, "H. Peter Anvin"
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org,
    Linus Torvalds, Andy Lutomirski, Dave Hansen, Josh Poimboeuf,
    Juergen Gross, Peter Zijlstra, Borislav Petkov, Jiri Kosina,
    Boris Ostrovsky, Brian Gerst, David Laight, Denys Vlasenko,
    Eduardo Valentin, Greg KH, Will Deacon, aliguori@amazon.com,
    daniel.gruss@iaik.tugraz.at, hughd@google.com, keescook@google.com,
    Andrea Arcangeli, Waiman Long, Pavel Machek, jroedel@suse.de,
    joro@8bytes.org

From: Joerg Roedel

The NMI handler is special: it needs to leave with the same cr3 it was
entered with. This is required because an NMI can interrupt kernel code
while the user cr3 is still loaded.

Signed-off-by: Joerg Roedel
---
 arch/x86/entry/entry_32.S | 52 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 46 insertions(+), 6 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index be1d814..9693485 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -77,6 +77,8 @@
 #endif
 .endm
 
+#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
+
 /*
  * User gs save/restore
  *
@@ -167,8 +169,30 @@
 .endm
 
-.macro SAVE_ALL_NMI
+.macro SAVE_ALL_NMI cr3_reg:req
 	SAVE_ALL
+
+	/*
+	 * Now switch the CR3 when PTI is enabled.
+	 *
+	 * We can enter with either user or kernel cr3; the code will
+	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
+	 * if necessary.
+	 */
+	ALTERNATIVE	"jmp .Lend_\@", "", X86_FEATURE_PTI
+
+	movl	%cr3, \cr3_reg
+	testl	$PTI_SWITCH_MASK, \cr3_reg
+	jz	.Lend_\@	/* Already on kernel cr3 */
+
+	/* On user cr3 - write new kernel cr3 */
+	andl	$(~PTI_SWITCH_MASK), \cr3_reg
+	movl	\cr3_reg, %cr3
+
+	/* Restore the user cr3 value in \cr3_reg for the exit path */
+	orl	$PTI_SWITCH_MASK, \cr3_reg
+
+.Lend_\@:
 .endm
 
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
@@ -227,13 +251,29 @@
 	RESTORE_SKIP_SEGMENTS \pop
 .endm
 
-.macro RESTORE_ALL_NMI pop=0
+.macro RESTORE_ALL_NMI cr3_reg:req pop=0
 	/*
 	 * Restore segments - might cause exceptions when loading
 	 * user-space segments
 	 */
 	RESTORE_SEGMENTS
 
+	/*
+	 * Now switch the CR3 when PTI is enabled.
+	 *
+	 * We enter with kernel cr3 and switch the cr3 to the value
+	 * stored in \cr3_reg, which is either a user or a kernel cr3.
+	 */
+	ALTERNATIVE	"jmp .Lswitched_\@", "", X86_FEATURE_PTI
+
+	testl	$PTI_SWITCH_MASK, \cr3_reg
+	jz	.Lswitched_\@
+
+	/* User cr3 in \cr3_reg - write it to hardware cr3 */
+	movl	\cr3_reg, %cr3
+
+.Lswitched_\@:
+
 	/* Restore integer registers and unwind stack to iret frame */
 	RESTORE_INT_REGS
 	RESTORE_SKIP_SEGMENTS \pop
@@ -1142,7 +1182,7 @@ ENTRY(nmi)
 #endif
 
 	pushl	%eax				# pt_regs->orig_ax
-	SAVE_ALL_NMI
+	SAVE_ALL_NMI cr3_reg=%edi
 	ENCODE_FRAME_POINTER
 	xorl	%edx, %edx			# zero error code
 	movl	%esp, %eax			# pt_regs pointer
@@ -1170,7 +1210,7 @@ ENTRY(nmi)
 
 .Lnmi_return:
 	CHECK_AND_APPLY_ESPFIX
-	RESTORE_ALL_NMI pop=4
+	RESTORE_ALL_NMI cr3_reg=%edi pop=4
 	jmp	.Lirq_return
 
 #ifdef CONFIG_X86_ESPFIX32
@@ -1186,12 +1226,12 @@ ENTRY(nmi)
 	pushl	16(%esp)
 	.endr
 	pushl	%eax
-	SAVE_ALL_NMI
+	SAVE_ALL_NMI cr3_reg=%edi
 	ENCODE_FRAME_POINTER
 	FIXUP_ESPFIX_STACK			# %eax == %esp
 	xorl	%edx, %edx			# zero error code
 	call	do_nmi
-	RESTORE_ALL_NMI
+	RESTORE_ALL_NMI cr3_reg=%edi
 	lss	12+4(%esp), %esp		# back to espfix stack
 	jmp	.Lirq_return
 #endif
-- 
2.7.4
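For readers following the cr3 bookkeeping: the invariant that SAVE_ALL_NMI
and RESTORE_ALL_NMI enforce can be modelled in plain C. The sketch below is
only an illustration and not kernel code: it operates on an ordinary
unsigned long instead of the real %cr3, hard-codes PAGE_SHIFT to 12 as on
x86, and the helper names (save_all_nmi_cr3()/restore_all_nmi_cr3()) and the
sample cr3 values are made up for the example.

/*
 * Stand-alone model of the NMI cr3 handling in the patch above.
 * It works on a plain unsigned long, not on the real %cr3 register.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* x86: 4 KiB pages */
#define PTI_SWITCH_MASK	(1UL << PAGE_SHIFT)	/* cr3 bit that marks the user page-table copy */

/* Model of SAVE_ALL_NMI: switch to kernel cr3, remember what to write back. */
static unsigned long save_all_nmi_cr3(unsigned long *cr3)
{
	unsigned long saved = *cr3;

	if (saved & PTI_SWITCH_MASK)		/* entered on user cr3 */
		*cr3 = saved & ~PTI_SWITCH_MASK; /* run the handler on kernel cr3 */

	return saved;				/* value the exit path will restore */
}

/* Model of RESTORE_ALL_NMI: write the saved value back if it was a user cr3. */
static void restore_all_nmi_cr3(unsigned long *cr3, unsigned long saved)
{
	if (saved & PTI_SWITCH_MASK)
		*cr3 = saved;
}

int main(void)
{
	/* Arbitrary example values - only the PTI bit matters here. */
	unsigned long kernel_cr3 = 0x12340000UL;
	unsigned long user_cr3   = kernel_cr3 | PTI_SWITCH_MASK;
	unsigned long cr3, saved;

	/* NMI hits while the user cr3 is loaded. */
	cr3 = user_cr3;
	saved = save_all_nmi_cr3(&cr3);
	assert(cr3 == kernel_cr3);		/* handler runs on kernel cr3 */
	restore_all_nmi_cr3(&cr3, saved);
	assert(cr3 == user_cr3);		/* leave with the cr3 we entered with */

	/* NMI hits while the kernel cr3 is already loaded. */
	cr3 = kernel_cr3;
	saved = save_all_nmi_cr3(&cr3);
	restore_all_nmi_cr3(&cr3, saved);
	assert(cr3 == kernel_cr3);

	printf("cr3 is preserved across the modelled NMI in both cases\n");
	return 0;
}

Built with e.g. "gcc -Wall cr3_model.c" (file name arbitrary), both assert()s
pass, matching the changelog requirement that the NMI handler leaves with the
same cr3 it was entered with.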