From: Ingo Molnar <mingo@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@amacapital.net>,
Thomas Gleixner <tglx@linutronix.de>,
"H . Peter Anvin" <hpa@zytor.com>,
Peter Zijlstra <peterz@infradead.org>,
Borislav Petkov <bp@alien8.de>,
Linus Torvalds <torvalds@linux-foundation.org>
Subject: [PATCH 10/43] x86/entry: Remap the TSS into the cpu entry area
Date: Fri, 24 Nov 2017 10:14:15 +0100
Message-ID: <20171124091448.7649-11-mingo@kernel.org>
In-Reply-To: <20171124091448.7649-1-mingo@kernel.org>
From: Andy Lutomirski <luto@kernel.org>
This has a secondary purpose: it puts the entry stack into a region
with a well-controlled layout. A subsequent patch will take advantage
of this to streamline the SYSCALL entry code so that it can find the
entry stack more easily.
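For readers following the layout argument, here is a minimal, self-contained
C sketch (not part of the patch) of the address computation that the
well-controlled layout enables: with the TSS remapped at a fixed offset
inside struct cpu_entry_area, the end of the SYSENTER stack is reachable
from a single per-CPU cpu_entry_area pointer plus compile-time offsets,
which is what the entry_32.S and asm-offsets.c hunks below encode in
assembly. The *_sketch type names, the stack size, and the helper function
are illustrative assumptions, not kernel code.

#include <stddef.h>

#define PAGE_SIZE		4096
#define SYSENTER_STACK_WORDS	64	/* size chosen for the sketch only */

struct tss_struct_sketch {
	unsigned long SYSENTER_stack[SYSENTER_STACK_WORDS];
	/* the hardware TSS and the I/O bitmap would follow here */
};

struct cpu_entry_area_sketch {
	char gdt[PAGE_SIZE];		/* read-only guard page below the TSS */
	struct tss_struct_sketch tss;
};

/*
 * End of this CPU's SYSENTER stack: a fixed offset from the per-CPU
 * cpu_entry_area pointer, mirroring what the entry_32.S hunks below do
 * with PER_CPU_VAR(cpu_entry_area) plus asm-offsets constants.
 */
static unsigned long sysenter_stack_end(const struct cpu_entry_area_sketch *cea)
{
	return (unsigned long)&cea->tss +
	       offsetof(struct tss_struct_sketch, SYSENTER_stack) +
	       sizeof(cea->tss.SYSENTER_stack);
}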
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/cdcba7e1e82122461b3ca36bb3ef6713ba605e35.1511497875.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/x86/entry/entry_32.S | 6 ++++--
arch/x86/include/asm/fixmap.h | 7 +++++++
arch/x86/kernel/asm-offsets.c | 3 +++
arch/x86/kernel/cpu/common.c | 38 ++++++++++++++++++++++++++++++++------
arch/x86/kernel/dumpstack.c | 3 ++-
arch/x86/power/cpu.c | 11 ++++++-----
6 files changed, 54 insertions(+), 14 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4838037f97f6..0ab316c46806 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -941,7 +941,8 @@ ENTRY(debug)
movl %esp, %eax # pt_regs pointer
/* Are we currently on the SYSENTER stack? */
- PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+ movl PER_CPU_VAR(cpu_entry_area), %ecx
+ addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx
jb .Ldebug_from_sysenter_stack
@@ -984,7 +985,8 @@ ENTRY(nmi)
movl %esp, %eax # pt_regs pointer
/* Are we currently on the SYSENTER stack? */
- PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
+ movl PER_CPU_VAR(cpu_entry_area), %ecx
+ addl $CPU_ENTRY_AREA_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx
jb .Lnmi_from_sysenter_stack
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0f4c92f02968..3a42da14c2cb 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -51,6 +51,13 @@ extern unsigned long __FIXADDR_TOP;
*/
struct cpu_entry_area {
char gdt[PAGE_SIZE];
+
+ /*
+ * The GDT is just below cpu_tss and thus serves (on x86_64) as a
+ * read-only guard page for the SYSENTER stack at the bottom
+ * of the TSS region.
+ */
+ struct tss_struct tss;
};
#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index b275863128eb..55858b277cf6 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -98,4 +98,7 @@ void common(void) {
OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
/* Size of SYSENTER_stack */
DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+
+ /* Layout info for cpu_entry_area */
+ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d173f6013467..c67742df569a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -490,6 +490,19 @@ void load_percpu_segment(int cpu)
load_stack_canary_segment();
}
+static void set_percpu_fixmap_pages(int fixmap_index, void *ptr, int pages, pgprot_t prot)
+{
+ int i;
+
+ for (i = 0; i < pages; i++)
+ __set_fixmap(fixmap_index - i, per_cpu_ptr_to_phys(ptr + i*PAGE_SIZE), prot);
+}
+
+#ifdef CONFIG_X86_32
+/* The 32-bit entry code needs to find cpu_entry_area. */
+DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+#endif
+
/* Setup the fixmap mappings only once per-processor */
static inline void setup_cpu_entry_area(int cpu)
{
@@ -531,7 +544,15 @@ static inline void setup_cpu_entry_area(int cpu)
*/
BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+ BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+ set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
+ &per_cpu(cpu_tss, cpu),
+ sizeof(struct tss_struct) / PAGE_SIZE,
+ PAGE_KERNEL);
+#ifdef CONFIG_X86_32
+ this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu));
+#endif
}
/* Load the original GDT from the per-cpu structure */
@@ -1282,7 +1303,8 @@ void enable_sep_cpu(void)
wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
wrmsr(MSR_IA32_SYSENTER_ESP,
- (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
+ (unsigned long)&get_cpu_entry_area(cpu)->tss +
+ offsetofend(struct tss_struct, SYSENTER_stack),
0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
@@ -1395,6 +1417,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
+ int cpu = smp_processor_id();
+
wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
@@ -1408,7 +1432,7 @@ void syscall_init(void)
*/
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
- (unsigned long)this_cpu_ptr(&cpu_tss) +
+ (unsigned long)&get_cpu_entry_area(cpu)->tss +
offsetofend(struct tss_struct, SYSENTER_stack));
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
@@ -1618,11 +1642,13 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, me);
+ setup_cpu_entry_area(cpu);
+
/*
* Initialize the TSS. Don't bother initializing sp0, as the initial
* task never enters user mode.
*/
- set_tss_desc(cpu, &t->x86_tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();
load_mm_ldt(&init_mm);
@@ -1635,7 +1661,6 @@ void cpu_init(void)
if (is_uv_system())
uv_cpu_init();
- setup_cpu_entry_area(cpu);
load_fixmap_gdt(cpu);
}
@@ -1676,11 +1701,13 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, curr);
+ setup_cpu_entry_area(cpu);
+
/*
* Initialize the TSS. Don't bother initializing sp0, as the initial
* task never enters user mode.
*/
- set_tss_desc(cpu, &t->x86_tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc();
load_mm_ldt(&init_mm);
@@ -1697,7 +1724,6 @@ void cpu_init(void)
fpu__init_cpu();
- setup_cpu_entry_area(cpu);
load_fixmap_gdt(cpu);
}
#endif
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index a8aa70c05489..bb61919c9335 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -45,7 +45,8 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
{
- struct tss_struct *tss = this_cpu_ptr(&cpu_tss);
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss;
/* Treat the canary as part of the stack for unwinding purposes. */
void *begin = &tss->SYSENTER_stack_canary;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 50593e138281..04d5157fe7f8 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -160,18 +160,19 @@ static void do_fpu_end(void)
static void fix_processor_context(void)
{
int cpu = smp_processor_id();
- struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
struct desc_struct *desc = get_cpu_gdt_rw(cpu);
tss_desc tss;
#endif
/*
- * This just modifies memory; should not be necessary. But... This is
- * necessary, because 386 hardware has concept of busy TSS or some
- * similar stupidity.
+ * We need to reload TR, which requires that we change the
+ * GDT entry to indicate "available" first.
+ *
+ * XXX: This could probably all be replaced by a call to
+ * force_reload_TR().
*/
- set_tss_desc(cpu, &t->x86_tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
#ifdef CONFIG_X86_64
memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
--
2.14.1