From: Brian Gerst <brgerst@gmail.com>
To: Andy Lutomirski <luto@amacapital.net>
Cc: "Andy Lutomirski" <luto@kernel.org>,
	"the arch/x86 maintainers" <x86@kernel.org>,
	"Linux Kernel Mailing List" <linux-kernel@vger.kernel.org>,
	"Borislav Petkov" <bp@alien8.de>,
	"Frédéric Weisbecker" <fweisbec@gmail.com>,
	"Denys Vlasenko" <dvlasenk@redhat.com>,
	"Linus Torvalds" <torvalds@linux-foundation.org>
Subject: [PATCH] x86/entry/64: Remove duplicate syscall table for fast path
Date: Wed,  9 Dec 2015 08:02:53 -0500	[thread overview]
Message-ID: <1449666173-15366-1-git-send-email-brgerst@gmail.com> (raw)
In-Reply-To: <CALCETrWqQiP7ST=Rr00gaYYF6Yr+49_gV9dWpZOp=wMvsy=yog@mail.gmail.com>

Instead of using a duplicate syscall table for the fast path, create stubs
for the syscalls that need pt_regs.  On the fast path, the stub saves the
extra registers before calling the real handler; on the slow path (indicated
by a thread_info flag), the extra registers are already saved, so the stub
jumps straight to the handler.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
To: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: the arch/x86 maintainers <x86@kernel.org>
Cc: Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
---

Applies on top of Andy's syscall cleanup series.
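
Below is a minimal user-space sketch of the idea (not kernel code: TS_SLOWPATH
and do_syscall_64 mirror the patch, everything else is invented for
illustration).  The slow path sets TS_SLOWPATH before dispatching, so the
ptregs stub only needs to save the extra registers when it was entered from
the fast path:

  #include <stdio.h>

  #define TS_SLOWPATH 0x0001

  static unsigned int ti_status;          /* models thread_info->status */

  static long real_ptregs_syscall(void)   /* stands in for e.g. sys_execve */
  {
          return 0;
  }

  static long ptregs_stub(void)           /* models stub_ptregs_64 */
  {
          if (!(ti_status & TS_SLOWPATH)) {
                  /*
                   * Fast path: the entry code did not save the extra regs,
                   * so the stub saves (and later restores) them itself.
                   */
                  printf("fast path: save extra regs, call, restore\n");
                  return real_ptregs_syscall();
          }
          /* Slow path: do_syscall_64() already saved everything. */
          printf("slow path: tail-call the real handler\n");
          return real_ptregs_syscall();
  }

  static void slow_path_dispatch(void)    /* models do_syscall_64() */
  {
          ti_status |= TS_SLOWPATH;
          ptregs_stub();
          ti_status &= ~TS_SLOWPATH;
  }

  int main(void)
  {
          ptregs_stub();          /* as if reached from the asm fast path */
          slow_path_dispatch();   /* as if reached via the C slow path */
          return 0;
  }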


 arch/x86/entry/calling.h           | 32 ++++++++++++++++----------------
 arch/x86/entry/common.c            |  4 ++++
 arch/x86/entry/entry_64.S          | 36 +++++++++++++++++++++++++++++-------
 arch/x86/entry/syscall_64.c        | 25 +++++--------------------
 arch/x86/include/asm/thread_info.h |  1 +
 5 files changed, 55 insertions(+), 43 deletions(-)
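
For reference, the __SYSCALL_64_QUAL_ token-pasting in the entry_64.S and
syscall_64.c hunks below expands roughly like this (syscall numbers shown only
for illustration; read is a plain syscall, execve is marked ptregs):

  __SYSCALL_64(0, sys_read, )           ->  [0]  = sys_read,
  __SYSCALL_64(59, sys_execve, ptregs)  ->  [59] = ptregs_sys_execve,

so the single sys_call_table sends ptregs syscalls through the generated
ptregs_* entry points, each of which loads the real handler's address into
%rax and jumps to stub_ptregs_64.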

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index e32206e..7c58bd2 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -129,22 +129,22 @@ For 32-bit we have the following conventions - kernel is built with
 	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
 	.endm
 
-	.macro SAVE_EXTRA_REGS offset=0
-	movq %r15, 0*8+\offset(%rsp)
-	movq %r14, 1*8+\offset(%rsp)
-	movq %r13, 2*8+\offset(%rsp)
-	movq %r12, 3*8+\offset(%rsp)
-	movq %rbp, 4*8+\offset(%rsp)
-	movq %rbx, 5*8+\offset(%rsp)
-	.endm
-
-	.macro RESTORE_EXTRA_REGS offset=0
-	movq 0*8+\offset(%rsp), %r15
-	movq 1*8+\offset(%rsp), %r14
-	movq 2*8+\offset(%rsp), %r13
-	movq 3*8+\offset(%rsp), %r12
-	movq 4*8+\offset(%rsp), %rbp
-	movq 5*8+\offset(%rsp), %rbx
+	.macro SAVE_EXTRA_REGS offset=0 base=rsp
+	movq %r15, 0*8+\offset(%\base)
+	movq %r14, 1*8+\offset(%\base)
+	movq %r13, 2*8+\offset(%\base)
+	movq %r12, 3*8+\offset(%\base)
+	movq %rbp, 4*8+\offset(%\base)
+	movq %rbx, 5*8+\offset(%\base)
+	.endm
+
+	.macro RESTORE_EXTRA_REGS offset=0 base=rsp
+	movq 0*8+\offset(%\base), %r15
+	movq 1*8+\offset(%\base), %r14
+	movq 2*8+\offset(%\base), %r13
+	movq 3*8+\offset(%\base), %r12
+	movq 4*8+\offset(%\base), %rbp
+	movq 5*8+\offset(%\base), %rbx
 	.endm
 
 	.macro ZERO_EXTRA_REGS
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 016ac47..4381aca 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -342,6 +342,8 @@ __visible void do_syscall_64(struct pt_regs *regs)
 	struct thread_info *ti = pt_regs_to_thread_info(regs);
 	unsigned long nr = regs->orig_ax;
 
+	ti->status |= TS_SLOWPATH;
+
 	enter_from_user_mode();
 	local_irq_enable();
 
@@ -360,6 +362,8 @@ __visible void do_syscall_64(struct pt_regs *regs)
 	}
 
 	syscall_return_slowpath(regs);
+
+	ti->status &= ~TS_SLOWPATH;
 }
 #endif
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1ab5362..5852ec6 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -188,7 +188,7 @@ entry_SYSCALL_64_fastpath:
 #endif
 	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
-	call	*sys_call_table_fastpath_64(, %rax, 8)
+	call	*sys_call_table(, %rax, 8)
 	movq	%rax, RAX(%rsp)
 1:
 
@@ -306,15 +306,37 @@ END(entry_SYSCALL_64)
 
 ENTRY(stub_ptregs_64)
 	/*
-	 * Syscalls marked as needing ptregs that go through the fast path
-	 * land here.  We transfer to the slow path.
+	 * Syscalls marked as needing ptregs land here.
+	 * If we are on the fast path, we need to save the extra regs.
+	 * If we are on the slow path, the extra regs are already saved.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	addq	$8, %rsp
-	jmp	entry_SYSCALL64_slow_path
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %r10
+	testl	$TS_SLOWPATH, ASM_THREAD_INFO(TI_status, %r10, 0)
+	jnz	1f
+	subq	$SIZEOF_PTREGS, %r10
+	SAVE_EXTRA_REGS base=r10
+	movq	%r10, %rbx
+	call	*%rax
+	movq	%rbx, %r10
+	RESTORE_EXTRA_REGS base=r10
+	ret
+1:
+	jmp	*%rax
 END(stub_ptregs_64)
 
+.macro ptregs_stub func
+ENTRY(ptregs_\func)
+	leaq	\func(%rip), %rax
+	jmp	stub_ptregs_64
+END(ptregs_\func)
+.endm
+
+#define __SYSCALL_64_QUAL_(sym)
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
+
+#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
+#include <asm/syscalls_64.h>
+
 /*
  * A newly forked process directly context switches into this address.
  *
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 601745c..9dbc5ab 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -6,11 +6,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64_QUAL_(sym) sym
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
+
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
 
-#define __SYSCALL_64(nr, sym, qual) [nr] = sym,
+#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
 
 extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
@@ -22,21 +25,3 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 	[0 ... __NR_syscall_max] = &sys_ni_syscall,
 #include <asm/syscalls_64.h>
 };
-
-#undef __SYSCALL_64
-
-extern long stub_ptregs_64(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
-
-#define __SYSCALL_64_QUAL_(nr, sym) [nr] = sym,
-#define __SYSCALL_64_QUAL_ptregs(nr, sym) [nr] = stub_ptregs_64,
-
-#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(nr, sym)
-
-asmlinkage const sys_call_ptr_t sys_call_table_fastpath_64[__NR_syscall_max+1] = {
-	/*
-	 * Smells like a compiler bug -- it doesn't work
-	 * when the & below is removed.
-	 */
-	[0 ... __NR_syscall_max] = &sys_ni_syscall,
-#include <asm/syscalls_64.h>
-};
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ae210d6..358e3a9 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -229,6 +229,7 @@ static inline unsigned long current_stack_pointer(void)
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
+#define TS_SLOWPATH		0x0001	/* syscall slowpath (64BIT) */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
 #define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
 
-- 
2.5.0

