From: "Christopher M. Riedl" <cmr@codefail.de>
To: linuxppc-dev@lists.ozlabs.org
Subject: [PATCH 3/8] powerpc: Mark functions called inside uaccess blocks w/ 'notrace'
Date: Thu, 15 Oct 2020 10:01:54 -0500 [thread overview]
Message-ID: <20201015150159.28933-4-cmr@codefail.de> (raw)
In-Reply-To: <20201015150159.28933-1-cmr@codefail.de>
Functions called between user_*_access_begin() and user_*_access_end()
should be either inlined or marked 'notrace' so that tracing code never
runs while the kernel's userspace access window is open. Mark the
functions relevant to signal handling so that subsequent patches can
call them inside uaccess blocks.
Signed-off-by: Christopher M. Riedl <cmr@codefail.de>
---
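Not part of this patch, but for context, a rough sketch of the call
pattern these annotations are meant to allow. Everything below other
than flush_fp_to_thread(), user_write_access_begin()/_end() and
unsafe_put_user() is an illustrative placeholder, not the actual
signal-handling code from later patches in this series:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>	/* flush_fp_to_thread() */

/* Sketch only: why callees inside a uaccess block must be notrace. */
static int example_save_word(u32 __user *up)
{
	/* Opens the user access window (KUAP protection lifted). */
	if (!user_write_access_begin(up, sizeof(*up)))
		return -EFAULT;

	/*
	 * Anything called here runs with user access open; a traced
	 * call would drag ftrace code into that window, hence the
	 * notrace annotations in this patch.
	 */
	flush_fp_to_thread(current);

	unsafe_put_user(0, up, failed);

	user_write_access_end();
	return 0;

failed:
	user_write_access_end();
	return -EFAULT;
}

The mem.c change follows the same reasoning for flush_icache_range(),
which the trampoline setup later in this series is intended to call
while the uaccess block is still open.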
arch/powerpc/kernel/process.c | 20 ++++++++++----------
arch/powerpc/mm/mem.c | 4 ++--
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ba2c987b8403..bf5d9654bd2c 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -84,7 +84,7 @@ extern unsigned long _get_SP(void);
*/
bool tm_suspend_disabled __ro_after_init = false;
-static void check_if_tm_restore_required(struct task_struct *tsk)
+static void notrace check_if_tm_restore_required(struct task_struct *tsk)
{
/*
* If we are saving the current thread's registers, and the
@@ -151,7 +151,7 @@ void notrace __msr_check_and_clear(unsigned long bits)
EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
-static void __giveup_fpu(struct task_struct *tsk)
+static void notrace __giveup_fpu(struct task_struct *tsk)
{
unsigned long msr;
@@ -163,7 +163,7 @@ static void __giveup_fpu(struct task_struct *tsk)
tsk->thread.regs->msr = msr;
}
-void giveup_fpu(struct task_struct *tsk)
+void notrace giveup_fpu(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
@@ -177,7 +177,7 @@ EXPORT_SYMBOL(giveup_fpu);
* Make sure the floating-point register state in the
* the thread_struct is up to date for task tsk.
*/
-void flush_fp_to_thread(struct task_struct *tsk)
+void notrace flush_fp_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
/*
@@ -234,7 +234,7 @@ static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
-static void __giveup_altivec(struct task_struct *tsk)
+static void notrace __giveup_altivec(struct task_struct *tsk)
{
unsigned long msr;
@@ -246,7 +246,7 @@ static void __giveup_altivec(struct task_struct *tsk)
tsk->thread.regs->msr = msr;
}
-void giveup_altivec(struct task_struct *tsk)
+void notrace giveup_altivec(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(enable_kernel_altivec);
* Make sure the VMX/Altivec register state in the
* the thread_struct is up to date for task tsk.
*/
-void flush_altivec_to_thread(struct task_struct *tsk)
+void notrace flush_altivec_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
@@ -300,7 +300,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-static void __giveup_vsx(struct task_struct *tsk)
+static void notrace __giveup_vsx(struct task_struct *tsk)
{
unsigned long msr = tsk->thread.regs->msr;
@@ -317,7 +317,7 @@ static void __giveup_vsx(struct task_struct *tsk)
__giveup_altivec(tsk);
}
-static void giveup_vsx(struct task_struct *tsk)
+static void notrace giveup_vsx(struct task_struct *tsk)
{
check_if_tm_restore_required(tsk);
@@ -352,7 +352,7 @@ void enable_kernel_vsx(void)
}
EXPORT_SYMBOL(enable_kernel_vsx);
-void flush_vsx_to_thread(struct task_struct *tsk)
+void notrace flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ddc32cc1b6cf..da2345a2abc6 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -378,7 +378,7 @@ static inline bool flush_coherent_icache(unsigned long addr)
* @start: the start address
* @stop: the stop address (exclusive)
*/
-static void invalidate_icache_range(unsigned long start, unsigned long stop)
+static void notrace invalidate_icache_range(unsigned long start, unsigned long stop)
{
unsigned long shift = l1_icache_shift();
unsigned long bytes = l1_icache_bytes();
@@ -402,7 +402,7 @@ static void invalidate_icache_range(unsigned long start, unsigned long stop)
* @start: the start address
* @stop: the stop address (exclusive)
*/
-void flush_icache_range(unsigned long start, unsigned long stop)
+void notrace flush_icache_range(unsigned long start, unsigned long stop)
{
if (flush_coherent_icache(start))
return;
--
2.28.0
Thread overview: 32+ messages
2020-10-15 15:01 [PATCH 0/8] Improve signal performance on PPC64 with KUAP Christopher M. Riedl
2020-10-15 15:01 ` [PATCH 1/8] powerpc/uaccess: Add unsafe_copy_from_user Christopher M. Riedl
2020-10-16 6:54 ` Christoph Hellwig
2020-10-16 13:18 ` Christophe Leroy
2020-10-16 13:17 ` Christophe Leroy
2020-10-20 3:00 ` Christopher M. Riedl
2020-10-15 15:01 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx,fpr}_from_user() Christopher M. Riedl
2020-10-16 13:48 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx, fpr}_from_user() Christophe Leroy
2020-10-20 2:01 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx,fpr}_from_user() Christopher M. Riedl
2021-02-06 16:32 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx, fpr}_from_user() Christophe Leroy
2021-02-06 17:39 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx,fpr}_from_user() Christopher M. Riedl
2021-02-07 10:12 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx, fpr}_from_user() Christophe Leroy
2021-02-08 17:14 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx,fpr}_from_user() Christopher M. Riedl
2021-02-08 17:18 ` [PATCH 2/8] powerpc/signal: Add unsafe_copy_{vsx, fpr}_from_user() Christophe Leroy
2020-10-15 15:01 ` Christopher M. Riedl [this message]
2020-10-16 6:56 ` [PATCH 3/8] powerpc: Mark functions called inside uaccess blocks w/ 'notrace' Christoph Hellwig
2020-10-16 9:41 ` Peter Zijlstra
2020-10-20 7:34 ` Michael Ellerman
2020-10-16 7:02 ` Christophe Leroy
2020-10-20 1:59 ` Christopher M. Riedl
2020-10-15 15:01 ` [PATCH 4/8] powerpc/signal64: Replace setup_sigcontext() w/ unsafe_setup_sigcontext() Christopher M. Riedl
2020-10-15 15:01 ` [PATCH 5/8] powerpc/signal64: Replace restore_sigcontext() w/ unsafe_restore_sigcontext() Christopher M. Riedl
2020-10-15 15:01 ` [PATCH 6/8] powerpc/signal64: Replace setup_trampoline() w/ unsafe_setup_trampoline() Christopher M. Riedl
2020-10-16 13:56 ` Christophe Leroy
2020-10-20 2:42 ` Christopher M. Riedl
2020-10-20 5:02 ` Christophe Leroy
2020-10-15 15:01 ` [PATCH 7/8] powerpc/signal64: Rewrite handle_rt_signal64() to minimise uaccess switches Christopher M. Riedl
2020-10-16 14:00 ` Christophe Leroy
2020-10-20 2:44 ` Christopher M. Riedl
2020-10-15 15:01 ` [PATCH 8/8] powerpc/signal64: Rewrite rt_sigreturn() " Christopher M. Riedl
2020-10-16 14:07 ` Christophe Leroy
2020-10-20 2:45 ` Christopher M. Riedl