From: Anton Blanchard <anton@samba.org>
To: benh@kernel.crashing.org, paulus@samba.org, mpe@ellerman.id.au,
	mikey@neuling.org, cyrilbur@gmail.com
Cc: linuxppc-dev@lists.ozlabs.org
Subject: [PATCH 12/19] powerpc: Create msr_check_and_{set,clear}()
Date: Wed, 28 Oct 2015 11:51:00 +1100
Message-ID: <1445993467-667-12-git-send-email-anton@samba.org>
In-Reply-To: <1445993467-667-1-git-send-email-anton@samba.org>

Create helper functions to set and clear MSR bits, skipping the mtmsr
when the bits are already in the requested state. Grouping the MSR
manipulation in one place will make it easy to avoid the MSR writes
entirely in a subsequent optimisation.
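
For example, giveup_fpu() becomes:

	msr_check_and_set(MSR_FP);	/* enable MSR_FP (and MSR_VSX) if not already set */
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);	/* clear again, skipping the mtmsr if nothing changed */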

Signed-off-by: Anton Blanchard <anton@samba.org>
---
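A usage sketch, assuming a kernel FPU user that has already disabled
preemption (which the WARN_ON(preemptible()) in enable_kernel_fp() checks):

	preempt_disable();
	enable_kernel_fp();	/* msr_check_and_set(MSR_FP), saving any live user FP state */
	/* ... kernel code may now use FP/VSX instructions ... */
	preempt_enable();
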
 arch/powerpc/kernel/process.c | 107 ++++++++++++++++++++----------------------
 1 file changed, 52 insertions(+), 55 deletions(-)

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c39aa5a..5f244d0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -87,23 +87,46 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-#ifdef CONFIG_PPC_FPU
-void giveup_fpu(struct task_struct *tsk)
+static void msr_check_and_set(unsigned long bits)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
 
-	check_if_tm_restore_required(tsk);
+	newmsr = oldmsr | bits;
 
-	newmsr = oldmsr | MSR_FP;
 #ifdef CONFIG_VSX
-	if (cpu_has_feature(CPU_FTR_VSX))
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
 		newmsr |= MSR_VSX;
 #endif
+
 	if (oldmsr != newmsr)
 		mtmsr_isync(newmsr);
+}
+
+static void msr_check_and_clear(unsigned long bits)
+{
+	unsigned long oldmsr = mfmsr();
+	unsigned long newmsr;
+
+	newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+		newmsr &= ~MSR_VSX;
+#endif
 
+	if (oldmsr != newmsr)
+		mtmsr_isync(newmsr);
+}
+
+#ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+	check_if_tm_restore_required(tsk);
+
+	msr_check_and_set(MSR_FP);
 	__giveup_fpu(tsk);
+	msr_check_and_clear(MSR_FP);
 }
 EXPORT_SYMBOL(giveup_fpu);
 
@@ -144,30 +167,21 @@ void enable_kernel_fp(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
-		giveup_fpu(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_FP);
 
-		if (!(oldmsr & MSR_FP))
-			mtmsr_isync(oldmsr | MSR_FP);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		__giveup_fpu(current);
 }
 EXPORT_SYMBOL(enable_kernel_fp);
 
 #ifdef CONFIG_ALTIVEC
 void giveup_altivec(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);
 
-	newmsr = oldmsr | MSR_VEC;
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_VEC);
 	__giveup_altivec(tsk);
+	msr_check_and_clear(MSR_VEC);
 }
 EXPORT_SYMBOL(giveup_altivec);
 
@@ -175,14 +189,10 @@ void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
-		giveup_altivec(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_VEC);
 
-		if (!(oldmsr & MSR_VEC))
-			mtmsr_isync(oldmsr | MSR_VEC);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+		__giveup_altivec(current);
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
@@ -207,20 +217,15 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #ifdef CONFIG_VSX
 void giveup_vsx(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);
 
-	newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 	if (tsk->thread.regs->msr & MSR_FP)
 		__giveup_fpu(tsk);
 	if (tsk->thread.regs->msr & MSR_VEC)
 		__giveup_altivec(tsk);
 	__giveup_vsx(tsk);
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
 EXPORT_SYMBOL(giveup_vsx);
 
@@ -228,13 +233,14 @@ void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
-		giveup_vsx(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
-		if (!(oldmsr & MSR_VSX))
-			mtmsr_isync(oldmsr | MSR_VSX);
+	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+		if (current->thread.regs->msr & MSR_FP)
+			__giveup_fpu(current);
+		if (current->thread.regs->msr & MSR_VEC)
+			__giveup_altivec(current);
+		__giveup_vsx(current);
 	}
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
@@ -256,16 +262,11 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #ifdef CONFIG_SPE
 void giveup_spe(struct task_struct *tsk)
 {
-	u64 oldmsr = mfmsr();
-	u64 newmsr;
-
 	check_if_tm_restore_required(tsk);
 
-	newmsr = oldmsr | MSR_SPE;
-	if (oldmsr != newmsr)
-		mtmsr_isync(newmsr);
-
+	msr_check_and_set(MSR_SPE);
 	__giveup_spe(tsk);
+	msr_check_and_clear(MSR_SPE);
 }
 EXPORT_SYMBOL(giveup_spe);
 
@@ -273,14 +274,10 @@ void enable_kernel_spe(void)
 {
 	WARN_ON(preemptible());
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
-		giveup_spe(current);
-	} else {
-		u64 oldmsr = mfmsr();
+	msr_check_and_set(MSR_SPE);
 
-		if (!(oldmsr & MSR_SPE))
-			mtmsr_isync(oldmsr | MSR_SPE);
-	}
+	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+		__giveup_spe(current);
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
-- 
2.5.0
