* [PATCH 1/3] powerpc/mm/64s: Consolidate SLB assertions
From: Michael Ellerman @ 2018-11-06 12:37 UTC (permalink / raw)
To: linuxppc-dev; +Cc: npiggin
The code for assert_slb_exists() and assert_slb_notexists() is almost
identical, except for the polarity of the WARN_ON(). In a future patch
we'll need to modify this code, so consolidate it now into a single
function.
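The combined WARN_ON() covers both polarities in one expression. As a
quick user-space sketch (illustrative only, not kernel code) of how it
behaves for the four combinations of expectation vs. slbfee. result:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's WARN_ON(): just report when it would fire. */
#define WARN_ON(cond) \
	do { if (cond) printf("WARN_ON hit: %s\n", #cond); } while (0)

/*
 * tmp models the slbfee. result: non-zero means an SLB entry for the
 * address was found, zero means it was not.
 */
static void check(bool present, unsigned long tmp)
{
	/* Fires whenever the SLB disagrees with the caller's expectation. */
	WARN_ON(present == (tmp == 0));
}

int main(void)
{
	check(true,  0x400);	/* expect present, entry found   -> silent */
	check(true,  0);	/* expect present, entry missing -> warns  */
	check(false, 0);	/* expect absent,  entry missing -> silent */
	check(false, 0x400);	/* expect absent,  entry found   -> warns  */
	return 0;
}

Only the two mismatched calls report anything.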
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
arch/powerpc/mm/slb.c | 29 +++++++++--------------------
1 file changed, 9 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c3fdf2969d9f..f3e002ee457b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -58,7 +58,7 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
unsigned long tmp;
@@ -66,19 +66,8 @@ static void assert_slb_exists(unsigned long ea)
WARN_ON_ONCE(mfmsr() & MSR_EE);
asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
- WARN_ON(tmp == 0);
-#endif
-}
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
- unsigned long tmp;
-
- WARN_ON_ONCE(mfmsr() & MSR_EE);
-
- asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
- WARN_ON(tmp != 0);
+ WARN_ON(present == (tmp == 0));
#endif
}
@@ -114,7 +103,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
*/
slb_shadow_update(ea, ssize, flags, index);
- assert_slb_notexists(ea);
+ assert_slb_presence(false, ea);
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(ea, ssize, flags)),
"r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +126,7 @@ void __slb_restore_bolted_realmode(void)
"r" (be64_to_cpu(p->save_area[index].esid)));
}
- assert_slb_exists(local_paca->kstack);
+ assert_slb_presence(true, local_paca->kstack);
}
/*
@@ -185,7 +174,7 @@ void slb_flush_and_restore_bolted(void)
:: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
: "memory");
- assert_slb_exists(get_paca()->kstack);
+ assert_slb_presence(true, get_paca()->kstack);
get_paca()->slb_cache_ptr = 0;
@@ -443,9 +432,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
ea = (unsigned long)
get_paca()->slb_cache[i] << SID_SHIFT;
/*
- * Could assert_slb_exists here, but hypervisor
- * or machine check could have come in and
- * removed the entry at this point.
+ * Could assert_slb_presence(true) here, but
+ * hypervisor or machine check could have come
+ * in and removed the entry at this point.
*/
slbie_data = ea;
@@ -676,7 +665,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
* User preloads should add isync afterwards in case the kernel
* accesses user memory before it returns to userspace with rfid.
*/
- assert_slb_notexists(ea);
+ assert_slb_presence(false, ea);
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
barrier();
--
2.17.2
* [PATCH 2/3] powerpc/mm/64s: Use PPC_SLBFEE macro
From: Michael Ellerman @ 2018-11-06 12:37 UTC (permalink / raw)
To: linuxppc-dev; +Cc: npiggin
Old toolchains don't know about slbfee and break the build, eg:
{standard input}:37: Error: Unrecognized opcode: `slbfee.'
Fix it by using the macro version. We need to add an underscore
variant that takes the raw register numbers coming from the inline asm
operands, rather than going through our Rx macros.
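The reason the Rx macros can't be used here: roughly speaking, they
token-paste a __REG_ prefix onto a symbolic register name, while inline
asm operands such as %0 are only turned into register numbers by the
compiler, long after the preprocessor has run. A sketch of the assumed
macro shapes (not the literal definitions from ppc-opcode.h):

/* Assumed shapes, for illustration only: */
#define ___PPC_RT(t)	(((t) & 0x1f) << 21)	/* takes the operand text as-is */
#define __PPC_RT(t)	___PPC_RT(__REG_##t)	/* wants a symbolic name, e.g. R9 */

/*
 * __PPC_RT(%0) would try to form __REG_%0, which can never resolve, so
 * the new __PPC_SLBFEE_DOT() uses the raw ___PPC_RT()/___PPC_RB() forms
 * and lets the assembler evaluate the expression once the compiler has
 * substituted %0/%1, as at the call site below:
 */
asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");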
Fixes: e15a4fea4dee ("powerpc/64s/hash: Add some SLB debugging tests")
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
arch/powerpc/include/asm/ppc-opcode.h | 2 ++
arch/powerpc/mm/slb.c | 3 ++-
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 6093bc8f74e5..a6e9e314c707 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -493,6 +493,8 @@
__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
__PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
+ ___PPC_RT(t) | ___PPC_RB(b))
#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \
__PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
/* PASemi instructions */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index f3e002ee457b..457fd29448b1 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -19,6 +19,7 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
+#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
@@ -65,7 +66,7 @@ static void assert_slb_presence(bool present, unsigned long ea)
WARN_ON_ONCE(mfmsr() & MSR_EE);
- asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+ asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
WARN_ON(present == (tmp == 0));
#endif
--
2.17.2
* [PATCH 3/3] powerpc/mm/64s: Only use slbfee on CPUs that support it
From: Michael Ellerman @ 2018-11-06 12:37 UTC (permalink / raw)
To: linuxppc-dev; +Cc: npiggin
The slbfee instruction was only added in ISA 2.05 (Power6), so it's
not supported on older CPUs. We don't have a CPU feature flag for that
ISA version though, so just use the ISA 2.06 feature flag.
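With this applied on top of patches 1 and 2, the helper ends up as
below (pieced together from the hunks in this series, shown here only
for reference):

static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}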
Fixes: e15a4fea4dee ("powerpc/64s/hash: Add some SLB debugging tests")
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
arch/powerpc/mm/slb.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 457fd29448b1..b663a36f9ada 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -66,6 +66,9 @@ static void assert_slb_presence(bool present, unsigned long ea)
WARN_ON_ONCE(mfmsr() & MSR_EE);
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ return;
+
asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
WARN_ON(present == (tmp == 0));
--
2.17.2
* Re: [PATCH 3/3] powerpc/mm/64s: Only use slbfee on CPUs that support it
From: Nicholas Piggin @ 2018-11-07 0:20 UTC (permalink / raw)
To: Michael Ellerman; +Cc: linuxppc-dev
On Tue, 6 Nov 2018 23:37:09 +1100
Michael Ellerman <mpe@ellerman.id.au> wrote:
> The slbfee instruction was only added in ISA 2.05 (Power6), so it's
> not supported on older CPUs. We don't have a CPU feature flag for that
> ISA version though, so just use the ISA 2.06 feature flag.
>
> Fixes: e15a4fea4dee ("powerpc/64s/hash: Add some SLB debugging tests")
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Ugh, thanks for fixing all that up. Looks good.
Thanks,
Nick
> ---
> arch/powerpc/mm/slb.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index 457fd29448b1..b663a36f9ada 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -66,6 +66,9 @@ static void assert_slb_presence(bool present, unsigned long ea)
>
> WARN_ON_ONCE(mfmsr() & MSR_EE);
>
> + if (!cpu_has_feature(CPU_FTR_ARCH_206))
> + return;
> +
> asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
>
> WARN_ON(present == (tmp == 0));
> --
> 2.17.2
>
* Re: [1/3] powerpc/mm/64s: Consolidate SLB assertions
From: Michael Ellerman @ 2018-11-07 21:27 UTC (permalink / raw)
To: Michael Ellerman, linuxppc-dev; +Cc: npiggin
On Tue, 2018-11-06 at 12:37:07 UTC, Michael Ellerman wrote:
> The code for assert_slb_exists() and assert_slb_notexists() is almost
> identical, except for the polarity of the WARN_ON(). In a future patch
> we'll need to modify this code, so consolidate it now into a single
> function.
>
> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Series applied to powerpc fixes.
https://git.kernel.org/powerpc/c/0ae790683fc28bb718d74f87cdf753
cheers