* [PATCH v5 1/3] powerpc: Improve FSCR init and context switching
2016-05-06 2:30 ` [PATCH v5 0/3] POWER9 Load Monitor Support Michael Neuling
@ 2016-05-06 2:30 ` Michael Neuling
2016-05-06 2:30 ` [PATCH v5 2/3] powerpc: Load Monitor Register Support Michael Neuling
` (2 subsequent siblings)
3 siblings, 0 replies; 28+ messages in thread
From: Michael Neuling @ 2016-05-06 2:30 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey
This fixes a few issues with FSCR init and switching.
In this patch:
powerpc: Create context switch helpers save_sprs() and restore_sprs()
Author: Anton Blanchard <anton@samba.org>
commit 152d523e6307c7152f9986a542f873b5c5863937
We moved the setting of the FSCR register from inside a
CPU_FTR_ARCH_207S section to inside just a CPU_FTR_DSCR section.
Hence we are setting FSCR on POWER6/7, where the FSCR doesn't
exist. This is harmless but we shouldn't do it.
Also, we can simplify the FSCR context switch. We don't need to go
through the calculation involving dscr_inherit. We can just restore
what we saved last time.
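For illustration, the end state these two fixes aim for in
restore_sprs() looks roughly like this (a simplified sketch of the
logic, not the literal diff below):

	if (cpu_has_feature(CPU_FTR_DSCR)) {
		/* DSCR handling only: POWER6/7 have a DSCR but no FSCR */
		u64 dscr = new_thread->dscr_inherit ? new_thread->dscr
						    : get_paca()->dscr_default;
		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		/* FSCR exists from ISA 2.07 (POWER8) onwards: restore what
		 * was saved at the last switch, no dscr_inherit recalculation */
		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);
	}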
Also, we don't currently explicitly init the FSCR for userspace
applications. We init FSCR on boot in __init_FSCR and the first task
then inherits from that. This works but is delicate. This patch adds
init_fscr() to explicitly set the FSCR for userspace applications and
removes the __init_FSCR boot-time init.
Based on patch by Jack Miller.
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
arch/powerpc/kernel/cpu_setup_power.S | 10 ----------
arch/powerpc/kernel/process.c | 23 +++++++++++++++--------
2 files changed, 15 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119..75f98c8 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -49,7 +49,6 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
- bl __init_FSCR
bl __init_PMU
bl __init_hvmode_206
mtlr r11
@@ -67,7 +66,6 @@ _GLOBAL(__setup_cpu_power8)
_GLOBAL(__restore_cpu_power8)
mflr r11
- bl __init_FSCR
bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
@@ -86,7 +84,6 @@ _GLOBAL(__restore_cpu_power8)
_GLOBAL(__setup_cpu_power9)
mflr r11
- bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@@ -102,7 +99,6 @@ _GLOBAL(__setup_cpu_power9)
_GLOBAL(__restore_cpu_power9)
mflr r11
- bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -155,12 +151,6 @@ __init_LPCR:
isync
blr
-__init_FSCR:
- mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
- mtspr SPRN_FSCR,r3
- blr
-
__init_HFSCR:
mfspr r3,SPRN_HFSCR
ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ea8a28f..e934677 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1023,18 +1023,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
- u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-
- if (new_thread->dscr_inherit) {
+ if (new_thread->dscr_inherit)
dscr = new_thread->dscr;
- fscr |= FSCR_DSCR;
- }
if (old_thread->dscr != dscr)
mtspr(SPRN_DSCR, dscr);
-
- if (old_thread->fscr != fscr)
- mtspr(SPRN_FSCR, fscr);
}
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -1045,6 +1038,9 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->ebbrr != new_thread->ebbrr)
mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ if (old_thread->fscr != new_thread->fscr)
+ mtspr(SPRN_FSCR, new_thread->fscr);
+
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
@@ -1391,6 +1387,15 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
#endif
}
+#ifdef CONFIG_PPC64
+void init_fscr(struct task_struct *tsk)
+{
+ tsk->thread.fscr = FSCR_TAR|FSCR_EBB;
+ if (current->thread.dscr_inherit)
+ tsk->thread.fscr |= FSCR_DSCR;
+}
+#endif
+
/*
* Copy a thread..
*/
@@ -1484,6 +1489,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.dscr_inherit = current->thread.dscr_inherit;
p->thread.dscr = mfspr(SPRN_DSCR);
}
+ init_fscr(p);
if (cpu_has_feature(CPU_FTR_HAS_PPR))
p->thread.ppr = INIT_PPR;
#endif
@@ -1574,6 +1580,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->gpr[2] = 0;
regs->msr = MSR_USER32;
}
+ init_fscr(current);
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
--
2.7.4
* [PATCH v5 2/3] powerpc: Load Monitor Register Support
2016-05-06 2:30 ` [PATCH v5 0/3] POWER9 Load Monitor Support Michael Neuling
2016-05-06 2:30 ` [PATCH v5 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
@ 2016-05-06 2:30 ` Michael Neuling
2016-05-06 2:30 ` [PATCH v5 3/3] powerpc: Load Monitor Register Tests Michael Neuling
2016-06-08 10:38 ` [PATCH v6 0/3] POWER9 Load Monitor Support Michael Neuling
3 siblings, 0 replies; 28+ messages in thread
From: Michael Neuling @ 2016-05-06 2:30 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey
From: Jack Miller <jack@codezen.org>
This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.
This patch disables the FSCR LM control bit on task init and enables
that bit when a load monitor facility unavailable exception is taken,
i.e. on first use. On context switch, this bit is then used to
determine whether the two relevant registers are saved and
restored. This is done lazily for performance reasons.
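To make the lazy scheme concrete, here is a rough sketch of the flow
with simplified structure (the real hunks are in the diff below):

	/* 1. FSCR_LM is clear at task init, so the first ldmx traps to
	 * facility_unavailable_exception(), which flips the bit on: */
	value = mfspr(SPRN_FSCR);
	mtspr(SPRN_FSCR, value | FSCR_LM);	/* ldmx then re-executes */

	/* 2. From then on the context switch path only pays for the
	 * extra SPR accesses when the task has actually used LM: */
	/* in save_sprs() */
	if (t->fscr & FSCR_LM) {
		t->lmrr = mfspr(SPRN_LMRR);
		t->lmser = mfspr(SPRN_LMSER);
	}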
Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
arch/powerpc/include/asm/processor.h | 2 ++
arch/powerpc/include/asm/reg.h | 5 +++++
arch/powerpc/kernel/process.c | 19 +++++++++++++++++++
arch/powerpc/kernel/traps.c | 4 ++++
4 files changed, 30 insertions(+)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 009fab1..2bb822b 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long mmcr2;
unsigned mmcr0;
unsigned used_ebb;
+ unsigned long lmrr;
+ unsigned long lmser;
#endif
};
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c1e82e9..2e906ec 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -282,6 +282,8 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
@@ -291,6 +293,7 @@
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -300,10 +303,12 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_LM __MASK(FSCR_LM_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_LM __MASK(FSCR_LM_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e934677..cbcf922 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1009,6 +1009,14 @@ static inline void save_sprs(struct thread_struct *t)
*/
t->tar = mfspr(SPRN_TAR);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally save Load Monitor registers, if enabled */
+ if (t->fscr & FSCR_LM) {
+ t->lmrr = mfspr(SPRN_LMRR);
+ t->lmser = mfspr(SPRN_LMSER);
+ }
+ }
#endif
}
@@ -1044,6 +1052,16 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally restore Load Monitor registers, if enabled */
+ if (new_thread->fscr & FSCR_LM) {
+ if (old_thread->lmrr != new_thread->lmrr);
+ mtspr(SPRN_LMRR, new_thread->lmrr);
+ if (old_thread->lmser != new_thread->lmser);
+ mtspr(SPRN_LMSER, new_thread->lmser);
+ }
+ }
#endif
}
@@ -1393,6 +1411,7 @@ void init_fscr(struct task_struct *tsk)
tsk->thread.fscr = FSCR_TAR|FSCR_EBB;
if (current->thread.dscr_inherit)
tsk->thread.fscr |= FSCR_DSCR;
+ tsk->thread.fscr &= ~FSCR_LM;
}
#endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9229ba6..93ff3ac 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1376,6 +1376,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
+ [FSCR_LM_LG] = "LM",
};
char *facility = "unknown";
u64 value;
@@ -1432,6 +1433,9 @@ void facility_unavailable_exception(struct pt_regs *regs)
emulate_single_step(regs);
}
return;
+ } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ mtspr(SPRN_FSCR, value | FSCR_LM);
+ return;
}
if ((status < ARRAY_SIZE(facility_strings)) &&
--
2.7.4
* [PATCH v5 3/3] powerpc: Load Monitor Register Tests
2016-05-06 2:30 ` [PATCH v5 0/3] POWER9 Load Monitor Support Michael Neuling
2016-05-06 2:30 ` [PATCH v5 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
2016-05-06 2:30 ` [PATCH v5 2/3] powerpc: Load Monitor Register Support Michael Neuling
@ 2016-05-06 2:30 ` Michael Neuling
2016-06-08 10:38 ` [PATCH v6 0/3] POWER9 Load Monitor Support Michael Neuling
3 siblings, 0 replies; 28+ messages in thread
From: Michael Neuling @ 2016-05-06 2:30 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey
From: Jack Miller <jack@codezen.org>
Adds two tests. One is a simple test to ensure that the new registers
LMRR and LMSER are properly maintained. The other actually uses the
existing EBB test infrastructure to test that LMRR and LMSER behave as
documented.
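As a concrete example of the geometry the section test relies on: the
32MB region divides into 64 sections of 512KB each, one per LMSER bit,
with bit 0 (the most significant bit) covering section 0. A worked
sketch matching the test's bit arithmetic:

	/* SECTION_SIZE = 32MB / 64 = 512KB; section i spans
	 * [i * 512KB, (i + 1) * 512KB) within the region */
	int section = 5;
	unsigned long enable = 1UL << (63 - section);	/* LMSER bit for section 5 */
	mtspr(SPRN_LMSER, enable);	/* only section 5 now triggers LM EBBs */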
Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
tools/testing/selftests/powerpc/pmu/ebb/.gitignore | 2 +
tools/testing/selftests/powerpc/pmu/ebb/Makefile | 2 +-
tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c | 143 +++++++++++++++++++++
tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h | 39 ++++++
.../selftests/powerpc/pmu/ebb/ebb_lmr_regs.c | 37 ++++++
tools/testing/selftests/powerpc/pmu/ebb/reg.h | 5 +
6 files changed, 227 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 42bddbe..44b7df1 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -20,3 +20,5 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
+ebb_lmr
+ebb_lmr_regs
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 8d2279c4..6b0453e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -14,7 +14,7 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test
+ cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
new file mode 100644
index 0000000..c47ebd5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define SIZE (32 * 1024 * 1024) /* 32M */
+#define LM_SIZE 0 /* Smallest encoding, 32M */
+
+#define SECTIONS 64 /* 1 per bit in LMSER */
+#define SECTION_SIZE (SIZE / SECTIONS)
+#define SECTION_LONGS (SECTION_SIZE / sizeof(long))
+
+static unsigned long *test_mem;
+
+static int lmr_count = 0;
+
+void ebb_lmr_handler(void)
+{
+ lmr_count++;
+}
+
+void ldmx_full_section(unsigned long *mem, int section)
+{
+ unsigned long *ptr;
+ int i;
+
+ for (i = 0; i < SECTION_LONGS; i++) {
+ ptr = &mem[(SECTION_LONGS * section) + i];
+ ldmx((unsigned long) &ptr);
+ ebb_lmr_reset();
+ }
+}
+
+unsigned long section_masks[] = {
+ 0x8000000000000000,
+ 0xFF00000000000000,
+ 0x0000000F70000000,
+ 0x8000000000000001,
+ 0xF0F0F0F0F0F0F0F0,
+ 0x0F0F0F0F0F0F0F0F,
+ 0x0
+};
+
+int ebb_lmr_section_test(unsigned long *mem)
+{
+ unsigned long *mask = section_masks;
+ int i;
+
+ for (; *mask; mask++) {
+ mtspr(SPRN_LMSER, *mask);
+ printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER));
+
+ for (i = 0; i < 64; i++) {
+ lmr_count = 0;
+ ldmx_full_section(mem, i);
+ if (*mask & (1UL << (63 - i)))
+ FAIL_IF(lmr_count != SECTION_LONGS);
+ else
+ FAIL_IF(lmr_count);
+ }
+ }
+
+ return 0;
+}
+
+int ebb_lmr(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ setup_ebb_handler(ebb_lmr_handler);
+
+ ebb_global_enable();
+
+ FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0);
+
+ mtspr(SPRN_LMSER, 0);
+
+ FAIL_IF(mfspr(SPRN_LMSER) != 0);
+
+ mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE));
+
+ FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE));
+
+ /* Read every single byte to ensure we get no false positives */
+ for (i = 0; i < SECTIONS; i++)
+ ldmx_full_section(test_mem, i);
+
+ FAIL_IF(lmr_count != 0);
+
+ /* Turn on the first section */
+
+ mtspr(SPRN_LMSER, (1UL << 63));
+ FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63));
+
+ /* Enable LM (BESCR) */
+
+ mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME);
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME));
+
+ ldmx((unsigned long)&test_mem);
+
+ FAIL_IF(lmr_count != 1); // exactly one exception
+ FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME); // LM now disabled
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO)); // occurred bit set
+
+ printf("Simple LMR EBB OK\n");
+
+ /* This shouldn't cause an EBB since it's been disabled */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 1);
+
+ printf("LMR disable on EBB OK\n");
+
+ ebb_lmr_reset();
+
+ /* This should cause an EBB or reset is broken */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 2);
+
+ printf("LMR reset EBB OK\n");
+
+ ebb_lmr_reset();
+
+ return ebb_lmr_section_test(test_mem);
+}
+
+int main(void)
+{
+ int ret = test_harness(ebb_lmr, "ebb_lmr");
+
+ if (test_mem)
+ free(test_mem);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
new file mode 100644
index 0000000..ef50abd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
@@ -0,0 +1,39 @@
+#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+
+#include "reg.h"
+
+#ifndef PPC_FEATURE2_ARCH_3_00
+#define PPC_FEATURE2_ARCH_3_00 0x00800000
+#endif
+
+#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00)
+
+static inline void ebb_lmr_reset(void)
+{
+ unsigned long bescr = mfspr(SPRN_BESCR);
+ bescr &= ~(BESCR_LMEO);
+ bescr |= BESCR_LME;
+ mtspr(SPRN_BESCR, bescr);
+}
+
+#define LDMX(t, a, b)\
+ (0x7c00026a | \
+ (((t) & 0x1f) << 21) | \
+ (((a) & 0x1f) << 16) | \
+ (((b) & 0x1f) << 11))
+
+static inline unsigned long ldmx(unsigned long address)
+{
+ unsigned long ret;
+
+ asm volatile ("mr 9, %1\r\n"
+ ".long " __stringify(LDMX(9, 0, 9)) "\r\n"
+ "mr %0, 9\r\n":"=r"(ret)
+ :"r"(address)
+ :"r9");
+
+ return ret;
+}
+
+#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
new file mode 100644
index 0000000..aff4241
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define CHECKS 10000
+
+int ebb_lmr_regs(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ ebb_global_enable();
+
+ for (i = 0; i < CHECKS; i++) {
+ mtspr(SPRN_LMRR, i << 25); // skip size and rsvd bits
+ mtspr(SPRN_LMSER, i);
+
+ FAIL_IF(mfspr(SPRN_LMRR) != (i << 25));
+ FAIL_IF(mfspr(SPRN_LMSER) != i);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(ebb_lmr_regs, "ebb_lmr_regs");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/reg.h b/tools/testing/selftests/powerpc/pmu/ebb/reg.h
index 5921b0d..0659a7b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/reg.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/reg.h
@@ -34,6 +34,11 @@
#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
+#define BESCR_LME (0x1ul << 34) /* Load Monitor Enable */
+#define BESCR_LMEO (0x1ul << 2) /* Load Monitor Exception Occurred */
+
+#define SPRN_LMRR 813 /* Load Monitor Region Register */
+#define SPRN_LMSER 814 /* Load Monitor Section Enable Register */
#define SPRN_PMC1 771
#define SPRN_PMC2 772
--
2.7.4
* [PATCH v6 0/3] POWER9 Load Monitor Support
2016-05-06 2:30 ` [PATCH v5 0/3] POWER9 Load Monitor Support Michael Neuling
` (2 preceding siblings ...)
2016-05-06 2:30 ` [PATCH v5 3/3] powerpc: Load Monitor Register Tests Michael Neuling
@ 2016-06-08 10:38 ` Michael Neuling
2016-06-08 10:38 ` [PATCH v6 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
` (3 more replies)
3 siblings, 4 replies; 28+ messages in thread
From: Michael Neuling @ 2016-06-08 10:38 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey, cyrilbur
This patch series adds support for the POWER9 Load Monitor
instruction (ldmx), based on work from Jack Miller.
The first patch is a cleanup of the FSCR handling. The second patch
adds the actual ldmx support to the kernel. The third patch is a
couple of ldmx selftests.
v6:
- PATCH 1/3:
- Suggestions from mpe.
- Init the FSCR using the existing INIT_THREAD macro rather than
an init_fscr() function.
- Set fscr when taking DSCR exception in
facility_unavailable_exception().
- PATCH 2/3:
- Remove erroneous semicolons in restore_sprs().
- PATCH 3/3:
- no change.
v5:
- PATCH 1/3:
- Made the FSCR cleanup more extensive.
- PATCH 2/3:
- Moved FSCR_LM clearing to the new init_fscr().
- PATCH 3/3:
- Added test cases to .gitignore.
- Removed test against PPC_FEATURE2_EBB since it's not needed.
- Added parentheses around input parameter usage in the LDMX() macro.
Jack Miller (2):
powerpc: Load Monitor Register Support
powerpc: Load Monitor Register Tests
Michael Neuling (1):
powerpc: Improve FSCR init and context switching
* [PATCH v6 1/3] powerpc: Improve FSCR init and context switching
2016-06-08 10:38 ` [PATCH v6 0/3] POWER9 Load Monitor Support Michael Neuling
@ 2016-06-08 10:38 ` Michael Neuling
2016-06-08 10:38 ` [PATCH v6 2/3] powerpc: Load Monitor Register Support Michael Neuling
` (2 subsequent siblings)
3 siblings, 0 replies; 28+ messages in thread
From: Michael Neuling @ 2016-06-08 10:38 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey, cyrilbur
This fixes a few issues with FSCR init and switching.
In this patch:
powerpc: Create context switch helpers save_sprs() and restore_sprs()
Author: Anton Blanchard <anton@samba.org>
commit 152d523e6307c7152f9986a542f873b5c5863937
We moved the setting of the FSCR register from inside a
CPU_FTR_ARCH_207S section to inside just a CPU_FTR_DSCR section.
Hence we are setting FSCR on POWER6/7, where the FSCR doesn't
exist. This is harmless but we shouldn't do it.
Also, we can simplify the FSCR context switch. We don't need to go
through the calculation involving dscr_inherit. We can just restore
what we saved last time.
Also, we don't currently explicitly init the FSCR for userspace
applications. We init FSCR on boot in __init_FSCR and the first task
then inherits from that. This works but is delicate. This patch adds
the initial fscr value to INIT_THREAD to explicitly set the FSCR for
userspace applications and removes the __init_FSCR boot-time init.
Based on patch by Jack Miller.
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
arch/powerpc/include/asm/processor.h | 1 +
arch/powerpc/kernel/cpu_setup_power.S | 10 ----------
arch/powerpc/kernel/process.c | 12 ++++--------
arch/powerpc/kernel/traps.c | 3 ++-
4 files changed, 7 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 009fab1..1833fe9 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -347,6 +347,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
+ .fscr = FSCR_TAR | FSCR_EBB \
}
#endif
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119..75f98c8 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -49,7 +49,6 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
- bl __init_FSCR
bl __init_PMU
bl __init_hvmode_206
mtlr r11
@@ -67,7 +66,6 @@ _GLOBAL(__setup_cpu_power8)
_GLOBAL(__restore_cpu_power8)
mflr r11
- bl __init_FSCR
bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
@@ -86,7 +84,6 @@ _GLOBAL(__restore_cpu_power8)
_GLOBAL(__setup_cpu_power9)
mflr r11
- bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@@ -102,7 +99,6 @@ _GLOBAL(__setup_cpu_power9)
_GLOBAL(__restore_cpu_power9)
mflr r11
- bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -155,12 +151,6 @@ __init_LPCR:
isync
blr
-__init_FSCR:
- mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
- mtspr SPRN_FSCR,r3
- blr
-
__init_HFSCR:
mfspr r3,SPRN_HFSCR
ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2f12cb..74ea8db 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1023,18 +1023,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
- u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-
- if (new_thread->dscr_inherit) {
+ if (new_thread->dscr_inherit)
dscr = new_thread->dscr;
- fscr |= FSCR_DSCR;
- }
if (old_thread->dscr != dscr)
mtspr(SPRN_DSCR, dscr);
-
- if (old_thread->fscr != fscr)
- mtspr(SPRN_FSCR, fscr);
}
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -1045,6 +1038,9 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->ebbrr != new_thread->ebbrr)
mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ if (old_thread->fscr != new_thread->fscr)
+ mtspr(SPRN_FSCR, new_thread->fscr);
+
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9229ba6..a4b00ee 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1418,7 +1418,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
- mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ current->thread.fscr = value | FSCR_DSCR;
+ mtspr(SPRN_FSCR, current->thread.fscr);
}
/* Read from DSCR (mfspr RT, 0x03) */
--
2.7.4
* [PATCH v6 2/3] powerpc: Load Monitor Register Support
2016-06-08 10:38 ` [PATCH v6 0/3] POWER9 Load Monitor Support Michael Neuling
2016-06-08 10:38 ` [PATCH v6 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
@ 2016-06-08 10:38 ` Michael Neuling
2016-06-08 10:38 ` [PATCH v6 3/3] powerpc: Load Monitor Register Tests Michael Neuling
2016-06-09 2:31 ` [PATCH v7 0/3] POWER9 Load Monitor Support Michael Neuling
3 siblings, 0 replies; 28+ messages in thread
From: Michael Neuling @ 2016-06-08 10:38 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey, cyrilbur
From: Jack Miller <jack@codezen.org>
This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.
This patch disables the FSCR LM control bit on task init and enables
that bit when a load monitor facility unavailable exception is taken,
i.e. on first use. On context switch, this bit is then used to
determine whether the two relevant registers are saved and
restored. This is done lazily for performance reasons.
Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
arch/powerpc/include/asm/processor.h | 2 ++
arch/powerpc/include/asm/reg.h | 5 +++++
arch/powerpc/kernel/process.c | 18 ++++++++++++++++++
arch/powerpc/kernel/traps.c | 4 ++++
4 files changed, 29 insertions(+)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 1833fe9..ac7670d 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long mmcr2;
unsigned mmcr0;
unsigned used_ebb;
+ unsigned long lmrr;
+ unsigned long lmser;
#endif
};
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a0948f4..ce44fe2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -282,6 +282,8 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
@@ -291,6 +293,7 @@
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -300,10 +303,12 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_LM __MASK(FSCR_LM_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_LM __MASK(FSCR_LM_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 74ea8db..2e22f60 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1009,6 +1009,14 @@ static inline void save_sprs(struct thread_struct *t)
*/
t->tar = mfspr(SPRN_TAR);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally save Load Monitor registers, if enabled */
+ if (t->fscr & FSCR_LM) {
+ t->lmrr = mfspr(SPRN_LMRR);
+ t->lmser = mfspr(SPRN_LMSER);
+ }
+ }
#endif
}
@@ -1044,6 +1052,16 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally restore Load Monitor registers, if enabled */
+ if (new_thread->fscr & FSCR_LM) {
+ if (old_thread->lmrr != new_thread->lmrr)
+ mtspr(SPRN_LMRR, new_thread->lmrr);
+ if (old_thread->lmser != new_thread->lmser)
+ mtspr(SPRN_LMSER, new_thread->lmser);
+ }
+ }
#endif
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a4b00ee..aabdeac 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1376,6 +1376,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
+ [FSCR_LM_LG] = "LM",
};
char *facility = "unknown";
u64 value;
@@ -1433,6 +1434,9 @@ void facility_unavailable_exception(struct pt_regs *regs)
emulate_single_step(regs);
}
return;
+ } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ mtspr(SPRN_FSCR, value | FSCR_LM);
+ return;
}
if ((status < ARRAY_SIZE(facility_strings)) &&
--
2.7.4
* [PATCH v6 3/3] powerpc: Load Monitor Register Tests
2016-06-08 10:38 ` [PATCH v6 0/3] POWER9 Load Monitor Support Michael Neuling
2016-06-08 10:38 ` [PATCH v6 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
2016-06-08 10:38 ` [PATCH v6 2/3] powerpc: Load Monitor Register Support Michael Neuling
@ 2016-06-08 10:38 ` Michael Neuling
2016-06-09 2:31 ` [PATCH v7 0/3] POWER9 Load Monitor Support Michael Neuling
3 siblings, 0 replies; 28+ messages in thread
From: Michael Neuling @ 2016-06-08 10:38 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, anton, mikey, cyrilbur
From: Jack Miller <jack@codezen.org>
Adds two tests. One is a simple test to ensure that the new registers
LMRR and LMSER are properly maintained. The other actually uses the
existing EBB test infrastructure to test that LMRR and LMSER behave as
documented.
Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
tools/testing/selftests/powerpc/pmu/ebb/.gitignore | 2 +
tools/testing/selftests/powerpc/pmu/ebb/Makefile | 2 +-
tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c | 143 +++++++++++++++++++++
tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h | 39 ++++++
.../selftests/powerpc/pmu/ebb/ebb_lmr_regs.c | 37 ++++++
tools/testing/selftests/powerpc/reg.h | 5 +
6 files changed, 227 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 42bddbe..44b7df1 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -20,3 +20,5 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
+ebb_lmr
+ebb_lmr_regs
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 8d2279c4..6b0453e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -14,7 +14,7 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test
+ cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
new file mode 100644
index 0000000..c47ebd5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define SIZE (32 * 1024 * 1024) /* 32M */
+#define LM_SIZE 0 /* Smallest encoding, 32M */
+
+#define SECTIONS 64 /* 1 per bit in LMSER */
+#define SECTION_SIZE (SIZE / SECTIONS)
+#define SECTION_LONGS (SECTION_SIZE / sizeof(long))
+
+static unsigned long *test_mem;
+
+static int lmr_count = 0;
+
+void ebb_lmr_handler(void)
+{
+ lmr_count++;
+}
+
+void ldmx_full_section(unsigned long *mem, int section)
+{
+ unsigned long *ptr;
+ int i;
+
+ for (i = 0; i < SECTION_LONGS; i++) {
+ ptr = &mem[(SECTION_LONGS * section) + i];
+ ldmx((unsigned long) &ptr);
+ ebb_lmr_reset();
+ }
+}
+
+unsigned long section_masks[] = {
+ 0x8000000000000000,
+ 0xFF00000000000000,
+ 0x0000000F70000000,
+ 0x8000000000000001,
+ 0xF0F0F0F0F0F0F0F0,
+ 0x0F0F0F0F0F0F0F0F,
+ 0x0
+};
+
+int ebb_lmr_section_test(unsigned long *mem)
+{
+ unsigned long *mask = section_masks;
+ int i;
+
+ for (; *mask; mask++) {
+ mtspr(SPRN_LMSER, *mask);
+ printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER));
+
+ for (i = 0; i < 64; i++) {
+ lmr_count = 0;
+ ldmx_full_section(mem, i);
+ if (*mask & (1UL << (63 - i)))
+ FAIL_IF(lmr_count != SECTION_LONGS);
+ else
+ FAIL_IF(lmr_count);
+ }
+ }
+
+ return 0;
+}
+
+int ebb_lmr(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ setup_ebb_handler(ebb_lmr_handler);
+
+ ebb_global_enable();
+
+ FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0);
+
+ mtspr(SPRN_LMSER, 0);
+
+ FAIL_IF(mfspr(SPRN_LMSER) != 0);
+
+ mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE));
+
+ FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE));
+
+ /* Read every single byte to ensure we get no false positives */
+ for (i = 0; i < SECTIONS; i++)
+ ldmx_full_section(test_mem, i);
+
+ FAIL_IF(lmr_count != 0);
+
+ /* Turn on the first section */
+
+ mtspr(SPRN_LMSER, (1UL << 63));
+ FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63));
+
+ /* Enable LM (BESCR) */
+
+ mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME);
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME));
+
+ ldmx((unsigned long)&test_mem);
+
+ FAIL_IF(lmr_count != 1); // exactly one exception
+ FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME); // LM now disabled
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO)); // occurred bit set
+
+ printf("Simple LMR EBB OK\n");
+
+ /* This shouldn't cause an EBB since it's been disabled */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 1);
+
+ printf("LMR disable on EBB OK\n");
+
+ ebb_lmr_reset();
+
+ /* This should cause an EBB or reset is broken */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 2);
+
+ printf("LMR reset EBB OK\n");
+
+ ebb_lmr_reset();
+
+ return ebb_lmr_section_test(test_mem);
+}
+
+int main(void)
+{
+ int ret = test_harness(ebb_lmr, "ebb_lmr");
+
+ if (test_mem)
+ free(test_mem);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
new file mode 100644
index 0000000..ef50abd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
@@ -0,0 +1,39 @@
+#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+
+#include "reg.h"
+
+#ifndef PPC_FEATURE2_ARCH_3_00
+#define PPC_FEATURE2_ARCH_3_00 0x00800000
+#endif
+
+#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00)
+
+static inline void ebb_lmr_reset(void)
+{
+ unsigned long bescr = mfspr(SPRN_BESCR);
+ bescr &= ~(BESCR_LMEO);
+ bescr |= BESCR_LME;
+ mtspr(SPRN_BESCR, bescr);
+}
+
+#define LDMX(t, a, b)\
+ (0x7c00026a | \
+ (((t) & 0x1f) << 21) | \
+ (((a) & 0x1f) << 16) | \
+ (((b) & 0x1f) << 11))
+
+static inline unsigned long ldmx(unsigned long address)
+{
+ unsigned long ret;
+
+ asm volatile ("mr 9, %1\r\n"
+ ".long " __stringify(LDMX(9, 0, 9)) "\r\n"
+ "mr %0, 9\r\n":"=r"(ret)
+ :"r"(address)
+ :"r9");
+
+ return ret;
+}
+
+#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
new file mode 100644
index 0000000..aff4241
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define CHECKS 10000
+
+int ebb_lmr_regs(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ ebb_global_enable();
+
+ for (i = 0; i < CHECKS; i++) {
+ mtspr(SPRN_LMRR, i << 25); // skip size and rsvd bits
+ mtspr(SPRN_LMSER, i);
+
+ FAIL_IF(mfspr(SPRN_LMRR) != (i << 25));
+ FAIL_IF(mfspr(SPRN_LMSER) != i);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(ebb_lmr_regs, "ebb_lmr_regs");
+}
diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h
index 65bfdee..fddf368 100644
--- a/tools/testing/selftests/powerpc/reg.h
+++ b/tools/testing/selftests/powerpc/reg.h
@@ -34,6 +34,11 @@
#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
+#define BESCR_LME (0x1ul << 34) /* Load Monitor Enable */
+#define BESCR_LMEO (0x1ul << 2) /* Load Monitor Exception Occurred */
+
+#define SPRN_LMRR 813 /* Load Monitor Region Register */
+#define SPRN_LMSER 814 /* Load Monitor Section Enable Register */
#define SPRN_PMC1 771
#define SPRN_PMC2 772
--
2.7.4
* [PATCH v7 0/3] POWER9 Load Monitor Support
2016-06-08 10:38 ` [PATCH v6 0/3] POWER9 Load Monitor Support Michael Neuling
` (2 preceding siblings ...)
2016-06-08 10:38 ` [PATCH v6 3/3] powerpc: Load Monitor Register Tests Michael Neuling
@ 2016-06-09 2:31 ` Michael Neuling
2016-06-09 2:31 ` [PATCH v7 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
` (2 more replies)
3 siblings, 3 replies; 28+ messages in thread
From: Michael Neuling @ 2016-06-09 2:31 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, cyrilbur, anton, mikey
This patch series adds support for the POWER9 Load Monitor
instruction (ldmx), based on work from Jack Miller.
The first patch is a cleanup of the FSCR handling. The second patch
adds the actual ldmx support to the kernel. The third patch is a
couple of ldmx selftests.
v7:
- Suggestions from the "prestigious" mpe.
- PATCH 1/3:
- Use current->thread.fscr rather than what the hardware gives us.
- PATCH 2/3:
- Use current->thread.fscr rather than what the hardware gives us.
- PATCH 3/3:
- no change.
v6:
- PATCH 1/3:
- Suggestions from mpe.
- Init the FSCR using the existing INIT_THREAD macro rather than
an init_fscr() function.
- Set fscr when taking DSCR exception in
facility_unavailable_exception().
- PATCH 2/3:
- Remove erroneous semicolons in restore_sprs().
- PATCH 3/3:
- no change.
v5:
- PATCH 1/3:
- Made the FSCR cleanup more extensive.
- PATCH 2/3:
- Moved FSCR_LM clearing to the new init_fscr().
- PATCH 3/3:
- Added test cases to .gitignore.
- Removed test against PPC_FEATURE2_EBB since it's not needed.
- Added parentheses around input parameter usage in the LDMX() macro.
Jack Miller (2):
powerpc: Load Monitor Register Support
powerpc: Load Monitor Register Tests
Michael Neuling (1):
powerpc: Improve FSCR init and context switching
* [PATCH v7 1/3] powerpc: Improve FSCR init and context switching
2016-06-09 2:31 ` [PATCH v7 0/3] POWER9 Load Monitor Support Michael Neuling
@ 2016-06-09 2:31 ` Michael Neuling
2016-06-21 12:27 ` [v7,1/3] " Michael Ellerman
2016-06-09 2:31 ` [PATCH v7 2/3] powerpc: Load Monitor Register Support Michael Neuling
2016-06-09 2:31 ` [PATCH v7 3/3] powerpc: Load Monitor Register Tests Michael Neuling
2 siblings, 1 reply; 28+ messages in thread
From: Michael Neuling @ 2016-06-09 2:31 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, cyrilbur, anton, mikey
This fixes a few issues with FSCR init and switching.
In this patch:
powerpc: Create context switch helpers save_sprs() and restore_sprs()
Author: Anton Blanchard <anton@samba.org>
commit 152d523e6307c7152f9986a542f873b5c5863937
We moved the setting of the FSCR register from inside a
CPU_FTR_ARCH_207S section to inside just a CPU_FTR_DSCR section.
Hence we are setting FSCR on POWER6/7, where the FSCR doesn't
exist. This is harmless but we shouldn't do it.
Also, we can simplify the FSCR context switch. We don't need to go
through the calculation involving dscr_inherit. We can just restore
what we saved last time.
Also, we don't currently explicitly init the FSCR for userspace
applications. We init FSCR on boot in __init_FSCR and the first task
then inherits from that. This works but is delicate. This patch adds
the initial fscr value to INIT_THREAD to explicitly set the FSCR for
userspace applications and removes the __init_FSCR boot-time init.
Based on patch by Jack Miller.
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
arch/powerpc/include/asm/processor.h | 1 +
arch/powerpc/kernel/cpu_setup_power.S | 10 ----------
arch/powerpc/kernel/process.c | 12 ++++--------
arch/powerpc/kernel/traps.c | 3 ++-
4 files changed, 7 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 009fab1..1833fe9 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -347,6 +347,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
+ .fscr = FSCR_TAR | FSCR_EBB \
}
#endif
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119..75f98c8 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -49,7 +49,6 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
- bl __init_FSCR
bl __init_PMU
bl __init_hvmode_206
mtlr r11
@@ -67,7 +66,6 @@ _GLOBAL(__setup_cpu_power8)
_GLOBAL(__restore_cpu_power8)
mflr r11
- bl __init_FSCR
bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
@@ -86,7 +84,6 @@ _GLOBAL(__restore_cpu_power8)
_GLOBAL(__setup_cpu_power9)
mflr r11
- bl __init_FSCR
bl __init_hvmode_206
mtlr r11
beqlr
@@ -102,7 +99,6 @@ _GLOBAL(__setup_cpu_power9)
_GLOBAL(__restore_cpu_power9)
mflr r11
- bl __init_FSCR
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -155,12 +151,6 @@ __init_LPCR:
isync
blr
-__init_FSCR:
- mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
- mtspr SPRN_FSCR,r3
- blr
-
__init_HFSCR:
mfspr r3,SPRN_HFSCR
ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2f12cb..74ea8db 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1023,18 +1023,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
- u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-
- if (new_thread->dscr_inherit) {
+ if (new_thread->dscr_inherit)
dscr = new_thread->dscr;
- fscr |= FSCR_DSCR;
- }
if (old_thread->dscr != dscr)
mtspr(SPRN_DSCR, dscr);
-
- if (old_thread->fscr != fscr)
- mtspr(SPRN_FSCR, fscr);
}
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -1045,6 +1038,9 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->ebbrr != new_thread->ebbrr)
mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ if (old_thread->fscr != new_thread->fscr)
+ mtspr(SPRN_FSCR, new_thread->fscr);
+
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9229ba6..667cf78 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1418,7 +1418,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
- mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ current->thread.fscr |= FSCR_DSCR;
+ mtspr(SPRN_FSCR, current->thread.fscr);
}
/* Read from DSCR (mfspr RT, 0x03) */
--
2.7.4
* [PATCH v7 2/3] powerpc: Load Monitor Register Support
2016-06-09 2:31 ` [PATCH v7 0/3] POWER9 Load Monitor Support Michael Neuling
2016-06-09 2:31 ` [PATCH v7 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
@ 2016-06-09 2:31 ` Michael Neuling
2016-06-21 12:27 ` [v7,2/3] " Michael Ellerman
2016-06-09 2:31 ` [PATCH v7 3/3] powerpc: Load Monitor Register Tests Michael Neuling
2 siblings, 1 reply; 28+ messages in thread
From: Michael Neuling @ 2016-06-09 2:31 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, cyrilbur, anton, mikey
From: Jack Miller <jack@codezen.org>
This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.
This patch disables the FSCR LM control bit on task init and enables
that bit when a load monitor facility unavailable exception is taken,
i.e. on first use. On context switch, this bit is then used to
determine whether the two relevant registers are saved and
restored. This is done lazily for performance reasons.
Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
arch/powerpc/include/asm/processor.h | 2 ++
arch/powerpc/include/asm/reg.h | 5 +++++
arch/powerpc/kernel/process.c | 18 ++++++++++++++++++
arch/powerpc/kernel/traps.c | 9 +++++++++
4 files changed, 34 insertions(+)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 1833fe9..ac7670d 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long mmcr2;
unsigned mmcr0;
unsigned used_ebb;
+ unsigned long lmrr;
+ unsigned long lmser;
#endif
};
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a0948f4..ce44fe2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -282,6 +282,8 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
@@ -291,6 +293,7 @@
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -300,10 +303,12 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_LM __MASK(FSCR_LM_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_LM __MASK(FSCR_LM_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 74ea8db..2e22f60 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1009,6 +1009,14 @@ static inline void save_sprs(struct thread_struct *t)
*/
t->tar = mfspr(SPRN_TAR);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally save Load Monitor registers, if enabled */
+ if (t->fscr & FSCR_LM) {
+ t->lmrr = mfspr(SPRN_LMRR);
+ t->lmser = mfspr(SPRN_LMSER);
+ }
+ }
#endif
}
@@ -1044,6 +1052,16 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally restore Load Monitor registers, if enabled */
+ if (new_thread->fscr & FSCR_LM) {
+ if (old_thread->lmrr != new_thread->lmrr)
+ mtspr(SPRN_LMRR, new_thread->lmrr);
+ if (old_thread->lmser != new_thread->lmser)
+ mtspr(SPRN_LMSER, new_thread->lmser);
+ }
+ }
#endif
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 667cf78..b2e434b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1376,6 +1376,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
+ [FSCR_LM_LG] = "LM",
};
char *facility = "unknown";
u64 value;
@@ -1433,6 +1434,14 @@ void facility_unavailable_exception(struct pt_regs *regs)
emulate_single_step(regs);
}
return;
+ } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * This process has touched LM, so turn it on forever
+ * for this process
+ */
+ current->thread.fscr |= FSCR_LM;
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ return;
}
if ((status < ARRAY_SIZE(facility_strings)) &&
--
2.7.4
* Re: [v7,2/3] powerpc: Load Monitor Register Support
2016-06-09 2:31 ` [PATCH v7 2/3] powerpc: Load Monitor Register Support Michael Neuling
@ 2016-06-21 12:27 ` Michael Ellerman
0 siblings, 0 replies; 28+ messages in thread
From: Michael Ellerman @ 2016-06-21 12:27 UTC (permalink / raw)
To: Michael Neuling, Jack Miller; +Cc: mikey, linuxppc-dev, cyrilbur, anton
On Thu, 2016-06-09 at 02:31:09 UTC, Michael Neuling wrote:
> From: Jack Miller <jack@codezen.org>
>
> This enables new registers, LMRR and LMSER, that can trigger an EBB in
> userspace code when a monitored load (via the new ldmx instruction)
> loads memory from a monitored space. This facility is controlled by a
> new FSCR bit, LM.
>
> This patch disables the FSCR LM control bit on task init and enables
> that bit when a load monitor facility unavailable exception is taken,
> i.e. on first use. On context switch, this bit is then used to
> determine whether the two relevant registers are saved and
> restored. This is done lazily for performance reasons.
>
> Signed-off-by: Jack Miller <jack@codezen.org>
> Signed-off-by: Michael Neuling <mikey@neuling.org>
Applied to powerpc next, thanks.
https://git.kernel.org/powerpc/c/bd3ea317fddfd0f2044f94bed2
cheers
* [PATCH v7 3/3] powerpc: Load Monitor Register Tests
2016-06-09 2:31 ` [PATCH v7 0/3] POWER9 Load Monitor Support Michael Neuling
2016-06-09 2:31 ` [PATCH v7 1/3] powerpc: Improve FSCR init and context switching Michael Neuling
2016-06-09 2:31 ` [PATCH v7 2/3] powerpc: Load Monitor Register Support Michael Neuling
@ 2016-06-09 2:31 ` Michael Neuling
2016-06-21 12:27 ` [v7,3/3] " Michael Ellerman
2 siblings, 1 reply; 28+ messages in thread
From: Michael Neuling @ 2016-06-09 2:31 UTC (permalink / raw)
To: Michael Ellerman, Jack Miller; +Cc: linuxppc-dev, cyrilbur, anton, mikey
From: Jack Miller <jack@codezen.org>
Adds two tests. One is a simple test to ensure that the new registers
LMRR and LMSER are properly maintained. The other actually uses the
existing EBB test infrastructure to test that LMRR and LMSER behave as
documented.
Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
tools/testing/selftests/powerpc/pmu/ebb/.gitignore | 2 +
tools/testing/selftests/powerpc/pmu/ebb/Makefile | 2 +-
tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c | 143 +++++++++++++++++++++
tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h | 39 ++++++
.../selftests/powerpc/pmu/ebb/ebb_lmr_regs.c | 37 ++++++
tools/testing/selftests/powerpc/reg.h | 5 +
6 files changed, 227 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
create mode 100644 tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 42bddbe..44b7df1 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -20,3 +20,5 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
+ebb_lmr
+ebb_lmr_regs
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 8d2279c4..6b0453e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -14,7 +14,7 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test
+ cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
new file mode 100644
index 0000000..c47ebd5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define SIZE (32 * 1024 * 1024) /* 32M */
+#define LM_SIZE 0 /* Smallest encoding, 32M */
+
+#define SECTIONS 64 /* 1 per bit in LMSER */
+#define SECTION_SIZE (SIZE / SECTIONS)
+#define SECTION_LONGS (SECTION_SIZE / sizeof(long))
+
+static unsigned long *test_mem;
+
+static int lmr_count = 0;
+
+void ebb_lmr_handler(void)
+{
+ lmr_count++;
+}
+
+void ldmx_full_section(unsigned long *mem, int section)
+{
+ unsigned long *ptr;
+ int i;
+
+ for (i = 0; i < SECTION_LONGS; i++) {
+ ptr = &mem[(SECTION_LONGS * section) + i];
+ ldmx((unsigned long) &ptr);
+ ebb_lmr_reset();
+ }
+}
+
+unsigned long section_masks[] = {
+ 0x8000000000000000,
+ 0xFF00000000000000,
+ 0x0000000F70000000,
+ 0x8000000000000001,
+ 0xF0F0F0F0F0F0F0F0,
+ 0x0F0F0F0F0F0F0F0F,
+ 0x0
+};
+
+int ebb_lmr_section_test(unsigned long *mem)
+{
+ unsigned long *mask = section_masks;
+ int i;
+
+ for (; *mask; mask++) {
+ mtspr(SPRN_LMSER, *mask);
+ printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER));
+
+ for (i = 0; i < 64; i++) {
+ lmr_count = 0;
+ ldmx_full_section(mem, i);
+ if (*mask & (1UL << (63 - i)))
+ FAIL_IF(lmr_count != SECTION_LONGS);
+ else
+ FAIL_IF(lmr_count);
+ }
+ }
+
+ return 0;
+}
+
+int ebb_lmr(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ setup_ebb_handler(ebb_lmr_handler);
+
+ ebb_global_enable();
+
+ FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0);
+
+ mtspr(SPRN_LMSER, 0);
+
+ FAIL_IF(mfspr(SPRN_LMSER) != 0);
+
+ mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE));
+
+ FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE));
+
+ /* Read every single byte to ensure we get no false positives */
+ for (i = 0; i < SECTIONS; i++)
+ ldmx_full_section(test_mem, i);
+
+ FAIL_IF(lmr_count != 0);
+
+ /* Turn on the first section */
+
+ mtspr(SPRN_LMSER, (1UL << 63));
+ FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63));
+
+ /* Enable LM (BESCR) */
+
+ mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME);
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME));
+
+ ldmx((unsigned long)&test_mem);
+
+ FAIL_IF(lmr_count != 1); // exactly one exception
+ FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME); // LM now disabled
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO)); // occurred bit set
+
+ printf("Simple LMR EBB OK\n");
+
+ /* This shouldn't cause an EBB since it's been disabled */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 1);
+
+ printf("LMR disable on EBB OK\n");
+
+ ebb_lmr_reset();
+
+ /* This should cause an EBB or reset is broken */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 2);
+
+ printf("LMR reset EBB OK\n");
+
+ ebb_lmr_reset();
+
+ return ebb_lmr_section_test(test_mem);
+}
+
+int main(void)
+{
+ int ret = test_harness(ebb_lmr, "ebb_lmr");
+
+ if (test_mem)
+ free(test_mem);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
new file mode 100644
index 0000000..ef50abd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
@@ -0,0 +1,39 @@
+#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+
+#include "reg.h"
+
+#ifndef PPC_FEATURE2_ARCH_3_00
+#define PPC_FEATURE2_ARCH_3_00 0x00800000
+#endif
+
+#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00)
+
+static inline void ebb_lmr_reset(void)
+{
+ unsigned long bescr = mfspr(SPRN_BESCR);
+ bescr &= ~(BESCR_LMEO);
+ bescr |= BESCR_LME;
+ mtspr(SPRN_BESCR, bescr);
+}
+
+#define LDMX(t, a, b)\
+ (0x7c00026a | \
+ (((t) & 0x1f) << 21) | \
+ (((a) & 0x1f) << 16) | \
+ (((b) & 0x1f) << 11))
+
+static inline unsigned long ldmx(unsigned long address)
+{
+ unsigned long ret;
+
+ asm volatile ("mr 9, %1\r\n"
+ ".long " __stringify(LDMX(9, 0, 9)) "\r\n"
+ "mr %0, 9\r\n":"=r"(ret)
+ :"r"(address)
+ :"r9");
+
+ return ret;
+}
+
+#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
new file mode 100644
index 0000000..aff4241
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define CHECKS 10000
+
+int ebb_lmr_regs(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ ebb_global_enable();
+
+ for (i = 0; i < CHECKS; i++) {
+ mtspr(SPRN_LMRR, i << 25); // skip size and rsvd bits
+ mtspr(SPRN_LMSER, i);
+
+ FAIL_IF(mfspr(SPRN_LMRR) != (i << 25));
+ FAIL_IF(mfspr(SPRN_LMSER) != i);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(ebb_lmr_regs, "ebb_lmr_regs");
+}
diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h
index 65bfdee..fddf368 100644
--- a/tools/testing/selftests/powerpc/reg.h
+++ b/tools/testing/selftests/powerpc/reg.h
@@ -34,6 +34,11 @@
#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
+#define BESCR_LME (0x1ul << 34) /* Load Monitor Enable */
+#define BESCR_LMEO (0x1ul << 2) /* Load Monitor Exception Occurred */
+
+#define SPRN_LMRR 813 /* Load Monitor Region Register */
+#define SPRN_LMSER 814 /* Load Monitor Section Enable Register */
#define SPRN_PMC1 771
#define SPRN_PMC2 772
--
2.7.4