* [PATCH 01/14] powerpc/64s/exception: machine check fwnmi remove HV case
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
fwnmi does not trigger in HV mode, so remove the always-true feature test.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 53c1b0a2ebce..db7ef8c8566f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1026,9 +1026,8 @@ TRAMP_REAL_BEGIN(machine_check_pSeries)
.globl machine_check_fwnmi
machine_check_fwnmi:
EXCEPTION_PROLOG_0 PACA_EXMC
-BEGIN_FTR_SECTION
b machine_check_common_early
-END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
+
machine_check_pSeries_0:
EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
/*
--
2.20.1
* [PATCH 02/14] powerpc/64s/exception: machine check remove bitrotted comment
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 4 ----
1 file changed, 4 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index db7ef8c8566f..e8734a1dfdb9 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -933,10 +933,6 @@ EXC_COMMON_BEGIN(system_reset_common)
EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
- /* This is moved out of line as it can be patched by FW, but
- * some code path might still want to branch into the original
- * vector
- */
EXCEPTION_PROLOG_0 PACA_EXMC
BEGIN_FTR_SECTION
b machine_check_common_early
--
2.20.1
* [PATCH 03/14] powerpc/64s/exception: machine check fix KVM guest test
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
The machine_check_handle_early hypervisor guest test is skipped if
!HVMODE or MSR[HV]=0, which is wrong for PR or nested hypervisors
that could be running a guest in this state.

Test HSTATE_IN_GUEST up front and use that to branch out to the KVM
handler; then MSR[PR] alone can test for this kernel's userspace.
This matches all other interrupt handling.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 33 +++++++++++-----------------
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e8734a1dfdb9..5789a00691f9 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1108,11 +1108,8 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
-BEGIN_FTR_SECTION
- b 4f
-END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
-#ifdef CONFIG_PPC_P7_NAP
+#ifdef CONFIG_PPC_P7_NAP
/*
* Check if thread was in power saving mode. We come here when any
* of the following is true:
@@ -1128,30 +1125,26 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
- /*
- * Check if we are coming from hypervisor userspace. If yes then we
- * continue in host kernel in V mode to deliver the MC event.
- */
- rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
- beq 5f
-4: andi. r11,r12,MSR_PR /* See if coming from user. */
- bne 9f /* continue in V mode if we are. */
-
-5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-BEGIN_FTR_SECTION
/*
- * We are coming from kernel context. Check if we are coming from
- * guest. if yes, then we can continue. We will fall through
- * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
+ * Check if we are coming from guest. If yes, then run the normal
+ * exception handler which will take the do_kvm_200->kvmppc_interrupt
+ * branch to deliver the MC event to guest.
*/
lbz r11,HSTATE_IN_GUEST(r13)
cmpwi r11,0 /* Check if coming from guest */
bne 9f /* continue if we are. */
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
+
+ /*
+ * Check if we are coming from userspace. If yes, then run the normal
+ * exception handler which will deliver the MC event to this kernel.
+ */
+ andi. r11,r12,MSR_PR /* See if coming from user. */
+ bne 9f /* continue in V mode if we are. */
+
/*
- * At this point we are not sure about what context we come from.
+ * At this point we are coming from kernel context.
* Queue up the MCE event and return from the interrupt.
* But before that, check if this is an un-recoverable exception.
* If yes, then stay on emergency stack and panic.
--
2.20.1
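
For readers following along, the resulting test order is: guest first, then
this kernel's userspace, then everything else is treated as a kernel-context
MCE. A rough standalone C sketch of that ordering follows; the names
(mce_dispatch, mce_context, MCE_*) are invented for this sketch and are not
kernel symbols -- the real logic is the assembly in the hunk above.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative model of the dispatch order after this patch.
 * None of these names are real kernel symbols.
 */
enum mce_target {
        MCE_DELIVER_TO_GUEST,   /* branch to the KVM interrupt handler */
        MCE_DELIVER_TO_USER,    /* run the late handler in virtual mode */
        MCE_QUEUE_FOR_KERNEL,   /* queue the event, return from interrupt */
};

struct mce_context {
        bool hstate_in_guest;   /* HSTATE_IN_GUEST(r13) != 0 */
        bool msr_pr;            /* MSR[PR] set in the saved MSR */
};

static enum mce_target mce_dispatch(const struct mce_context *ctx)
{
        if (ctx->hstate_in_guest)       /* guest test is now done up front */
                return MCE_DELIVER_TO_GUEST;
        if (ctx->msr_pr)                /* this kernel's userspace */
                return MCE_DELIVER_TO_USER;
        return MCE_QUEUE_FOR_KERNEL;    /* kernel context */
}

int main(void)
{
        struct mce_context kernel_mce = { false, false };

        printf("kernel MCE handled as case %d\n", mce_dispatch(&kernel_mce));
        return 0;
}

The point of the reordering is that HSTATE_IN_GUEST, not MSR[HV], decides
whether a guest was running, so PR and nested hypervisors are handled
correctly.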
* [PATCH 04/14] powerpc/64s/exception: machine check adjust RFI target
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
The host kernel delivery case for powernv does RFI_TO_USER_OR_KERNEL,
but should just use RFI_TO_KERNEL, which makes it clear this is not a
user case.

This is not a bug, because RFI_TO_USER_OR_KERNEL handles kernel
returns just fine.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 5789a00691f9..0186a44bb981 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1184,7 +1184,7 @@ BEGIN_FTR_SECTION
*/
bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
- RFI_TO_USER_OR_KERNEL
+ RFI_TO_KERNEL
FTR_SECTION_ELSE
/*
* pSeries: Return from MC interrupt. Before that stay on emergency
--
2.20.1
* [PATCH 05/14] powerpc/64s/exception: machine check pseries should always run the early handler
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Now that pseries with fwnmi registered runs the early machine check
handler, there is no good reason to special case the non-fwnmi case
and skip the early handler. Reducing the code and number of paths is
a top priority for asm code; it's better to handle this in C where
possible (and the pseries early handler is a no-op if fwnmi is not
registered).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 4 ----
1 file changed, 4 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0186a44bb981..a69ceb28cf4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -934,11 +934,7 @@ EXC_COMMON_BEGIN(system_reset_common)
EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
EXCEPTION_PROLOG_0 PACA_EXMC
-BEGIN_FTR_SECTION
b machine_check_common_early
-FTR_SECTION_ELSE
- b machine_check_pSeries_0
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
TRAMP_REAL_BEGIN(machine_check_common_early)
--
2.20.1
* [PATCH 06/14] powerpc/64s/exception: machine check remove machine_check_pSeries_0 branch
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
This label has only one caller, so unwind the branch and move it
inline. The location of the comment is adjusted to match the similar
one in system reset.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a69ceb28cf4c..54ca2b189d43 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1014,20 +1014,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
b 1b
b . /* prevent speculative execution */
-TRAMP_REAL_BEGIN(machine_check_pSeries)
- .globl machine_check_fwnmi
-machine_check_fwnmi:
+#ifdef CONFIG_PPC_PSERIES
+TRAMP_REAL_BEGIN(machine_check_fwnmi)
EXCEPTION_PROLOG_0 PACA_EXMC
b machine_check_common_early
-
-machine_check_pSeries_0:
- EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
- /*
- * MSR_RI is not enabled, because PACA_EXMC is being used, so a
- * nested machine check corrupts it. machine_check_common enables
- * MSR_RI.
- */
- EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
+#endif
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
@@ -1197,7 +1188,13 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
/* Deliver the machine check to host kernel in V mode. */
MACHINE_CHECK_HANDLER_WINDUP
EXCEPTION_PROLOG_0 PACA_EXMC
- b machine_check_pSeries_0
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
+ EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
+ /*
+ * MSR_RI is not enabled, because PACA_EXMC is being used, so a
+ * nested machine check corrupts it. machine_check_common enables
+ * MSR_RI.
+ */
EXC_COMMON_BEGIN(unrecover_mce)
/* Invoke machine_check_exception to print MCE event and panic. */
--
2.20.1
* [PATCH 07/14] powerpc/64s/exception: machine check use correct cfar for late handler
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Bare metal machine checks run an "early" handler in real mode before
running the main handler, which reports the event.

The main handler runs exactly like a normal interrupt handler, after
the "windup" that sets registers back to how they were at interrupt
entry. CFAR does not get restored by the windup code, so it will be
wrong when the handler is run.

Restore the CFAR to the saved value before running the late handler.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 54ca2b189d43..f2c24a4ae723 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1186,6 +1186,10 @@ FTR_SECTION_ELSE
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
9:
/* Deliver the machine check to host kernel in V mode. */
+BEGIN_FTR_SECTION
+ ld r10,ORIG_GPR3(r1)
+ mtspr SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
EXCEPTION_PROLOG_0 PACA_EXMC
EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
--
2.20.1
* [PATCH 08/14] powerpc/64s/exception: machine check pseries should skip the late handler for host kernel MCEs
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
The powernv machine check handler copes with taking an MCE from one of
three contexts: guest, host kernel, and host user. In each case the
early handler runs first on a special stack. Then:

- The guest case branches to the KVM interrupt handler (via standard
interrupt macros).
- The host user case runs the "late" handler, which is like a
normal interrupt that runs in virtual mode and uses the regular
kernel stack.
- The host kernel case queues the event and schedules it for
processing with irq work.

The last case is important: it must not enable virtual memory, because
the MMU state may not be set up to deal with that (e.g., the SLB might
be clear); it must not use the regular kernel stack, for similar
reasons (e.g., we might be in OPAL with the OPAL stack in r1); and the
kernel does not expect anything to touch its stack if interrupts are
disabled.

The pseries handler does not do this queueing; instead it always runs
the late handler for host MCEs, which has some of the same problems.

Rather than special casing pseries, just have it follow what powernv
is doing and queue up these events.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 22 ++--------------------
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f2c24a4ae723..ac7b5bb614d9 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1163,7 +1163,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
cmpdi r3,0 /* see if we handled MCE successfully */
beq 1b /* if !handled then panic */
-BEGIN_FTR_SECTION
+
/*
* Return from MC interrupt.
* Queue up the MCE event so that we can log it later, while
@@ -1172,18 +1172,7 @@ BEGIN_FTR_SECTION
bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
RFI_TO_KERNEL
-FTR_SECTION_ELSE
- /*
- * pSeries: Return from MC interrupt. Before that stay on emergency
- * stack and call machine_check_exception to log the MCE event.
- */
- LOAD_HANDLER(r10,mce_return)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- RFI_TO_KERNEL
- b .
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+
9:
/* Deliver the machine check to host kernel in V mode. */
BEGIN_FTR_SECTION
@@ -1212,13 +1201,6 @@ EXC_COMMON_BEGIN(unrecover_mce)
bl unrecoverable_exception
b 1b
-EXC_COMMON_BEGIN(mce_return)
- /* Invoke machine_check_exception to print MCE event and return. */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
- MACHINE_CHECK_HANDLER_WINDUP
- RFI_TO_KERNEL
- b .
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
EXCEPTION_PROLOG_0 PACA_EXGEN
--
2.20.1
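
As background for the queueing behaviour the patch extends to pseries, the
"record now, report later" pattern can be sketched as the standalone C
program below. It is illustrative only: all names are invented for this
note, and the kernel's real implementation records events into preallocated
per-CPU storage and reports them later via irq work, as described above.

#include <stdio.h>

/*
 * Conceptual sketch of "queue the event now, process it later".
 * The recording step must be safe to run from the real-mode MCE
 * handler: no allocation, no MMU, no use of the regular kernel
 * stack. Reporting happens later from a normal context.
 * All names are invented for this illustration.
 */
#define MCE_QUEUE_DEPTH 4

struct mce_event {
        unsigned long nip;      /* where the MCE hit */
        unsigned long msr;      /* saved MSR */
};

static struct mce_event mce_queue[MCE_QUEUE_DEPTH];
static int mce_queue_count;

/* Real-mode side: only touches preallocated storage. */
static int mce_queue_event(unsigned long nip, unsigned long msr)
{
        if (mce_queue_count >= MCE_QUEUE_DEPTH)
                return -1;              /* queue full, event dropped */
        mce_queue[mce_queue_count].nip = nip;
        mce_queue[mce_queue_count].msr = msr;
        mce_queue_count++;
        return 0;
}

/* Later, from a safe context (irq work in the kernel): report. */
static void mce_process_queue(void)
{
        int i;

        for (i = 0; i < mce_queue_count; i++)
                printf("machine check at nip 0x%lx, msr 0x%lx\n",
                       mce_queue[i].nip, mce_queue[i].msr);
        mce_queue_count = 0;
}

int main(void)
{
        mce_queue_event(0xc0001234UL, 0x90001000UL);
        mce_process_queue();
        return 0;
}

This is why the kernel-context case in the handler can simply call
machine_check_queue_event and wind up, with no switch to virtual mode.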
* [PATCH 09/14] powerpc/64s/exception: machine check restructure to reuse common macros
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Follow the pattern of the sreset and HMI handlers more closely: use
EXCEPTION_PROLOG_COMMON_1 rather than open-coding it, and run the
handler at the relocated location.

This helps later simplification and code sharing.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 71 ++++++++++++++--------------
1 file changed, 36 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ac7b5bb614d9..3cc5ee6e4b56 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -934,17 +934,23 @@ EXC_COMMON_BEGIN(system_reset_common)
EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
EXCEPTION_PROLOG_0 PACA_EXMC
- b machine_check_common_early
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 1, 1, 0
+ mfctr r10 /* save ctr, even for !RELOCATABLE */
+ BRANCH_TO_C000(r11, machine_check_early_common)
+ /*
+ * MSR_RI is not enabled, because PACA_EXMC is being used, so a
+ * nested machine check corrupts it. machine_check_common enables
+ * MSR_RI.
+ */
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
-TRAMP_REAL_BEGIN(machine_check_common_early)
- EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0, 0, 0
+
+EXC_COMMON_BEGIN(machine_check_early_common)
+ mtctr r10 /* Restore ctr */
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
+
/*
- * Register contents:
- * R13 = PACA
- * R9 = CR
- * Original R9 to R13 is saved on PACA_EXMC
- *
* Switch to mc_emergency stack and handle re-entrancy (we limit
* the nested MCE upto level 4 to avoid stack overflow).
* Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
@@ -965,32 +971,30 @@ TRAMP_REAL_BEGIN(machine_check_common_early)
* the machine check is handled then the idle wakeup code is called
* to restore state.
*/
- mr r11,r1 /* Save r1 */
lhz r10,PACA_IN_MCE(r13)
cmpwi r10,0 /* Are we in nested machine check */
- bne 0f /* Yes, we are. */
- /* First machine check entry */
- ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
-0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
+ cmpwi cr1,r10,MAX_MCE_DEPTH /* Are we at maximum nesting */
addi r10,r10,1 /* increment paca->in_mce */
sth r10,PACA_IN_MCE(r13)
+
+ mr r10,r1 /* Save r1 */
+ bne 1f
+ /* First machine check entry */
+ ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
+1: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
/* Limit nested MCE to level 4 to avoid stack overflow */
- cmpwi r10,MAX_MCE_DEPTH
- bgt 2f /* Check if we hit limit of 4 */
- std r11,GPR1(r1) /* Save r1 on the stack. */
- std r11,0(r1) /* make stack chain pointer */
- mfspr r11,SPRN_SRR0 /* Save SRR0 */
- std r11,_NIP(r1)
- mfspr r11,SPRN_SRR1 /* Save SRR1 */
- std r11,_MSR(r1)
- mfspr r11,SPRN_DAR /* Save DAR */
- std r11,_DAR(r1)
- mfspr r11,SPRN_DSISR /* Save DSISR */
- std r11,_DSISR(r1)
- std r9,_CCR(r1) /* Save CR in stackframe */
+ bge cr1,2f /* Check if we hit limit of 4 */
+
+ EXCEPTION_PROLOG_COMMON_1()
/* We don't touch AMR here, we never go to virtual mode */
- /* Save r9 through r13 from EXMC save area to stack frame. */
EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
+ EXCEPTION_PROLOG_COMMON_3(0x200)
+
+ ld r3,PACA_EXMC+EX_DAR(r13)
+ lwz r4,PACA_EXMC+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+
mfmsr r11 /* get MSR value */
BEGIN_FTR_SECTION
ori r11,r11,MSR_ME /* turn on ME bit */
@@ -1016,8 +1020,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
+ /* See comment at machine_check exception, don't turn on RI */
EXCEPTION_PROLOG_0 PACA_EXMC
- b machine_check_common_early
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 1, 1, 0
+ mfctr r10 /* save ctr */
+ BRANCH_TO_C000(r11, machine_check_early_common)
#endif
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
@@ -1088,8 +1095,6 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
* ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
*/
EXC_COMMON_BEGIN(machine_check_handle_early)
- std r0,GPR0(r1) /* Save r0 */
- EXCEPTION_PROLOG_COMMON_3(0x200)
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
@@ -1180,14 +1185,10 @@ BEGIN_FTR_SECTION
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
+ /* See comment at machine_check exception, don't turn on RI */
EXCEPTION_PROLOG_0 PACA_EXMC
EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
- /*
- * MSR_RI is not enabled, because PACA_EXMC is being used, so a
- * nested machine check corrupts it. machine_check_common enables
- * MSR_RI.
- */
EXC_COMMON_BEGIN(unrecover_mce)
/* Invoke machine_check_exception to print MCE event and panic. */
--
2.20.1
* [PATCH 10/14] powerpc/64s/exception: machine check move tramp code
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Following convention, move the tramp code (unrelocated) above the
common handlers (relocated).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 3cc5ee6e4b56..9bb8c89e9e77 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -945,6 +945,17 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
+#ifdef CONFIG_PPC_PSERIES
+TRAMP_REAL_BEGIN(machine_check_fwnmi)
+ /* See comment at machine_check exception, don't turn on RI */
+ EXCEPTION_PROLOG_0 PACA_EXMC
+ EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 1, 1, 0
+ mfctr r10 /* save ctr */
+ BRANCH_TO_C000(r11, machine_check_early_common)
+#endif
+
+TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
+
EXC_COMMON_BEGIN(machine_check_early_common)
mtctr r10 /* Restore ctr */
mfspr r11,SPRN_SRR0
@@ -1018,17 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
b 1b
b . /* prevent speculative execution */
-#ifdef CONFIG_PPC_PSERIES
-TRAMP_REAL_BEGIN(machine_check_fwnmi)
- /* See comment at machine_check exception, don't turn on RI */
- EXCEPTION_PROLOG_0 PACA_EXMC
- EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 1, 1, 0
- mfctr r10 /* save ctr */
- BRANCH_TO_C000(r11, machine_check_early_common)
-#endif
-
-TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
-
EXC_COMMON_BEGIN(machine_check_common)
/*
* Machine check is different because we use a different
--
2.20.1
* [PATCH 11/14] powerpc/64s/exception: simplify machine check early path
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
machine_check_early_common can reach machine_check_handle_early
directly now that it runs at the relocated address, so just branch
directly.

The rfi sequence is still required to enable MSR[ME], but that step is
moved into a helper function, making the code easier to follow.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 30 +++++++++++++++++++---------
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9bb8c89e9e77..fcc3e6d5807f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1006,16 +1006,13 @@ EXC_COMMON_BEGIN(machine_check_early_common)
std r3,_DAR(r1)
std r4,_DSISR(r1)
- mfmsr r11 /* get MSR value */
+ li r10,MSR_RI
+ mtmsrd r10,1
BEGIN_FTR_SECTION
- ori r11,r11,MSR_ME /* turn on ME bit */
+ bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- ori r11,r11,MSR_RI /* turn on RI bit */
- LOAD_HANDLER(r12, machine_check_handle_early)
-1: mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r11
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
+ b machine_check_handle_early
+
2:
/* Stack overflow. Stay on emergency stack and panic.
* Keep the ME bit off while panic-ing, so that if we hit
@@ -1026,7 +1023,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
LOAD_HANDLER(r12, unrecover_mce)
li r10,MSR_ME
andc r11,r11,r10 /* Turn off MSR_ME */
- b 1b
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r11
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
EXC_COMMON_BEGIN(machine_check_common)
@@ -2270,6 +2269,19 @@ CLOSE_FIXED_SECTION(virt_trampolines);
USE_TEXT_SECTION()
+enable_machine_check:
+ mflr r0
+ bcl 20,31,$+4
+0: mflr r3
+ addi r3,r3,(1f - 0b)
+ mtspr SPRN_SRR0,r3
+ mfmsr r3
+ ori r3,r3,MSR_ME
+ mtspr SPRN_SRR1,r3
+ RFI_TO_KERNEL
+1: mtlr r0
+ blr
+
/*
* Hash table stuff
*/
--
2.20.1
* [PATCH 12/14] powerpc/64s/exception: machine check move unrecoverable handling out of line
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Similarly to the previous change, all callers of the unrecoverable
handler run relocated, so they can reach it with a direct branch. This
makes it easy to move out of line, which makes the "normal" path less
cluttered and easier to follow.

MSR[ME] manipulation still requires an rfi, so that is moved out of
line into its own function.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 82 +++++++++++++---------------
1 file changed, 39 insertions(+), 43 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index fcc3e6d5807f..88bcdd5b2dc7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -992,9 +992,9 @@ EXC_COMMON_BEGIN(machine_check_early_common)
bne 1f
/* First machine check entry */
ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
-1: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- /* Limit nested MCE to level 4 to avoid stack overflow */
- bge cr1,2f /* Check if we hit limit of 4 */
+1: /* Limit nested MCE to level 4 to avoid stack overflow */
+ bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */
+ subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
EXCEPTION_PROLOG_COMMON_1()
/* We don't touch AMR here, we never go to virtual mode */
@@ -1013,21 +1013,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
b machine_check_handle_early
-2:
- /* Stack overflow. Stay on emergency stack and panic.
- * Keep the ME bit off while panic-ing, so that if we hit
- * another machine check we checkstop.
- */
- addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
- ld r11,PACAKMSR(r13)
- LOAD_HANDLER(r12, unrecover_mce)
- li r10,MSR_ME
- andc r11,r11,r10 /* Turn off MSR_ME */
- mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r11
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
EXC_COMMON_BEGIN(machine_check_common)
/*
* Machine check is different because we use a different
@@ -1141,32 +1126,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
* If yes, then stay on emergency stack and panic.
*/
andi. r11,r12,MSR_RI
- bne 2f
-1: mfspr r11,SPRN_SRR0
- LOAD_HANDLER(r10,unrecover_mce)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- /*
- * We are going down. But there are chances that we might get hit by
- * another MCE during panic path and we may run into unstable state
- * with no way out. Hence, turn ME bit off while going down, so that
- * when another MCE is hit during panic path, system will checkstop
- * and hypervisor will get restarted cleanly by SP.
- */
- li r3,MSR_ME
- andc r10,r10,r3 /* Turn off MSR_ME */
- mtspr SPRN_SRR1,r10
- RFI_TO_KERNEL
- b .
-2:
+ beq unrecoverable_mce
+
/*
* Check if we have successfully handled/recovered from error, if not
* then stay on emergency stack and panic.
*/
ld r3,RESULT(r1) /* Load result */
cmpdi r3,0 /* see if we handled MCE successfully */
-
- beq 1b /* if !handled then panic */
+ beq unrecoverable_mce /* if !handled then panic */
/*
* Return from MC interrupt.
@@ -1189,17 +1157,31 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
-EXC_COMMON_BEGIN(unrecover_mce)
+EXC_COMMON_BEGIN(unrecoverable_mce)
+ /*
+ * We are going down. But there are chances that we might get hit by
+ * another MCE during panic path and we may run into unstable state
+ * with no way out. Hence, turn ME bit off while going down, so that
+ * when another MCE is hit during panic path, system will checkstop
+ * and hypervisor will get restarted cleanly by SP.
+ */
+ bl disable_machine_check
+ ld r10,PACAKMSR(r13)
+ li r3,MSR_ME
+ andc r10,r10,r3
+ mtmsrd r10
+
/* Invoke machine_check_exception to print MCE event and panic. */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
+
/*
- * We will not reach here. Even if we did, there is no way out. Call
- * unrecoverable_exception and die.
+ * We will not reach here. Even if we did, there is no way out.
+ * Call unrecoverable_exception and die.
*/
-1: addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
- b 1b
+ b .
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
@@ -2282,6 +2264,20 @@ enable_machine_check:
1: mtlr r0
blr
+disable_machine_check:
+ mflr r0
+ bcl 20,31,$+4
+0: mflr r3
+ addi r3,r3,(1f - 0b)
+ mtspr SPRN_SRR0,r3
+ mfmsr r3
+ li r4,MSR_ME
+ andc r3,r3,r4
+ mtspr SPRN_SRR1,r3
+ RFI_TO_KERNEL
+1: mtlr r0
+ blr
+
/*
* Hash table stuff
*/
--
2.20.1
* [PATCH 13/14] powerpc/64s/exception: untangle early machine check handler branch
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
machine_check_early_common now branches to machine_check_handle_early,
and is its only caller.

Move interleaving code out of the way, and remove the branch.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 129 +++++++++++++--------------
1 file changed, 62 insertions(+), 67 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 88bcdd5b2dc7..8e16578cf932 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -956,6 +956,16 @@ TRAMP_REAL_BEGIN(machine_check_fwnmi)
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
+#define MACHINE_CHECK_HANDLER_WINDUP \
+ /* Clear MSR_RI before setting SRR0 and SRR1. */\
+ li r9,0; \
+ mtmsrd r9,1; /* Clear MSR_RI */ \
+ /* Decrement paca->in_mce now RI is clear. */ \
+ lhz r12,PACA_IN_MCE(r13); \
+ subi r12,r12,1; \
+ sth r12,PACA_IN_MCE(r13); \
+ EXCEPTION_RESTORE_REGS EXC_STD
+
EXC_COMMON_BEGIN(machine_check_early_common)
mtctr r10 /* Restore ctr */
mfspr r11,SPRN_SRR0
@@ -1011,74 +1021,7 @@ EXC_COMMON_BEGIN(machine_check_early_common)
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- b machine_check_handle_early
-EXC_COMMON_BEGIN(machine_check_common)
- /*
- * Machine check is different because we use a different
- * save area: PACA_EXMC instead of PACA_EXGEN.
- */
- EXCEPTION_COMMON(PACA_EXMC, 0x200)
- FINISH_NAP
- RECONCILE_IRQ_STATE(r10, r11)
- ld r3,PACA_EXMC+EX_DAR(r13)
- lwz r4,PACA_EXMC+EX_DSISR(r13)
- /* Enable MSR_RI when finished with PACA_EXMC */
- li r10,MSR_RI
- mtmsrd r10,1
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
- b ret_from_except
-
-#define MACHINE_CHECK_HANDLER_WINDUP \
- /* Clear MSR_RI before setting SRR0 and SRR1. */\
- li r9,0; \
- mtmsrd r9,1; /* Clear MSR_RI */ \
- /* Decrement paca->in_mce now RI is clear. */ \
- lhz r12,PACA_IN_MCE(r13); \
- subi r12,r12,1; \
- sth r12,PACA_IN_MCE(r13); \
- EXCEPTION_RESTORE_REGS EXC_STD
-
-#ifdef CONFIG_PPC_P7_NAP
-/*
- * This is an idle wakeup. Low level machine check has already been
- * done. Queue the event then call the idle code to do the wake up.
- */
-EXC_COMMON_BEGIN(machine_check_idle_common)
- bl machine_check_queue_event
-
- /*
- * We have not used any non-volatile GPRs here, and as a rule
- * most exception code including machine check does not.
- * Therefore PACA_NAPSTATELOST does not need to be set. Idle
- * wakeup will restore volatile registers.
- *
- * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
- *
- * Then decrement MCE nesting after finishing with the stack.
- */
- ld r3,_MSR(r1)
- ld r4,_LINK(r1)
-
- lhz r11,PACA_IN_MCE(r13)
- subi r11,r11,1
- sth r11,PACA_IN_MCE(r13)
-
- mtlr r4
- rlwinm r10,r3,47-31,30,31
- cmpwi cr1,r10,2
- bltlr cr1 /* no state loss, return to idle caller */
- b idle_return_gpr_loss
-#endif
- /*
- * Handle machine check early in real mode. We come here with
- * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
- */
-EXC_COMMON_BEGIN(machine_check_handle_early)
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
@@ -1157,6 +1100,58 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 1, 1, 0
EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
+EXC_COMMON_BEGIN(machine_check_common)
+ /*
+ * Machine check is different because we use a different
+ * save area: PACA_EXMC instead of PACA_EXGEN.
+ */
+ EXCEPTION_COMMON(PACA_EXMC, 0x200)
+ FINISH_NAP
+ RECONCILE_IRQ_STATE(r10, r11)
+ ld r3,PACA_EXMC+EX_DAR(r13)
+ lwz r4,PACA_EXMC+EX_DSISR(r13)
+ /* Enable MSR_RI when finished with PACA_EXMC */
+ li r10,MSR_RI
+ mtmsrd r10,1
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+ bl save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl machine_check_exception
+ b ret_from_except
+
+#ifdef CONFIG_PPC_P7_NAP
+/*
+ * This is an idle wakeup. Low level machine check has already been
+ * done. Queue the event then call the idle code to do the wake up.
+ */
+EXC_COMMON_BEGIN(machine_check_idle_common)
+ bl machine_check_queue_event
+
+ /*
+ * We have not used any non-volatile GPRs here, and as a rule
+ * most exception code including machine check does not.
+ * Therefore PACA_NAPSTATELOST does not need to be set. Idle
+ * wakeup will restore volatile registers.
+ *
+ * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
+ *
+ * Then decrement MCE nesting after finishing with the stack.
+ */
+ ld r3,_MSR(r1)
+ ld r4,_LINK(r1)
+
+ lhz r11,PACA_IN_MCE(r13)
+ subi r11,r11,1
+ sth r11,PACA_IN_MCE(r13)
+
+ mtlr r4
+ rlwinm r10,r3,47-31,30,31
+ cmpwi cr1,r10,2
+ bltlr cr1 /* no state loss, return to idle caller */
+ b idle_return_gpr_loss
+#endif
+
EXC_COMMON_BEGIN(unrecoverable_mce)
/*
* We are going down. But there are chances that we might get hit by
--
2.20.1
* [PATCH 14/14] powerpc/64s/exception: machine check improve labels and comments
From: Nicholas Piggin @ 2019-07-03 7:54 UTC (permalink / raw)
To: linuxppc-dev; +Cc: Mahesh Salgaonkar, Nicholas Piggin
Short forward and backward branches can be given number labels, but
larger, significant divergences in code path are more readable if
they're given descriptive names.

Also adjust a comment to account for guest delivery.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/kernel/exceptions-64s.S | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8e16578cf932..0676a0f4c2d5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1052,7 +1052,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
*/
lbz r11,HSTATE_IN_GUEST(r13)
cmpwi r11,0 /* Check if coming from guest */
- bne 9f /* continue if we are. */
+ bne mce_deliver /* continue if we are. */
#endif
/*
@@ -1060,7 +1060,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
* exception handler which will deliver the MC event to this kernel.
*/
andi. r11,r12,MSR_PR /* See if coming from user. */
- bne 9f /* continue in V mode if we are. */
+ bne mce_deliver /* continue in V mode if we are. */
/*
* At this point we are coming from kernel context.
@@ -1088,8 +1088,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
MACHINE_CHECK_HANDLER_WINDUP
RFI_TO_KERNEL
-9:
- /* Deliver the machine check to host kernel in V mode. */
+mce_deliver:
+ /*
+ * This is a host user or guest MCE. Restore all registers, then
+ * run the "late" handler. For host user, this will run the
+ * machine_check_exception handler in virtual mode like a normal
+ * interrupt handler. For guest, this will trigger the KVM test
+ * and branch to the KVM interrupt similarly to other interrupts.
+ */
BEGIN_FTR_SECTION
ld r10,ORIG_GPR3(r1)
mtspr SPRN_CFAR,r10
--
2.20.1