* [PATCH v2 01/11] powerpc: Add #defs for paca->soft_enabled flags
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
Two #defines, IRQ_DISABLE_MASK_NONE and IRQ_DISABLE_MASK_LINUX,
are added to be used when updating paca->soft_enabled.
Replace the hardcoded values used when updating
paca->soft_enabled with the IRQ_DISABLE_MASK_* #defines.
No logic change.
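As a purely illustrative before/after sketch (not part of the patch itself),
the change amounts to replacing magic numbers with named flags:

	/* before: hardcoded value, meaning implicit */
	local_paca->soft_enabled = 0;	/* interrupts soft-disabled */

	/* after: named flag, meaning explicit */
	local_paca->soft_enabled = IRQ_DISABLE_MASK_LINUX;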
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/exception-64s.h | 2 +-
arch/powerpc/include/asm/hw_irq.h | 21 ++++++++++++++-------
arch/powerpc/include/asm/irqflags.h | 6 +++---
arch/powerpc/include/asm/kvm_ppc.h | 2 +-
arch/powerpc/kernel/entry_64.S | 16 ++++++++--------
arch/powerpc/kernel/exceptions-64e.S | 6 +++---
arch/powerpc/kernel/head_64.S | 5 +++--
arch/powerpc/kernel/idle_book3e.S | 3 ++-
arch/powerpc/kernel/idle_power4.S | 3 ++-
arch/powerpc/kernel/irq.c | 9 +++++----
arch/powerpc/kernel/process.c | 3 ++-
arch/powerpc/kernel/setup_64.c | 3 +++
arch/powerpc/kernel/time.c | 2 +-
arch/powerpc/mm/hugetlbpage.c | 2 +-
arch/powerpc/perf/core-book3s.c | 2 +-
15 files changed, 50 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index bed66e5743b3..38272fe8a757 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -408,7 +408,7 @@ label##_relon_hv: \
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
- cmpwi r10,0; \
+ cmpwi r10,IRQ_DISABLE_MASK_LINUX; \
li r10,SOFTEN_VALUE_##vec; \
beq masked_##h##interrupt
#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c7d82ff62a33..1fcc2fd7275a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -27,6 +27,12 @@
#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
#define PACA_IRQ_HMI 0x20
+/*
+ * flags for paca->soft_enabled
+ */
+#define IRQ_DISABLE_MASK_NONE 1
+#define IRQ_DISABLE_MASK_LINUX 0
+
#endif /* CONFIG_PPC64 */
#ifndef __ASSEMBLY__
@@ -58,9 +64,10 @@ static inline unsigned long arch_local_irq_disable(void)
unsigned long flags, zero;
asm volatile(
- "li %1,0; lbz %0,%2(13); stb %1,%2(13)"
+ "li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
: "=r" (flags), "=&r" (zero)
- : "i" (offsetof(struct paca_struct, soft_enabled))
+ : "i" (offsetof(struct paca_struct, soft_enabled)),\
+ "i" (IRQ_DISABLE_MASK_LINUX)
: "memory");
return flags;
@@ -70,7 +77,7 @@ extern void arch_local_irq_restore(unsigned long);
static inline void arch_local_irq_enable(void)
{
- arch_local_irq_restore(1);
+ arch_local_irq_restore(IRQ_DISABLE_MASK_NONE);
}
static inline unsigned long arch_local_irq_save(void)
@@ -80,7 +87,7 @@ static inline unsigned long arch_local_irq_save(void)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return flags == 0;
+ return flags == IRQ_DISABLE_MASK_LINUX;
}
static inline bool arch_irqs_disabled(void)
@@ -100,9 +107,9 @@ static inline bool arch_irqs_disabled(void)
u8 _was_enabled; \
__hard_irq_disable(); \
_was_enabled = local_paca->soft_enabled; \
- local_paca->soft_enabled = 0; \
+ local_paca->soft_enabled = IRQ_DISABLE_MASK_LINUX;\
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
- if (_was_enabled) \
+ if (_was_enabled == IRQ_DISABLE_MASK_NONE) \
trace_hardirqs_off(); \
} while(0)
@@ -125,7 +132,7 @@ static inline void may_hard_irq_enable(void)
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
- return !regs->softe;
+ return (regs->softe == IRQ_DISABLE_MASK_LINUX);
}
extern bool prep_irq_for_idle(void);
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index f2149066fe5d..d0ed2a7d7d10 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -48,8 +48,8 @@
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
- cmpwi cr0,__rA,0; \
- li __rA,0; \
+ cmpwi cr0,__rA,IRQ_DISABLE_MASK_LINUX;\
+ li __rA,IRQ_DISABLE_MASK_LINUX; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \
beq 44f; \
@@ -63,7 +63,7 @@
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACAIRQHAPPENED(r13); \
- li __rB,0; \
+ li __rB,IRQ_DISABLE_MASK_LINUX; \
ori __rA,__rA,PACA_IRQ_HARD_DIS; \
stb __rB,PACASOFTIRQEN(r13); \
stb __rA,PACAIRQHAPPENED(r13)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2544edabe7f3..740ee309cea8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -707,7 +707,7 @@ static inline void kvmppc_fix_ee_before_entry(void)
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
- local_paca->soft_enabled = 1;
+ local_paca->soft_enabled = IRQ_DISABLE_MASK_NONE;
#endif
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5afd03e5e8b8..aef7b64cbbeb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -131,7 +131,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
*/
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
lbz r10,PACASOFTIRQEN(r13)
- xori r10,r10,1
+ xori r10,r10,IRQ_DISABLE_MASK_NONE
1: tdnei r10,0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
@@ -147,7 +147,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
/* We do need to set SOFTE in the stack frame or the return
* from interrupt will be painful
*/
- li r10,1
+ li r10,IRQ_DISABLE_MASK_NONE
std r10,SOFTE(r1)
CURRENT_THREAD_INFO(r11, r1)
@@ -725,7 +725,7 @@ resume_kernel:
lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
- cmpdi r0,0
+ cmpdi r0,IRQ_DISABLE_MASK_LINUX
crandc eq,cr1*4+eq,eq
bne restore
@@ -765,11 +765,11 @@ restore:
*/
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
- cmpwi cr0,r5,0
+ cmpwi cr0,r5,IRQ_DISABLE_MASK_LINUX
beq restore_irq_off
/* We are enabling, were we already enabled ? Yes, just return */
- cmpwi cr0,r6,1
+ cmpwi cr0,r6,IRQ_DISABLE_MASK_NONE
beq cr0,do_restore
/*
@@ -788,7 +788,7 @@ restore:
*/
restore_no_replay:
TRACE_ENABLE_INTS
- li r0,1
+ li r0,IRQ_DISABLE_MASK_NONE
stb r0,PACASOFTIRQEN(r13);
/*
@@ -894,7 +894,7 @@ restore_irq_off:
beq 1f
rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
stb r7,PACAIRQHAPPENED(r13)
-1: li r0,0
+1: li r0,IRQ_DISABLE_MASK_LINUX
stb r0,PACASOFTIRQEN(r13);
TRACE_DISABLE_INTS
b do_restore
@@ -1012,7 +1012,7 @@ _GLOBAL(enter_rtas)
* check it with the asm equivalent of WARN_ON
*/
lbz r0,PACASOFTIRQEN(r13)
-1: tdnei r0,0
+1: tdnei r0,IRQ_DISABLE_MASK_LINUX
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 38a1f96430e1..5c628b5696f6 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -210,9 +210,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
ld r5,SOFTE(r1)
/* Interrupts had better not already be enabled... */
- twnei r6,0
+ twnei r6,IRQ_DISABLE_MASK_LINUX
- cmpwi cr0,r5,0
+ cmpwi cr0,r5,IRQ_DISABLE_MASK_LINUX
beq 1f
TRACE_ENABLE_INTS
@@ -352,7 +352,7 @@ ret_from_mc_except:
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r10,0; /* yes -> go out of line */ \
+ cmpwi cr0,r10,IRQ_DISABLE_MASK_LINUX;/* yes -> go out of line */ \
beq masked_interrupt_book3e_##n
#define PROLOG_ADDITION_2REGS_GEN(n) \
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f765b0434731..4bd58b6ea380 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -732,7 +732,7 @@ _GLOBAL(pmac_secondary_start)
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
*/
- li r0,0
+ li r0,IRQ_DISABLE_MASK_LINUX
stb r0,PACASOFTIRQEN(r13)
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)
@@ -789,6 +789,7 @@ __secondary_start:
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
*/
+ li r7,IRQ_DISABLE_MASK_LINUX
stb r7,PACASOFTIRQEN(r13)
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)
@@ -954,7 +955,7 @@ start_here_common:
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
*/
- li r0,0
+ li r0,IRQ_DISABLE_MASK_LINUX
stb r0,PACASOFTIRQEN(r13)
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 48c21acef915..a459c306b04e 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/epapr_hcalls.h>
+#include <asm/hw_irq.h>
/* 64-bit version only for now */
#ifdef CONFIG_PPC64
@@ -46,7 +47,7 @@ _GLOBAL(\name)
bl trace_hardirqs_on
addi r1,r1,128
#endif
- li r0,1
+ li r0,IRQ_DISABLE_MASK_NONE
stb r0,PACASOFTIRQEN(r13)
/* Interrupts will make use return to LR, so get something we want
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index f57a19348bdd..785e10619d8d 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -15,6 +15,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/irqflags.h>
+#include <asm/hw_irq.h>
#undef DEBUG
@@ -53,7 +54,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
mfmsr r7
#endif /* CONFIG_TRACE_IRQFLAGS */
- li r0,1
+ li r0,IRQ_DISABLE_MASK_NONE
stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
BEGIN_FTR_SECTION
DSSALL
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 08887cf2b20e..ed1123125063 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -67,6 +67,7 @@
#include <asm/smp.h>
#include <asm/debug.h>
#include <asm/livepatch.h>
+#include <asm/hw_irq.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -208,7 +209,7 @@ notrace void arch_local_irq_restore(unsigned long en)
/* Write the new soft-enabled value */
set_soft_enabled(en);
- if (!en)
+ if (en == IRQ_DISABLE_MASK_LINUX)
return;
/*
* From this point onward, we can take interrupts, preempt,
@@ -253,7 +254,7 @@ notrace void arch_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_TRACE_IRQFLAGS */
- set_soft_enabled(0);
+ set_soft_enabled(IRQ_DISABLE_MASK_LINUX);
/*
* Check if anything needs to be re-emitted. We haven't
@@ -263,7 +264,7 @@ notrace void arch_local_irq_restore(unsigned long en)
replay = __check_irq_replay();
/* We can soft-enable now */
- set_soft_enabled(1);
+ set_soft_enabled(IRQ_DISABLE_MASK_NONE);
/*
* And replay if we have to. This will return with interrupts
@@ -337,7 +338,7 @@ bool prep_irq_for_idle(void)
* of entering the low power state.
*/
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
- local_paca->soft_enabled = 1;
+ local_paca->soft_enabled = IRQ_DISABLE_MASK_NONE;
/* Tell the caller to enter the low power state */
return true;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9ee2623e0f67..6efaea2dc805 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -54,6 +54,7 @@
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
+#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
@@ -1441,7 +1442,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
clear_tsk_thread_flag(p, TIF_32BIT);
- childregs->softe = 1;
+ childregs->softe = IRQ_DISABLE_MASK_NONE;
#endif
childregs->gpr[15] = kthread_arg;
p->thread.regs = NULL; /* no user register state */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7ac8e6eaab5b..f31930b9bfc1 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -67,6 +67,7 @@
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
+#include <asm/hw_irq.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -195,6 +196,8 @@ static void __init fixup_boot_paca(void)
get_paca()->cpu_start = 1;
/* Allow percpu accesses to work until we setup percpu data */
get_paca()->data_offset = 0;
+ /* Mark interrupts disabled in PACA */
+ get_paca()->soft_enabled = IRQ_DISABLE_MASK_LINUX;
}
static void __init configure_exceptions(void)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 3efbedefba6a..7105757cdb90 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -268,7 +268,7 @@ void accumulate_stolen_time(void)
* needs to reflect that so various debug stuff doesn't
* complain
*/
- local_paca->soft_enabled = 0;
+ local_paca->soft_enabled = IRQ_DISABLE_MASK_LINUX;
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7372ee13eb1e..3270fa7880cd 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -914,7 +914,7 @@ void flush_dcache_icache_hugepage(struct page *page)
* So long as we atomically load page table pointers we are safe against teardown,
* we can follow the address down to the the page and take a ref on it.
* This function need to be called with interrupts disabled. We use this variant
- * when we have MSR[EE] = 0 but the paca->soft_enabled = 1
+ * when we have MSR[EE] = 0 but the paca->soft_enabled = IRQ_DISABLE_MASK_NONE
*/
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4ed377f0f7b2..5e8302f85e3d 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -313,7 +313,7 @@ static inline void perf_read_regs(struct pt_regs *regs)
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
- return !regs->softe;
+ return (regs->softe == IRQ_DISABLE_MASK_LINUX);
}
/*
--
2.7.4
* [PATCH v2 02/11] powerpc: move set_soft_enabled() and rename
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
Move set_soft_enabled() from arch/powerpc/kernel/irq.c to
asm/hw_irq.h, to force updates to paca->soft_enabled to be
done via this access function. Add a "memory" clobber to
hint to the compiler, since paca->soft_enabled memory is the
target here.
Rename it to soft_enabled_set(): the namespace works better
with soft_enabled as a prefix rather than a postfix when new
soft_enabled manipulation functions are introduced.
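For illustration only, after this patch every write to the flag funnels
through the helper; a caller such as arch_local_irq_restore() does:

	/* sketch: direct stores to local_paca->soft_enabled are gone */
	soft_enabled_set(IRQ_DISABLE_MASK_LINUX);	/* was: set_soft_enabled(0) */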
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 15 +++++++++++++++
arch/powerpc/kernel/irq.c | 12 +++---------
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 1fcc2fd7275a..45243c8579db 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -47,6 +47,21 @@ extern void unknown_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/paca.h>
+/*
+ * TODO:
+ * Currently none of the soft_enabled modification helpers have clobbers
+ * for modifying the r13->soft_enabled memory itself. Secondly they only
+ * include "memory" clobber as a hint. Ideally, if all the accesses to
+ * soft_enabled go via these helpers, we could avoid the "memory" clobber.
+ * The former could be taken care of by having the location in the constraints.
+ */
+static inline notrace void soft_enabled_set(unsigned long enable)
+{
+ __asm__ __volatile__("stb %0,%1(13)"
+ : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))
+ : "memory");
+}
+
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ed1123125063..5a926ea5bd0b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -107,12 +107,6 @@ static inline notrace unsigned long get_irq_happened(void)
return happened;
}
-static inline notrace void set_soft_enabled(unsigned long enable)
-{
- __asm__ __volatile__("stb %0,%1(13)"
- : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
-}
-
static inline notrace int decrementer_check_overflow(void)
{
u64 now = get_tb_or_rtc();
@@ -208,7 +202,7 @@ notrace void arch_local_irq_restore(unsigned long en)
unsigned int replay;
/* Write the new soft-enabled value */
- set_soft_enabled(en);
+ soft_enabled_set(en);
if (en == IRQ_DISABLE_MASK_LINUX)
return;
/*
@@ -254,7 +248,7 @@ notrace void arch_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_TRACE_IRQFLAGS */
- set_soft_enabled(IRQ_DISABLE_MASK_LINUX);
+ soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
/*
* Check if anything needs to be re-emitted. We haven't
@@ -264,7 +258,7 @@ notrace void arch_local_irq_restore(unsigned long en)
replay = __check_irq_replay();
/* We can soft-enable now */
- set_soft_enabled(IRQ_DISABLE_MASK_NONE);
+ soft_enabled_set(IRQ_DISABLE_MASK_NONE);
/*
* And replay if we have to. This will return with interrupts
--
2.7.4
* [PATCH v2 03/11] powerpc: Use soft_enabled_set api to update paca->soft_enabled
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
Force use of the soft_enabled_set() wrapper to update paca->soft_enabled
wherever possible. Also add a new wrapper function, soft_enabled_set_return(),
which stores the new value and returns the previous one, again forcing
paca->soft_enabled updates through the accessors.
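A sketch of the intended save/restore pattern (it mirrors the time.c
hunk below):

	unsigned long save_soft_enabled;

	/* disable, remembering the previous state */
	save_soft_enabled = soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX);
	/* ... region that must run with interrupts soft-disabled ... */
	soft_enabled_set(save_soft_enabled);	/* restore saved state */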
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 14 ++++++++++++++
arch/powerpc/include/asm/kvm_ppc.h | 2 +-
arch/powerpc/kernel/irq.c | 2 +-
arch/powerpc/kernel/setup_64.c | 4 ++--
arch/powerpc/kernel/time.c | 6 +++---
5 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 45243c8579db..6d263168fc0a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -62,6 +62,20 @@ static inline notrace void soft_enabled_set(unsigned long enable)
: "memory");
}
+static inline notrace unsigned long soft_enabled_set_return(unsigned long enable)
+{
+ unsigned long flags;
+
+ asm volatile(
+ "lbz %0,%1(13); stb %2,%1(13)"
+ : "=r" (flags)
+ : "i" (offsetof(struct paca_struct, soft_enabled)),\
+ "r" (enable)
+ : "memory");
+
+ return flags;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 740ee309cea8..07f6a51ae99f 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -707,7 +707,7 @@ static inline void kvmppc_fix_ee_before_entry(void)
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
- local_paca->soft_enabled = IRQ_DISABLE_MASK_NONE;
+ soft_enabled_set(IRQ_DISABLE_MASK_NONE);
#endif
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5a926ea5bd0b..58462ce186fa 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -332,7 +332,7 @@ bool prep_irq_for_idle(void)
* of entering the low power state.
*/
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
- local_paca->soft_enabled = IRQ_DISABLE_MASK_NONE;
+ soft_enabled_set(IRQ_DISABLE_MASK_NONE);
/* Tell the caller to enter the low power state */
return true;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f31930b9bfc1..f0f882166dcc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -197,7 +197,7 @@ static void __init fixup_boot_paca(void)
/* Allow percpu accesses to work until we setup percpu data */
get_paca()->data_offset = 0;
/* Mark interrupts disabled in PACA */
- get_paca()->soft_enabled = IRQ_DISABLE_MASK_LINUX;
+ soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
}
static void __init configure_exceptions(void)
@@ -334,7 +334,7 @@ void __init early_setup(unsigned long dt_ptr)
void early_setup_secondary(void)
{
/* Mark interrupts disabled in PACA */
- get_paca()->soft_enabled = 0;
+ soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7105757cdb90..483313aa311f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -259,7 +259,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
void accumulate_stolen_time(void)
{
u64 sst, ust;
- u8 save_soft_enabled = local_paca->soft_enabled;
+ unsigned long save_soft_enabled;
struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before
@@ -268,7 +268,7 @@ void accumulate_stolen_time(void)
* needs to reflect that so various debug stuff doesn't
* complain
*/
- local_paca->soft_enabled = IRQ_DISABLE_MASK_LINUX;
+ save_soft_enabled = soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX);
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
@@ -276,7 +276,7 @@ void accumulate_stolen_time(void)
acct->user_time -= ust;
local_paca->stolen_time += ust + sst;
- local_paca->soft_enabled = save_soft_enabled;
+ soft_enabled_set(save_soft_enabled);
}
static inline u64 calculate_stolen_time(u64 stop_tb)
--
2.7.4
* [PATCH v2 04/11] powerpc: Add soft_enabled manipulation functions
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
Add new soft_enabled_* manipulation functions and implement the
arch_local_* functions using the soft_enabled_* wrappers.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 32 ++++++++++++++------------------
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6d263168fc0a..2083b6628aaf 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -62,21 +62,7 @@ static inline notrace void soft_enabled_set(unsigned long enable)
: "memory");
}
-static inline notrace unsigned long soft_enabled_set_return(unsigned long enable)
-{
- unsigned long flags;
-
- asm volatile(
- "lbz %0,%1(13); stb %2,%1(13)"
- : "=r" (flags)
- : "i" (offsetof(struct paca_struct, soft_enabled)),\
- "r" (enable)
- : "memory");
-
- return flags;
-}
-
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long soft_enabled_return(void)
{
unsigned long flags;
@@ -88,20 +74,30 @@ static inline unsigned long arch_local_save_flags(void)
return flags;
}
-static inline unsigned long arch_local_irq_disable(void)
+static inline notrace unsigned long soft_enabled_set_return(unsigned long enable)
{
unsigned long flags, zero;
asm volatile(
- "li %1,%3; lbz %0,%2(13); stb %1,%2(13)"
+ "mr %1,%3; lbz %0,%2(13); stb %1,%2(13)"
: "=r" (flags), "=&r" (zero)
: "i" (offsetof(struct paca_struct, soft_enabled)),\
- "i" (IRQ_DISABLE_MASK_LINUX)
+ "r" (enable)
: "memory");
return flags;
}
+static inline unsigned long arch_local_save_flags(void)
+{
+ return soft_enabled_return();
+}
+
+static inline unsigned long arch_local_irq_disable(void)
+{
+ return soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX);
+}
+
extern void arch_local_irq_restore(unsigned long);
static inline void arch_local_irq_enable(void)
--
2.7.4
* [PATCH v2 05/11] powerpc: reverse the soft_enable logic
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
"paca->soft_enabled" is used as a flag to mask some of interrupts.
Currently supported flags values and their details:
soft_enabled MSR[EE]
0 0 Disabled (PMI and HMI not masked)
1 1 Enabled
"paca->soft_enabled" is initialized to 1 to make the interripts as
enabled. arch_local_irq_disable() will toggle the value when interrupts
needs to disbled. At this point, the interrupts are not actually disabled,
instead, interrupt vector has code to check for the flag and mask it when it occurs.
By "mask it", it update interrupt paca->irq_happened and return.
arch_local_irq_restore() is called to re-enable interrupts, which checks and
replays interrupts if any occured.
Now, as mentioned, current logic doesnot mask "performance monitoring interrupts"
and PMIs are implemented as NMI. But this patchset depends on local_irq_*
for a successful local_* update. Meaning, mask all possible interrupts during
local_* update and replay them after the update.
So the idea here is to reserve the "paca->soft_enabled" logic. New values and
details:
soft_enabled MSR[EE]
1 0 Disabled (PMI and HMI not masked)
0 1 Enabled
Reason for the this change is to create foundation for a third mask value "0x2"
for "soft_enabled" to add support to mask PMIs. When ->soft_enabled is
set to a value "3", PMI interrupts are mask and when set to a value
of "1", PMI are not mask.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 4 ++--
arch/powerpc/kernel/entry_64.S | 5 ++---
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 2083b6628aaf..889cc7bec0f8 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -30,8 +30,8 @@
/*
* flags for paca->soft_enabled
*/
-#define IRQ_DISABLE_MASK_NONE 1
-#define IRQ_DISABLE_MASK_LINUX 0
+#define IRQ_DISABLE_MASK_NONE 0
+#define IRQ_DISABLE_MASK_LINUX 1
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index aef7b64cbbeb..879aeb11ad29 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -131,8 +131,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
*/
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
lbz r10,PACASOFTIRQEN(r13)
- xori r10,r10,IRQ_DISABLE_MASK_NONE
-1: tdnei r10,0
+1: tdnei r10,IRQ_DISABLE_MASK_NONE
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
@@ -1012,7 +1011,7 @@ _GLOBAL(enter_rtas)
* check it with the asm equivalent of WARN_ON
*/
lbz r0,PACASOFTIRQEN(r13)
-1: tdnei r0,IRQ_DISABLE_MASK_LINUX
+1: tdeqi r0,IRQ_DISABLE_MASK_NONE
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
--
2.7.4
* [PATCH v2 06/11] powerpc: Avoid using EXCEPTION_PROLOG_1 macro in MASKABLE_*
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
Currently we use both EXCEPTION_PROLOG_1 and __EXCEPTION_PROLOG_1
in the MASKABLE_* macros. As a cleanup, this patch makes the MASKABLE_*
macros use only __EXCEPTION_PROLOG_1. There is no logic change.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/exception-64s.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 38272fe8a757..75e262466b85 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -450,7 +450,7 @@ label##_hv: \
#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \
.globl label##_hv; \
label##_hv: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
@@ -478,7 +478,7 @@ label##_relon_hv: \
#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
.globl label##_relon_hv; \
label##_relon_hv: \
- EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
/*
--
2.7.4
* [PATCH v2 07/11] powerpc: Add support to take additional parameter in MASKABLE_* macro
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
To support addition of "bitmask" to MASKABLE_* macros,
factor out the EXCPETION_PROLOG_1 macro.
Currently soft_enabled is used as the flag to determine
the interrupt state. Patch extends the soft_enabled
to be used as a mask instead of a flag.
Make it explicit the interrupt masking supported
by a gievn interrupt handler. Patch correspondingly
extends the MASKABLE_* macros with an addition's parameter.
"bitmask" parameter is passed to SOFTEN_TEST macro to decide
on masking the interrupt.
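In C terms, the new SOFTEN_TEST becomes roughly the following (a sketch;
the real test is the asm sequence in the diff below):

	/* mask the interrupt if any class bit this handler honours is set */
	if (local_paca->soft_enabled & bitmask)	/* andi. r10,r10,bitmask */
		goto masked;			/* bne masked_##h##interrupt */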
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/exception-64s.h | 90 ++++++++++++++++++++------------
arch/powerpc/include/asm/irqflags.h | 4 +-
arch/powerpc/kernel/entry_64.S | 4 +-
arch/powerpc/kernel/exceptions-64e.S | 6 +--
arch/powerpc/kernel/exceptions-64s.S | 36 ++++++++-----
5 files changed, 86 insertions(+), 54 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 75e262466b85..c8ce70bea184 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -161,18 +161,40 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
std r10,area+EX_R10(r13); /* save r10 - r12 */ \
OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
-#define __EXCEPTION_PROLOG_1(area, extra, vec) \
+#define __EXCEPTION_PROLOG_1_PRE(area) \
OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
SAVE_CTR(r10, area); \
- mfcr r9; \
- extra(vec); \
+ mfcr r9;
+
+#define __EXCEPTION_PROLOG_1_POST(area) \
std r11,area+EX_R11(r13); \
std r12,area+EX_R12(r13); \
GET_SCRATCH0(r10); \
std r10,area+EX_R13(r13)
+
+/*
+ * This version of the EXCEPTION_PROLOG_1 will carry an
+ * additional parameter, called "bitmask", to support
+ * checking of the interrupt maskable level in the SOFTEN_TEST.
+ * Intended to be used in MASKABLE_EXCEPTION_* macros.
+ */
+#define MASKABLE_EXCEPTION_PROLOG_1(area, extra, vec, bitmask) \
+ __EXCEPTION_PROLOG_1_PRE(area); \
+ extra(vec, bitmask); \
+ __EXCEPTION_PROLOG_1_POST(area);
+
+/*
+ * This version of the EXCEPTION_PROLOG_1 is intended
+ * to be used in STD_EXCEPTION* macros
+ */
+#define _EXCEPTION_PROLOG_1(area, extra, vec) \
+ __EXCEPTION_PROLOG_1_PRE(area); \
+ extra(vec); \
+ __EXCEPTION_PROLOG_1_POST(area);
+
#define EXCEPTION_PROLOG_1(area, extra, vec) \
- __EXCEPTION_PROLOG_1(area, extra, vec)
+ _EXCEPTION_PROLOG_1(area, extra, vec)
#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
@@ -406,79 +428,79 @@ label##_relon_hv: \
#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
#define SOFTEN_VALUE_0xea2 PACA_IRQ_EE
-#define __SOFTEN_TEST(h, vec) \
+#define __SOFTEN_TEST(h, vec, bitmask) \
lbz r10,PACASOFTIRQEN(r13); \
- cmpwi r10,IRQ_DISABLE_MASK_LINUX; \
+ andi. r10,r10,bitmask; \
li r10,SOFTEN_VALUE_##vec; \
- beq masked_##h##interrupt
-#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
+ bne masked_##h##interrupt
+#define _SOFTEN_TEST(h, vec, bitmask) __SOFTEN_TEST(h, vec, bitmask)
-#define SOFTEN_TEST_PR(vec) \
+#define SOFTEN_TEST_PR(vec, bitmask) \
KVMTEST(vec); \
- _SOFTEN_TEST(EXC_STD, vec)
+ _SOFTEN_TEST(EXC_STD, vec, bitmask)
-#define SOFTEN_TEST_HV(vec) \
+#define SOFTEN_TEST_HV(vec, bitmask) \
KVMTEST(vec); \
- _SOFTEN_TEST(EXC_HV, vec)
+ _SOFTEN_TEST(EXC_HV, vec, bitmask)
-#define SOFTEN_NOTEST_PR(vec) _SOFTEN_TEST(EXC_STD, vec)
-#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
+#define SOFTEN_NOTEST_PR(vec, bitmask) _SOFTEN_TEST(EXC_STD, vec, bitmask)
+#define SOFTEN_NOTEST_HV(vec, bitmask) _SOFTEN_TEST(EXC_HV, vec, bitmask)
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
+#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask) \
+ __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
-#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
+#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
- EXC_STD, SOFTEN_TEST_PR)
+ EXC_STD, SOFTEN_TEST_PR, bitmask)
-#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
+#define MASKABLE_EXCEPTION_HV(loc, vec, label, bitmask) \
. = loc; \
.globl label##_hv; \
label##_hv: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
- EXC_HV, SOFTEN_TEST_HV)
+ EXC_HV, SOFTEN_TEST_HV, bitmask)
-#define MASKABLE_EXCEPTION_HV_OOL(vec, label) \
+#define MASKABLE_EXCEPTION_HV_OOL(vec, label, bitmask) \
.globl label##_hv; \
label##_hv: \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask); \
EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
-#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
+#define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)\
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec, bitmask); \
EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, h);
-#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra) \
- __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)
+#define _MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)\
+ __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra, bitmask)
-#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label) \
+#define MASKABLE_RELON_EXCEPTION_PSERIES(loc, vec, label, bitmask) \
. = loc; \
.globl label##_relon_pSeries; \
label##_relon_pSeries: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
- EXC_STD, SOFTEN_NOTEST_PR)
+ EXC_STD, SOFTEN_NOTEST_PR, bitmask)
-#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label) \
+#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask) \
. = loc; \
.globl label##_relon_hv; \
label##_relon_hv: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
- EXC_HV, SOFTEN_NOTEST_HV)
+ EXC_HV, SOFTEN_NOTEST_HV, bitmask)
-#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
+#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
.globl label##_relon_hv; \
label##_relon_hv: \
- __EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\
EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
/*
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index d0ed2a7d7d10..9ff09747a226 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -48,11 +48,11 @@
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
- cmpwi cr0,__rA,IRQ_DISABLE_MASK_LINUX;\
+ andi. __rA,__rA,IRQ_DISABLE_MASK_LINUX;\
li __rA,IRQ_DISABLE_MASK_LINUX; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \
- beq 44f; \
+ bne 44f; \
stb __rA,PACASOFTIRQEN(r13); \
TRACE_DISABLE_INTS; \
44:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 879aeb11ad29..533e363914a9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -764,8 +764,8 @@ restore:
*/
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
- cmpwi cr0,r5,IRQ_DISABLE_MASK_LINUX
- beq restore_irq_off
+ andi. r5,r5,IRQ_DISABLE_MASK_LINUX
+ bne restore_irq_off
/* We are enabling, were we already enabled ? Yes, just return */
cmpwi cr0,r6,IRQ_DISABLE_MASK_NONE
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 5c628b5696f6..8e40df2c2f30 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -212,8 +212,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
/* Interrupts had better not already be enabled... */
twnei r6,IRQ_DISABLE_MASK_LINUX
- cmpwi cr0,r5,IRQ_DISABLE_MASK_LINUX
- beq 1f
+ andi. r5,r5,IRQ_DISABLE_MASK_LINUX
+ bne 1f
TRACE_ENABLE_INTS
stb r5,PACASOFTIRQEN(r13)
@@ -352,7 +352,7 @@ ret_from_mc_except:
#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
- cmpwi cr0,r10,IRQ_DISABLE_MASK_LINUX;/* yes -> go out of line */ \
+ andi. r10,r10,IRQ_DISABLE_MASK_LINUX;/* yes -> go out of line */ \
- beq masked_interrupt_book3e_##n
+ bne masked_interrupt_book3e_##n
#define PROLOG_ADDITION_2REGS_GEN(n) \
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index bffec73dbffc..581a10bdb34a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -221,11 +221,13 @@ hardware_interrupt_pSeries:
hardware_interrupt_hv:
BEGIN_FTR_SECTION
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
- EXC_HV, SOFTEN_TEST_HV)
+ EXC_HV, SOFTEN_TEST_HV,
+ IRQ_DISABLE_MASK_LINUX)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
FTR_SECTION_ELSE
_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
- EXC_STD, SOFTEN_TEST_PR)
+ EXC_STD, SOFTEN_TEST_PR,
+ IRQ_DISABLE_MASK_LINUX)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
@@ -241,11 +243,13 @@ hardware_interrupt_hv:
. = 0x900
.globl decrementer_pSeries
decrementer_pSeries:
- _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
+ _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR,
+ IRQ_DISABLE_MASK_LINUX)
STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
- MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
+ MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super,
+ IRQ_DISABLE_MASK_LINUX)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
STD_EXCEPTION_PSERIES(0xb00, trap_0b)
@@ -582,13 +586,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
- MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
+ MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception,IRQ_DISABLE_MASK_LINUX)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
- MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
+ MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell,IRQ_DISABLE_MASK_LINUX)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
- MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq)
+ MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq,IRQ_DISABLE_MASK_LINUX)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
/* moved from 0xf00 */
@@ -824,16 +828,20 @@ instruction_access_slb_relon_pSeries:
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
BEGIN_FTR_SECTION
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
+ _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt,
+ EXC_HV, SOFTEN_TEST_HV, IRQ_DISABLE_MASK_LINUX)
FTR_SECTION_ELSE
- _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
+ _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+ EXC_STD, SOFTEN_TEST_PR, IRQ_DISABLE_MASK_LINUX)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
- MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
+ MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer,
+ IRQ_DISABLE_MASK_LINUX)
STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
- MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
+ MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super,
+ IRQ_DISABLE_MASK_LINUX)
STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
. = 0x4c00
@@ -1132,8 +1140,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
/* Equivalents to the above handlers for relocation-on interrupt vectors */
STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
- MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
- MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell,
+ IRQ_DISABLE_MASK_LINUX)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq,
+ IRQ_DISABLE_MASK_LINUX)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
--
2.7.4
* [PATCH v2 08/11] powerpc: Add support to mask perf interrupts and replay them
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
To support masking of PMI interrupts, a couple of new interrupt handler
macros are added: MASKABLE_EXCEPTION_PSERIES_OOL and
MASKABLE_RELON_EXCEPTION_PSERIES_OOL.
A new bitmask value, "IRQ_DISABLE_MASK_PMU", is introduced to support
the masking of PMIs. A couple of new irq #defines, "PACA_IRQ_PMI" and
"SOFTEN_VALUE_0xf0*", are added for use in the exception code to check
for PMI interrupts.
In the masked_interrupt handler, for PMIs we clear MSR[EE]
and return. In __check_irq_replay(), the PMI interrupt is replayed
by calling the performance_monitor_common handler.
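Sketching the replay side in C (hedged; it compresses the irq.c hunk
below):

	/* replay path added to __check_irq_replay(): PMIs first */
	local_paca->irq_happened &= ~PACA_IRQ_PMI;
	if (happened & PACA_IRQ_PMI)
		return 0xf00;	/* __replay_interrupt() then branches
				 * to performance_monitor_common */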
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/exception-64s.h | 13 +++++++++++++
arch/powerpc/include/asm/hw_irq.h | 2 ++
arch/powerpc/kernel/entry_64.S | 5 +++++
arch/powerpc/kernel/exceptions-64s.S | 6 ++++--
arch/powerpc/kernel/irq.c | 25 ++++++++++++++++++++++++-
5 files changed, 48 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c8ce70bea184..7bea90015b5d 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -427,6 +427,7 @@ label##_relon_hv: \
#define SOFTEN_VALUE_0xe62 PACA_IRQ_HMI
#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
#define SOFTEN_VALUE_0xea2 PACA_IRQ_EE
+#define SOFTEN_VALUE_0xf00 PACA_IRQ_PMI
#define __SOFTEN_TEST(h, vec, bitmask) \
lbz r10,PACASOFTIRQEN(r13); \
@@ -462,6 +463,12 @@ label##_pSeries: \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_TEST_PR, bitmask)
+#define MASKABLE_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+ .globl label##_pSeries; \
+label##_pSeries: \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_PR, vec, bitmask);\
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD);
+
#define MASKABLE_EXCEPTION_HV(loc, vec, label, bitmask) \
. = loc; \
.globl label##_hv; \
@@ -490,6 +497,12 @@ label##_relon_pSeries: \
_MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_NOTEST_PR, bitmask)
+#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label, bitmask) \
+ .globl label##_relon_pSeries; \
+label##_relon_pSeries: \
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec, bitmask);\
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD);
+
#define MASKABLE_RELON_EXCEPTION_HV(loc, vec, label, bitmask) \
. = loc; \
.globl label##_relon_hv; \
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 889cc7bec0f8..6173d57f624a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -26,12 +26,14 @@
#define PACA_IRQ_DEC 0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
#define PACA_IRQ_HMI 0x20
+#define PACA_IRQ_PMI 0x40
/*
* flags for paca->soft_enabled
*/
#define IRQ_DISABLE_MASK_NONE 0
#define IRQ_DISABLE_MASK_LINUX 1
+#define IRQ_DISABLE_MASK_PMU 2
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 533e363914a9..e3baf9c24d0e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -933,6 +933,11 @@ restore_check_irq_replay:
addi r3,r1,STACK_FRAME_OVERHEAD;
bl do_IRQ
b ret_from_except
+1: cmpwi cr0,r3,0xf00
+ bne 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD;
+ bl performance_monitor_exception
+ b ret_from_except
1: cmpwi cr0,r3,0xe60
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 581a10bdb34a..19138a411700 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -596,7 +596,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
/* moved from 0xf00 */
- STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
+ MASKABLE_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor, IRQ_DISABLE_MASK_PMU)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
@@ -671,6 +671,8 @@ _GLOBAL(__replay_interrupt)
beq decrementer_common
cmpwi r3,0x500
beq hardware_interrupt_common
+ cmpwi r3,0xf00
+ beq performance_monitor_common
BEGIN_FTR_SECTION
cmpwi r3,0xe80
beq h_doorbell_common
@@ -1145,7 +1147,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq,
IRQ_DISABLE_MASK_LINUX)
- STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
+ MASKABLE_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor, IRQ_DISABLE_MASK_PMU)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 58462ce186fa..5ba60f6e7eb8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -159,6 +159,27 @@ notrace unsigned int __check_irq_replay(void)
if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
return 0x900;
+ /*
+ * In masked_handler() for PMI, we disable MSR[EE] and return.
+ * Replay it here.
+ *
+ * After this point, PMIs could still be disabled in certain
+ * scenarios like this one.
+ *
+ * local_irq_disable();
+ * powerpc_irq_pmu_save();
+ * powerpc_irq_pmu_restore();
+ * local_irq_restore();
+ *
+ * Even though powerpc_irq_pmu_restore() would have replayed the PMIs
+ * if any, we will still not have enabled EE; that happens only at
+ * completion of the last *_restore in such nested cases. And PMIs will
+ * once again start firing only when we have MSR[EE] enabled.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_PMI;
+ if (happened & PACA_IRQ_PMI)
+ return 0xf00;
+
/* Finally check if an external interrupt happened */
local_paca->irq_happened &= ~PACA_IRQ_EE;
if (happened & PACA_IRQ_EE)
@@ -203,7 +224,9 @@ notrace void arch_local_irq_restore(unsigned long en)
/* Write the new soft-enabled value */
soft_enabled_set(en);
- if (en == IRQ_DISABLE_MASK_LINUX)
+
+ /* any bits still disabled */
+ if (en)
return;
/*
* From this point onward, we can take interrupts, preempt,
--
2.7.4
* [PATCH v2 09/11] powerpc: Add new kconfig IRQ_DEBUG_SUPPORT
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
A new Kconfig option, "CONFIG_IRQ_DEBUG_SUPPORT", is added to hold
WARN_ONs that flag invalid soft_enabled transitions. Also move the code
under CONFIG_TRACE_IRQFLAGS in arch_local_irq_restore() to the new
Kconfig option.
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/Kconfig | 4 ++++
arch/powerpc/kernel/irq.c | 4 ++--
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 927d2ab2ce08..878f05925340 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -51,6 +51,10 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default y
+config IRQ_DEBUG_SUPPORT
+ bool
+ default n
+
config LOCKDEP_SUPPORT
bool
default y
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ba60f6e7eb8..d1ee6c2a6b09 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -258,7 +258,7 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQ_DEBUG_SUPPORT
else {
/*
* We should already be hard disabled here. We had bugs
@@ -269,7 +269,7 @@ notrace void arch_local_irq_restore(unsigned long en)
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
}
-#endif /* CONFIG_TRACE_IRQFLAGS */
+#endif /* CONFIG_IRQ_DEBUG_SUPPORT */
soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
--
2.7.4
* [PATCH v2 10/11] powerpc: Add new set of soft_enabled_ functions
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
To support disabling and enabling of irqs with PMIs masked, a set of
new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
functions are added. powerpc_local_irq_pmu_save() is implemented by
adding a new soft_enabled manipulation function, soft_enabled_or_return().
local_irq_pmu_* macros are provided to access these
powerpc_local_irq_pmu_* functions, and they include
trace_hardirqs_on|off() to match what we
have in include/linux/irqflags.h.
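A hedged usage sketch of the new pair, as a caller would use it (this
mirrors how patch 11 later wraps the local_t operations):

	unsigned long flags;

	powerpc_local_irq_pmu_save(flags);	/* masks LINUX and PMU classes */
	/* ... per-cpu critical section, now safe even against PMIs ... */
	powerpc_local_irq_pmu_restore(flags);	/* replays anything that fired */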
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 62 ++++++++++++++++++++++++++++++++++++++-
arch/powerpc/kernel/irq.c | 4 +++
2 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6173d57f624a..dedd443e8013 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -90,6 +90,20 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long enable
return flags;
}
+static inline notrace unsigned long soft_enabled_or_return(unsigned long enable)
+{
+ unsigned long flags, zero;
+
+ asm volatile(
+ "mr %1,%3; lbz %0,%2(13); or %1,%0,%1; stb %1,%2(13)"
+ : "=r" (flags), "=&r"(zero)
+ : "i" (offsetof(struct paca_struct, soft_enabled)),\
+ "r" (enable)
+ : "memory");
+
+ return flags;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
return soft_enabled_return();
@@ -114,7 +128,7 @@ static inline unsigned long arch_local_irq_save(void)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
- return flags == IRQ_DISABLE_MASK_LINUX;
+ return flags & IRQ_DISABLE_MASK_LINUX;
}
static inline bool arch_irqs_disabled(void)
@@ -122,6 +136,52 @@ static inline bool arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+/*
+ * To support disabling and enabling of irqs with PMIs masked, a set of
+ * new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
+ * functions are added. These macros are implemented using generic
+ * linux local_irq_* code from include/linux/irqflags.h.
+ */
+#define raw_local_irq_pmu_save(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = soft_enabled_or_return(IRQ_DISABLE_MASK_LINUX | \
+ IRQ_DISABLE_MASK_PMU); \
+ } while(0)
+
+#define raw_local_irq_pmu_restore(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ arch_local_irq_restore(flags); \
+ } while(0)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define powerpc_local_irq_pmu_save(flags) \
+ do { \
+ raw_local_irq_pmu_save(flags); \
+ trace_hardirqs_off(); \
+ } while(0)
+#define powerpc_local_irq_pmu_restore(flags) \
+ do { \
+ if (raw_irqs_disabled_flags(flags)) { \
+ raw_local_irq_pmu_restore(flags); \
+ trace_hardirqs_off(); \
+ } else { \
+ trace_hardirqs_on(); \
+ raw_local_irq_pmu_restore(flags); \
+ } \
+ } while (0)
+#else
+#define powerpc_local_irq_pmu_save(flags) \
+ do { \
+ raw_local_irq_pmu_save(flags); \
+ } while (0)
+#define powerpc_local_irq_pmu_restore(flags) \
+ do { \
+ raw_local_irq_pmu_restore(flags); \
+ } while (0)
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d1ee6c2a6b09..7f9f47a93f4e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -222,6 +222,10 @@ notrace void arch_local_irq_restore(unsigned long en)
unsigned char irq_happened;
unsigned int replay;
+#ifdef CONFIG_IRQ_DEBUG_SUPPORT
+ WARN_ON(en & local_paca->soft_enabled & ~IRQ_DISABLE_MASK_LINUX);
+#endif
+
/* Write the new soft-enabled value */
soft_enabled_set(en);
--
2.7.4
* [PATCH v2 11/11] powerpc: rewrite local_t using soft_irq
2016-09-25 14:23 [PATCH v2 00/11]powerpc: "paca->soft_enabled" based local atomic operation implementation Madhavan Srinivasan
` (9 preceding siblings ...)
2016-09-25 14:23 ` [PATCH v2 10/11] powerpc: Add new set of soft_enabled_ functions Madhavan Srinivasan
@ 2016-09-25 14:23 ` Madhavan Srinivasan
10 siblings, 0 replies; 12+ messages in thread
From: Madhavan Srinivasan @ 2016-09-25 14:23 UTC (permalink / raw)
To: benh, mpe; +Cc: anton, paulus, npiggin, linuxppc-dev, Madhavan Srinivasan
Local atomic operations are fast and highly reentrant per-CPU counters,
used for per-cpu variable updates. Local atomic operations only guarantee
variable modification atomicity wrt the CPU which owns the data, and
they need to be executed in a preemption-safe way.
Here is the design of this patch. Since local_* operations only need to
be atomic with respect to interrupts (IIUC), we have two options: either
replay the "op" if interrupted, or replay the interrupt after the "op".
The initial patchset posted implemented local_* operations based on CR5,
which replays the "op". That patchset had issues with rewinding the
address pointer from an array, which made the slow path really slow. And
since the CR5-based implementation proposed using __ex_table to find the
rewind address, it raised concerns about the size of __ex_table and
vmlinux:
https://lists.ozlabs.org/pipermail/linuxppc-dev/2014-December/123115.html
This patch instead uses powerpc_local_irq_pmu_save() to soft-disable
interrupts (including PMIs). After finishing the "op",
powerpc_local_irq_pmu_restore() is called, and any interrupts that
occurred in between are replayed. The patch rewrites the current local_*
functions around this pattern.
Base flow for each function is
{
powerpc_local_irq_pmu_save(flags)
load
..
store
powerpc_local_irq_pmu_restore(flags)
}
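The same flow in plain C, ignoring the inline assembly the real
implementation below uses (an illustrative sketch only, not the patch
code):

static inline void local_add_sketch(long i, local_t *l)
{
	unsigned long flags;

	powerpc_local_irq_pmu_save(flags);
	l->a.counter += i;	/* load, op, store with irqs+PMIs soft-disabled */
	powerpc_local_irq_pmu_restore(flags);
}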
The reason for this approach is that the l[w/d]arx/st[w/d]cx.
instruction pair currently used for local_* operations is heavy on
cycle count, and those instructions do not support a local variant. So,
to see whether the new implementation helps, a modified version of
Rusty's benchmark code on local_t was used:
https://lkml.org/lkml/2008/12/16/450
Modifications to Rusty's benchmark code:
- Executed only the local_t test (a sketch of the timing loop follows)
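The inner timing loop is essentially the following (a hypothetical
reconstruction, not Rusty's exact code; bench_local_inc() and bench_ctr
are made up for the example):

#include <linux/timex.h>	/* get_cycles() */
#include <asm/local.h>

static local_t bench_ctr;

static cycles_t bench_local_inc(unsigned long iters)
{
	unsigned long i;
	cycles_t start = get_cycles();

	for (i = 0; i < iters; i++)
		local_inc(&bench_ctr);

	/* timebase ticks for the whole run; conversion to ns/iteration
	 * (as reported below) is omitted here */
	return get_cycles() - start;
}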
Here are the values with the patch.
Time in ns per iteration:
local_t         Without patch   With patch
_inc                 28              8
_add                 28              8
_read                 3              3
_add_return          28              7
Currently only asm/local.h has been rewritten, and the entire change has
been tested only on PPC64 (pseries guest) and a PPC64 LE host.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/local.h | 201 +++++++++++++++++++++++++++++++++++++++
1 file changed, 201 insertions(+)
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index b8da91363864..7d117c07b0b1 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -3,6 +3,9 @@
#include <linux/percpu.h>
#include <linux/atomic.h>
+#include <linux/irqflags.h>
+
+#include <asm/hw_irq.h>
typedef struct
{
@@ -14,6 +17,202 @@ typedef struct
#define local_read(l) atomic_long_read(&(l)->a)
#define local_set(l,i) atomic_long_set(&(l)->a, (i))
+#ifdef CONFIG_PPC64
+
+static __inline__ void local_add(long i, local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%2)\n\
+ add %0,%1,%0\n"
+ PPC_STL" %0,0(%2)\n"
+ : "=&r" (t)
+ : "r" (i), "r" (&(l->a.counter)));
+ powerpc_local_irq_pmu_restore(flags);
+}
+
+static __inline__ void local_sub(long i, local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%2)\n\
+ subf %0,%1,%0\n"
+ PPC_STL" %0,0(%2)\n"
+ : "=&r" (t)
+ : "r" (i), "r" (&(l->a.counter)));
+ powerpc_local_irq_pmu_restore(flags);
+}
+
+static __inline__ long local_add_return(long a, local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%2)\n\
+ add %0,%1,%0\n"
+ PPC_STL "%0,0(%2)\n"
+ : "=&r" (t)
+ : "r" (a), "r" (&(l->a.counter))
+ : "memory");
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
+
+static __inline__ long local_sub_return(long a, local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%2)\n\
+ subf %0,%1,%0\n"
+ PPC_STL "%0,0(%2)\n"
+ : "=&r" (t)
+ : "r" (a), "r" (&(l->a.counter))
+ : "memory");
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+static __inline__ long local_inc_return(local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%1)\n\
+ addic %0,%0,1\n"
+ PPC_STL "%0,0(%1)\n"
+ : "=&r" (t)
+ : "r" (&(l->a.counter))
+ : "xer", "memory");
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+static __inline__ long local_dec_return(local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%1)\n\
+ addic %0,%0,-1\n"
+ PPC_STL "%0,0(%1)\n"
+ : "=&r" (t)
+ : "r" (&(l->a.counter))
+ : "xer", "memory");
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+#define local_inc(l) local_inc_return(l)
+#define local_dec(l) local_dec_return(l)
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to @l...
+ * @u: ...unless @l is equal to @u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static __inline__ int local_add_unless(local_t *l, long a, long u)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__ (
+ PPC_LL" %0,0(%1)\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC_STL" %0,0(%1) \n"
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&(l->a.counter)), "r" (a), "r" (u)
+ : "cc", "memory");
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t != u;
+}
+
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0)
+#define local_dec_and_test(l) (local_dec_return((l)) == 0)
+
+/*
+ * Atomically test *l and decrement if it is greater than 0.
+ * The function returns the old value of *l minus 1.
+ */
+static __inline__ long local_dec_if_positive(local_t *l)
+{
+ long t;
+ unsigned long flags;
+
+ powerpc_local_irq_pmu_save(flags);
+ __asm__ __volatile__(
+ PPC_LL" %0,0(%1)\n\
+ cmpwi %0,1\n\
+ addi %0,%0,-1\n\
+ blt- 2f\n"
+ PPC_STL "%0,0(%1)\n"
+ "\n\
+2:" : "=&b" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+ powerpc_local_irq_pmu_restore(flags);
+
+ return t;
+}
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i,l) ((l)->a.counter+=(i))
+#define __local_sub(i,l) ((l)->a.counter-=(i))
+
+#else
+
#define local_add(i,l) atomic_long_add((i),(&(l)->a))
#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
#define local_inc(l) atomic_long_inc(&(l)->a)
@@ -172,4 +371,6 @@ static __inline__ long local_dec_if_positive(local_t *l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
+#endif /* CONFIG_PPC64 */
+
#endif /* _ARCH_POWERPC_LOCAL_H */
--
2.7.4