From: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
To: mpe@ellerman.id.au
Cc: benh@kernel.crashing.org, anton@samba.org, paulus@samba.org,
npiggin@gmail.com, linuxppc-dev@lists.ozlabs.org,
Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Subject: [PATCH v9 08/14] powerpc: Rename soft_enabled to soft_disable_mask
Date: Thu, 3 Aug 2017 09:19:12 +0530 [thread overview]
Message-ID: <1501732158-19009-9-git-send-email-maddy@linux.vnet.ibm.com> (raw)
In-Reply-To: <1501732158-19009-1-git-send-email-maddy@linux.vnet.ibm.com>
Rename the paca->soft_enabled to paca->soft_disable_mask as
it is no longer used as a flag for interrupt state.
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
---
arch/powerpc/include/asm/hw_irq.h | 24 ++++++++++++------------
arch/powerpc/include/asm/kvm_ppc.h | 2 +-
arch/powerpc/include/asm/paca.h | 2 +-
arch/powerpc/kernel/asm-offsets.c | 2 +-
arch/powerpc/kernel/irq.c | 8 ++++----
arch/powerpc/kernel/ptrace.c | 2 +-
arch/powerpc/kernel/setup_64.c | 4 ++--
arch/powerpc/kernel/time.c | 6 +++---
arch/powerpc/mm/hugetlbpage.c | 2 +-
arch/powerpc/xmon/xmon.c | 4 ++--
10 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 87b3face8e27..c60922c77249 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -28,7 +28,7 @@
#define PACA_IRQ_HMI 0x20
/*
- * flags for paca->soft_enabled
+ * flags for paca->soft_disable_mask
*/
#define IRQ_DISABLE_MASK_NONE 0
#define IRQ_DISABLE_MASK_LINUX 1
@@ -50,38 +50,38 @@ extern void unknown_exception(struct pt_regs *regs);
/*
*TODO:
* Currently none of the soft_eanbled modification helpers have clobbers
- * for modifying the r13->soft_enabled memory itself. Secondly they only
+ * for modifying the r13->soft_disable_mask memory itself. Secondly they only
* include "memory" clobber as a hint. Ideally, if all the accesses to
- * soft_enabled go via these helpers, we could avoid the "memory" clobber.
+ * soft_disable_mask go via these helpers, we could avoid the "memory" clobber.
* Former could be taken care by having location in the constraints.
*/
-static inline notrace void soft_enabled_set(unsigned long enable)
+static inline notrace void soft_disable_mask_set(unsigned long enable)
{
__asm__ __volatile__("stb %0,%1(13)"
- : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))
+ : : "r" (enable), "i" (offsetof(struct paca_struct, soft_disable_mask))
: "memory");
}
-static inline notrace unsigned long soft_enabled_return(void)
+static inline notrace unsigned long soft_disable_mask_return(void)
{
unsigned long flags;
asm volatile(
"lbz %0,%1(13)"
: "=r" (flags)
- : "i" (offsetof(struct paca_struct, soft_enabled)));
+ : "i" (offsetof(struct paca_struct, soft_disable_mask)));
return flags;
}
-static inline notrace unsigned long soft_enabled_set_return(unsigned long enable)
+static inline notrace unsigned long soft_disable_mask_set_return(unsigned long enable)
{
unsigned long flags, zero;
asm volatile(
"mr %1,%3; lbz %0,%2(13); stb %1,%2(13)"
: "=r" (flags), "=&r" (zero)
- : "i" (offsetof(struct paca_struct, soft_enabled)),\
+ : "i" (offsetof(struct paca_struct, soft_disable_mask)),\
"r" (enable)
: "memory");
@@ -90,7 +90,7 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long enable
static inline unsigned long arch_local_save_flags(void)
{
- return soft_enabled_return();
+ return soft_disable_mask_return();
}
extern void arch_local_irq_restore(unsigned long);
@@ -102,7 +102,7 @@ static inline void arch_local_irq_enable(void)
static inline unsigned long arch_local_irq_save(void)
{
- return soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX);
+ return soft_disable_mask_set_return(IRQ_DISABLE_MASK_LINUX);
}
static inline void arch_local_irq_disable(void)
@@ -131,7 +131,7 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
- flags = soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX);\
+ flags = soft_disable_mask_set_return(IRQ_DISABLE_MASK_LINUX);\
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \
trace_hardirqs_off(); \
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0e90dbe46b5b..ec2086a76324 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -869,7 +869,7 @@ static inline void kvmppc_fix_ee_before_entry(void)
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
- soft_enabled_set(IRQ_DISABLE_MASK_NONE);
+ soft_disable_mask_set(IRQ_DISABLE_MASK_NONE);
#endif
}
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index dc88a31cc79a..000b3b397b04 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -158,7 +158,7 @@ struct paca_struct {
u64 saved_r1; /* r1 save for RTAS calls or PM */
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
- u8 soft_enabled; /* irq soft-enable flag */
+ u8 soft_disable_mask; /* mask for irq soft disable */
u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6e95c2c19a7e..0afb57036e6f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -178,7 +178,7 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
- OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled);
+ OFFSET(PACASOFTIRQEN, paca_struct, soft_disable_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 198f4cb3cb5a..63f7838cf9a6 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -206,7 +206,7 @@ notrace void arch_local_irq_restore(unsigned long en)
unsigned int replay;
/* Write the new soft-enabled value */
- soft_enabled_set(en);
+ soft_disable_mask_set(en);
if (en == IRQ_DISABLE_MASK_LINUX)
return;
/*
@@ -252,7 +252,7 @@ notrace void arch_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_TRACE_IRQFLAGS */
- soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
+ soft_disable_mask_set(IRQ_DISABLE_MASK_LINUX);
/*
* Check if anything needs to be re-emitted. We haven't
@@ -262,7 +262,7 @@ notrace void arch_local_irq_restore(unsigned long en)
replay = __check_irq_replay();
/* We can soft-enable now */
- soft_enabled_set(IRQ_DISABLE_MASK_NONE);
+ soft_disable_mask_set(IRQ_DISABLE_MASK_NONE);
/*
* And replay if we have to. This will return with interrupts
@@ -337,7 +337,7 @@ bool prep_irq_for_idle(void)
* of entering the low power state.
*/
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
- soft_enabled_set(IRQ_DISABLE_MASK_NONE);
+ soft_disable_mask_set(IRQ_DISABLE_MASK_NONE);
/* Tell the caller to enter the low power state */
return true;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75c10d4aaf30..ad2d5ac734e0 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -277,7 +277,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
return get_user_dscr(task, data);
/*
- * softe copies paca->soft_enabled variable state. Since soft_enabled is
+ * softe copies paca->soft_disable_mask variable state. Since soft_disable_mask is
* no more used as a flag, lets force usr to alway see the softe value as 1
* which means interrupts are not soft disabled.
*/
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 23a10bb0d5b6..de557830a689 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -189,7 +189,7 @@ static void __init fixup_boot_paca(void)
/* Allow percpu accesses to work until we setup percpu data */
get_paca()->data_offset = 0;
/* Mark interrupts disabled in PACA */
- soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
+ soft_disable_mask_set(IRQ_DISABLE_MASK_LINUX);
}
static void __init configure_exceptions(void)
@@ -345,7 +345,7 @@ void __init early_setup(unsigned long dt_ptr)
void early_setup_secondary(void)
{
/* Mark interrupts disabled in PACA */
- soft_enabled_set(IRQ_DISABLE_MASK_LINUX);
+ soft_disable_mask_set(IRQ_DISABLE_MASK_LINUX);
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 96402dcb38d1..f505d8fe4c05 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -244,7 +244,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
void accumulate_stolen_time(void)
{
u64 sst, ust;
- unsigned long save_soft_enabled;
+ unsigned long save_soft_disable_mask;
struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before
@@ -253,7 +253,7 @@ void accumulate_stolen_time(void)
* needs to reflect that so various debug stuff doesn't
* complain
*/
- save_soft_enabled = soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX);
+ save_soft_disable_mask = soft_disable_mask_set_return(IRQ_DISABLE_MASK_LINUX);
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
@@ -261,7 +261,7 @@ void accumulate_stolen_time(void)
acct->utime -= ust;
acct->steal_time += ust + sst;
- soft_enabled_set(save_soft_enabled);
+ soft_disable_mask_set(save_soft_disable_mask);
}
static inline u64 calculate_stolen_time(u64 stop_tb)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4df4925a14d1..93a36680e95a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -884,7 +884,7 @@ void flush_dcache_icache_hugepage(struct page *page)
* So long as we atomically load page table pointers we are safe against teardown,
* we can follow the address down to the the page and take a ref on it.
* This function need to be called with interrupts disabled. We use this variant
- * when we have MSR[EE] = 0 but the paca->soft_enabled = IRQ_DISABLE_MASK_NONE
+ * when we have MSR[EE] = 0 but the paca->soft_disable_mask = IRQ_DISABLE_MASK_NONE
*/
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 08e367e3e8c3..f9f4f2b1df29 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1580,7 +1580,7 @@ static void excprint(struct pt_regs *fp)
printf(" current = 0x%lx\n", current);
#ifdef CONFIG_PPC64
printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n",
- local_paca, local_paca->soft_enabled, local_paca->irq_happened);
+ local_paca, local_paca->soft_disable_mask, local_paca->irq_happened);
#endif
if (current) {
printf(" pid = %ld, comm = %s\n",
@@ -2310,7 +2310,7 @@ static void dump_one_paca(int cpu)
DUMP(p, stab_rr, "lx");
DUMP(p, saved_r1, "lx");
DUMP(p, trap_save, "x");
- DUMP(p, soft_enabled, "x");
+ DUMP(p, soft_disable_mask, "x");
DUMP(p, irq_happened, "x");
DUMP(p, io_sync, "x");
DUMP(p, irq_work_pending, "x");
--
2.7.4
next prev parent reply other threads:[~2017-08-03 3:51 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-08-03 3:49 [PATCH v9 00/14] powerpc: "paca->soft_enabled" based local atomic operation implementation Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 01/14] powerpc: Add #defs for paca->soft_enabled flags Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 02/14] powerpc: move set_soft_enabled() and rename Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 03/14] powerpc: Use soft_enabled_set api to update paca->soft_enabled Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 04/14] powerpc: Add soft_enabled manipulation functions Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 05/14] powerpc/irq: Cleanup hard_irq_disable() macro Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 06/14] powerpc/irq: Fix arch_local_irq_disable() in book3s Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 07/14] powerpc: Modify soft_enable from flag to mask Madhavan Srinivasan
2017-08-03 4:44 ` Nicholas Piggin
2017-08-03 3:49 ` Madhavan Srinivasan [this message]
2017-08-03 3:49 ` [PATCH v9 09/14] powerpc: Avoid using EXCEPTION_PROLOG_1 macro in MASKABLE_* Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 10/14] powerpc: Add support to take additional parameter in MASKABLE_* macro Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 11/14] Add support to mask perf interrupts and replay them Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 12/14] powerpc:Add new kconfig IRQ_DEBUG_SUPPORT Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 13/14] powerpc: Add new set of soft_disable_mask_ functions Madhavan Srinivasan
2017-08-03 3:49 ` [PATCH v9 14/14] powerpc: rewrite local_t using soft_irq Madhavan Srinivasan
2017-08-03 17:50 ` Nicholas Piggin
2017-08-04 1:40 ` Benjamin Herrenschmidt
2017-08-04 9:04 ` Nicholas Piggin
2017-08-04 15:18 ` David Laight
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1501732158-19009-9-git-send-email-maddy@linux.vnet.ibm.com \
--to=maddy@linux.vnet.ibm.com \
--cc=anton@samba.org \
--cc=benh@kernel.crashing.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=mpe@ellerman.id.au \
--cc=npiggin@gmail.com \
--cc=paulus@samba.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).