linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH v2, 1/5] powerpc/cache: add cache flush operation for various e500
@ 2015-08-26 12:09 Chenhui Zhao
  2015-08-26 12:09 ` [PATCH v2,2/5] powerpc/rcpm: add RCPM driver Chenhui Zhao
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-26 12:09 UTC (permalink / raw)
  To: linuxppc-dev, scottwood; +Cc: linux-kernel, Jason.Jin

Various e500 cores have different cache architectures, so they
need different cache flush operations. Therefore, add a callback
function, cpu_down_flush, to struct cpu_spec; the cache flush
operation for the specific kind of e500 is selected at init time.
The callback function flushes all caches inside the current CPU.
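
For reference, the resulting call pattern on a dying CPU is sketched
below. The example_* function name is made up for illustration; the real
caller is smp_85xx_mach_cpu_die() in the smp.c hunk of this patch.

	#include <asm/cputable.h>

	/* sketch only: dispatch through the per-CPU-type callback */
	static void example_cpu_down_flush(void)
	{
		/*
		 * cur_cpu_spec was matched against the PVR at boot, so this
		 * calls cpu_down_flush_e500v2/e500mc/e5500/e6500 as selected
		 * by the cputable entry.
		 */
		cur_cpu_spec->cpu_down_flush();
	}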

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
 arch/powerpc/include/asm/cacheflush.h     |   2 -
 arch/powerpc/include/asm/cputable.h       |  11 +++
 arch/powerpc/kernel/asm-offsets.c         |   3 +
 arch/powerpc/kernel/cpu_setup_fsl_booke.S | 112 ++++++++++++++++++++++++++++++
 arch/powerpc/kernel/cputable.c            |   4 ++
 arch/powerpc/kernel/head_fsl_booke.S      |  74 --------------------
 arch/powerpc/platforms/85xx/smp.c         |   5 +-
 7 files changed, 133 insertions(+), 78 deletions(-)

diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 30b35ff..729fde4 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
-extern void __flush_disable_L1(void);
-
 extern void flush_icache_range(unsigned long, unsigned long);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
 				    struct page *page, unsigned long addr,
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b118072..d89b04a 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,13 @@ extern int machine_check_e500(struct pt_regs *regs);
 extern int machine_check_e200(struct pt_regs *regs);
 extern int machine_check_47x(struct pt_regs *regs);
 
+#if defined(CONFIG_E500)
+extern void cpu_down_flush_e500v2(void);
+extern void cpu_down_flush_e500mc(void);
+extern void cpu_down_flush_e5500(void);
+extern void cpu_down_flush_e6500(void);
+#endif
+
 /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
 struct cpu_spec {
 	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
@@ -59,6 +66,10 @@ struct cpu_spec {
 	unsigned int	icache_bsize;
 	unsigned int	dcache_bsize;
 
+#if defined(CONFIG_E500)
+	/* flush caches inside the current cpu */
+	void (*cpu_down_flush)(void);
+#endif
 	/* number of performance monitor counters */
 	unsigned int	num_pmcs;
 	enum powerpc_pmc_type pmc_type;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9823057..17b672d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -373,6 +373,9 @@ int main(void)
 	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
 	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
 	DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
+#if defined(CONFIG_E500)
+	DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush));
+#endif
 
 	DEFINE(pbe_address, offsetof(struct pbe, address));
 	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index dddba3e..462aed9 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -13,11 +13,13 @@
  *
  */
 
+#include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
 #include <asm/ppc_asm.h>
 #include <asm/mmu-book3e.h>
 #include <asm/asm-offsets.h>
+#include <asm/mpc85xx.h>
 
 _GLOBAL(__e500_icache_setup)
 	mfspr	r0, SPRN_L1CSR1
@@ -233,3 +235,113 @@ _GLOBAL(__setup_cpu_e5500)
 	mtlr	r5
 	blr
 #endif
+
+/* Flush the L1 data cache; applies to e500v2, e500mc and e5500 */
+_GLOBAL(flush_dcache_L1)
+	mfmsr	r10
+	wrteei	0
+
+	mfspr	r3,SPRN_L1CFG0
+	rlwinm	r5,r3,9,3	/* Extract cache block size */
+	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
+				 * are currently defined.
+				 */
+	li	r4,32
+	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
+				 *      log2(number of ways)
+				 */
+	slw	r5,r4,r5	/* r5 = cache block size */
+
+	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
+	mulli	r7,r7,13	/* An 8-way cache will require 13
+				 * loads per set.
+				 */
+	slw	r7,r7,r6
+
+	/* save off HID0 and set DCFA */
+	mfspr	r8,SPRN_HID0
+	ori	r9,r8,HID0_DCFA@l
+	mtspr	SPRN_HID0,r9
+	isync
+
+	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
+	mr	r4, r6
+	mtctr	r7
+
+1:	lwz	r3,0(r4)	/* Load... */
+	add	r4,r4,r5
+	bdnz	1b
+
+	msync
+	mr	r4, r6
+	mtctr	r7
+
+1:	dcbf	0,r4		/* ...and flush. */
+	add	r4,r4,r5
+	bdnz	1b
+
+	/* restore HID0 */
+	mtspr	SPRN_HID0,r8
+	isync
+
+	wrtee r10
+
+	blr
+
+has_L2_cache:
+	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
+	mfspr	r3, SPRN_SVR
+	/* shift right by 8 bits and clear E bit of SVR */
+	rlwinm	r4, r3, 24, ~0x800
+
+	lis	r3, SVR_P2040@h
+	ori	r3, r3, SVR_P2040@l
+	cmpw	r4, r3
+	beq	1f
+
+	li	r3, 1
+	blr
+1:
+	li	r3, 0
+	blr
+
+/* flush backside L2 cache */
+flush_backside_L2_cache:
+	mflr	r10
+	bl	has_L2_cache
+	mtlr	r10
+	cmpwi	r3, 0
+	beq	2f
+
+	/* Flush the L2 cache */
+	mfspr	r3, SPRN_L2CSR0
+	ori	r3, r3, L2CSR0_L2FL@l
+	msync
+	isync
+	mtspr	SPRN_L2CSR0,r3
+	isync
+
+	/* check if it is complete */
+1:	mfspr	r3,SPRN_L2CSR0
+	andi.	r3, r3, L2CSR0_L2FL@l
+	bne	1b
+2:
+	blr
+
+_GLOBAL(cpu_down_flush_e500v2)
+	mflr r0
+	bl	flush_dcache_L1
+	mtlr r0
+	blr
+
+_GLOBAL(cpu_down_flush_e500mc)
+_GLOBAL(cpu_down_flush_e5500)
+	mflr r0
+	bl	flush_dcache_L1
+	bl	flush_backside_L2_cache
+	mtlr r0
+	blr
+
+/* The e6500 L1 data cache contains no modified data; no flush is required */
+_GLOBAL(cpu_down_flush_e6500)
+	blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 7d80bfd..d65b45a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2023,6 +2023,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500v2,
 		.machine_check		= machine_check_e500,
 		.platform		= "ppc8548",
+		.cpu_down_flush		= cpu_down_flush_e500v2,
 	},
 #else
 	{	/* e500mc */
@@ -2042,6 +2043,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_e500mc,
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce500mc",
+		.cpu_down_flush		= cpu_down_flush_e500mc,
 	},
 #endif /* CONFIG_PPC_E500MC */
 #endif /* CONFIG_PPC32 */
@@ -2066,6 +2068,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce5500",
+		.cpu_down_flush		= cpu_down_flush_e5500,
 	},
 	{	/* e6500 */
 		.pvr_mask		= 0xffff0000,
@@ -2088,6 +2091,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 #endif
 		.machine_check		= machine_check_e500mc,
 		.platform		= "ppce6500",
+		.cpu_down_flush		= cpu_down_flush_e6500,
 	},
 #endif /* CONFIG_PPC_E500MC */
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fffd1f9..709bc50 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -1075,80 +1075,6 @@ _GLOBAL(set_context)
 	isync			/* Force context change */
 	blr
 
-_GLOBAL(flush_dcache_L1)
-	mfspr	r3,SPRN_L1CFG0
-
-	rlwinm	r5,r3,9,3	/* Extract cache block size */
-	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
-				 * are currently defined.
-				 */
-	li	r4,32
-	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
-				 *      log2(number of ways)
-				 */
-	slw	r5,r4,r5	/* r5 = cache block size */
-
-	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
-	mulli	r7,r7,13	/* An 8-way cache will require 13
-				 * loads per set.
-				 */
-	slw	r7,r7,r6
-
-	/* save off HID0 and set DCFA */
-	mfspr	r8,SPRN_HID0
-	ori	r9,r8,HID0_DCFA@l
-	mtspr	SPRN_HID0,r9
-	isync
-
-	lis	r4,KERNELBASE@h
-	mtctr	r7
-
-1:	lwz	r3,0(r4)	/* Load... */
-	add	r4,r4,r5
-	bdnz	1b
-
-	msync
-	lis	r4,KERNELBASE@h
-	mtctr	r7
-
-1:	dcbf	0,r4		/* ...and flush. */
-	add	r4,r4,r5
-	bdnz	1b
-	
-	/* restore HID0 */
-	mtspr	SPRN_HID0,r8
-	isync
-
-	blr
-
-/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
-_GLOBAL(__flush_disable_L1)
-	mflr	r10
-	bl	flush_dcache_L1	/* Flush L1 d-cache */
-	mtlr	r10
-
-	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
-	li	r5, 2
-	rlwimi	r4, r5, 0, 3
-
-	msync
-	isync
-	mtspr	SPRN_L1CSR0, r4
-	isync
-
-1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
-	andi.	r4, r4, 2
-	bne	1b
-
-	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
-	li	r5, 2
-	rlwimi	r4, r5, 0, 3
-
-	mtspr	SPRN_L1CSR1, r4
-	isync
-
-	blr
-
 #ifdef CONFIG_SMP
 /* When we get here, r24 needs to hold the CPU # */
 	.globl __secondary_start
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index b8b8216..0b75e8e 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -139,7 +139,8 @@ static void smp_85xx_mach_cpu_die(void)
 
 	mtspr(SPRN_TCR, 0);
 
-	__flush_disable_L1();
+	cur_cpu_spec->cpu_down_flush();
+
 	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
 	mtspr(SPRN_HID0, tmp);
 	isync();
@@ -345,7 +346,7 @@ void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 	local_irq_disable();
 
 	if (secondary) {
-		__flush_disable_L1();
+		cur_cpu_spec->cpu_down_flush();
 		atomic_inc(&kexec_down_cpus);
 		/* loop forever */
 		while (1);
-- 
1.9.1


* [PATCH v2,2/5] powerpc/rcpm: add RCPM driver
  2015-08-26 12:09 [PATCH v2, 1/5] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
@ 2015-08-26 12:09 ` Chenhui Zhao
  2015-08-26 20:35   ` Scott Wood
  2015-08-26 12:09 ` [PATCH v2,3/5] Powerpc: mpc85xx: refactor the PM operations Chenhui Zhao
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-26 12:09 UTC (permalink / raw)
  To: linuxppc-dev, scottwood; +Cc: linux-kernel, Jason.Jin

Freescale QorIQ series processors contain an RCPM (Run Control/Power
Management) block, which performs tasks associated with device run
control and power management.

The driver implements several features: masking/unmasking IRQs,
entering/exiting low power states, freezing the time base, etc.
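
As a rough illustration of how platform code is expected to consume
these ops (the real wiring lands in later patches of this series; the
example_* name below is hypothetical):

	#include <asm/fsl_pm.h>

	/* sketch: park a dying cpu through the RCPM ops */
	static void example_cpu_offline(int cpu)
	{
		qoriq_pm_ops->irq_mask(cpu);	/* keep pending irqs from waking it */
		qoriq_pm_ops->cpu_die(cpu);	/* PH15 on rcpm v1, PH20 on rcpm v2 */
	}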

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
major changes for v2:
* rcpm_v1_cpu_die() and rcpm_v2_cpu_die() are now executed by the dying
  cpu, which is more stable

 Documentation/devicetree/bindings/soc/fsl/rcpm.txt |  44 +++
 arch/powerpc/include/asm/fsl_guts.h                | 105 ++++++
 arch/powerpc/include/asm/fsl_pm.h                  |  50 +++
 arch/powerpc/platforms/85xx/Kconfig                |   1 +
 arch/powerpc/platforms/85xx/common.c               |   3 +
 arch/powerpc/sysdev/Kconfig                        |   5 +
 arch/powerpc/sysdev/Makefile                       |   1 +
 arch/powerpc/sysdev/fsl_rcpm.c                     | 390 +++++++++++++++++++++
 8 files changed, 599 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/soc/fsl/rcpm.txt
 create mode 100644 arch/powerpc/include/asm/fsl_pm.h
 create mode 100644 arch/powerpc/sysdev/fsl_rcpm.c

diff --git a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
new file mode 100644
index 0000000..dc52f70
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
@@ -0,0 +1,44 @@
+* Run Control and Power Management
+
+The RCPM performs all device-level tasks associated with device run control
+and power management.
+
+Required properties:
+  - reg : Offset and length of the register set of RCPM block.
+  - compatible : Should contain a chip-specific RCPM block compatible string
+	and (if applicable) may contain a chassis-version RCPM compatible string.
+	Chip-specific strings are of the form "fsl,<chip>-rcpm", such as:
+	* "fsl,p2041-rcpm"
+	* "fsl,p3041-rcpm"
+	* "fsl,p4080-rcpm"
+	* "fsl,p5020-rcpm"
+	* "fsl,p5040-rcpm"
+	* "fsl,t4240-rcpm"
+	* "fsl,b4420-rcpm"
+	* "fsl,b4860-rcpm"
+
+	Chassis-version RCPM strings include:
+	* "fsl,qoriq-rcpm-1.0": for chassis 1.0 rcpm
+	* "fsl,qoriq-rcpm-2.0": for chassis 2.0 rcpm
+	* "fsl,qoriq-rcpm-2.1": for chassis 2.1 rcpm
+
+All references to "1.0" and "2.0" refer to the QorIQ chassis version to
+which the chip complies.
+Chassis Version		Example Chips
+---------------		-------------------------------
+1.0					p4080, p5020, p5040, p2041, p3041
+2.0					t4240, b4860, b4420
+2.1					t1040
+
+Example:
+The RCPM node for T4240:
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
+		reg = <0xe2000 0x1000>;
+	};
+
+The RCPM node for P4080:
+	rcpm: global-utilities@e2000 {
+		compatible = "fsl,qoriq-rcpm-1.0";
+		reg = <0xe2000 0x1000>;
+	};
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index 43b6bb1..a67413c 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -188,5 +188,110 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
 
 #endif
 
+struct ccsr_rcpm_v1 {
+	u8	res0000[4];
+	__be32	cdozsr;	    /* 0x0004 Core Doze Status Register */
+	u8	res0008[4];
+	__be32	cdozcr;	    /* 0x000c Core Doze Control Register */
+	u8	res0010[4];
+	__be32	cnapsr;	    /* 0x0014 Core Nap Status Register */
+	u8	res0018[4];
+	__be32	cnapcr;	    /* 0x001c Core Nap Control Register */
+	u8	res0020[4];
+	__be32	cdozpsr;    /* 0x0024 Core Doze Previous Status Register */
+	u8	res0028[4];
+	__be32	cnappsr;    /* 0x002c Core Nap Previous Status Register */
+	u8	res0030[4];
+	__be32	cwaitsr;    /* 0x0034 Core Wait Status Register */
+	u8	res0038[4];
+	__be32	cwdtdsr;    /* 0x003c Core Watchdog Detect Status Register */
+	__be32	powmgtcsr;  /* 0x0040 PM Control&Status Register */
+#define RCPM_POWMGTCSR_SLP	0x00020000
+	u8	res0044[12];
+	__be32	ippdexpcr;  /* 0x0050 IP Powerdown Exception Control Register */
+	u8	res0054[16];
+	__be32	cpmimr;	    /* 0x0064 Core PM IRQ Mask Register */
+	u8	res0068[4];
+	__be32	cpmcimr;    /* 0x006c Core PM Critical IRQ Mask Register */
+	u8	res0070[4];
+	__be32	cpmmcmr;    /* 0x0074 Core PM Machine Check Mask Register */
+	u8	res0078[4];
+	__be32	cpmnmimr;   /* 0x007c Core PM NMI Mask Register */
+	u8	res0080[4];
+	__be32	ctbenr;	    /* 0x0084 Core Time Base Enable Register */
+	u8	res0088[4];
+	__be32	ctbckselr;  /* 0x008c Core Time Base Clock Select Register */
+	u8	res0090[4];
+	__be32	ctbhltcr;   /* 0x0094 Core Time Base Halt Control Register */
+	u8	res0098[4];
+	__be32	cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */
+};
+
+struct ccsr_rcpm_v2 {
+	u8	res_00[12];
+	__be32	tph10sr0;	/* Thread PH10 Status Register */
+	u8	res_10[12];
+	__be32	tph10setr0;	/* Thread PH10 Set Control Register */
+	u8	res_20[12];
+	__be32	tph10clrr0;	/* Thread PH10 Clear Control Register */
+	u8	res_30[12];
+	__be32	tph10psr0;	/* Thread PH10 Previous Status Register */
+	u8	res_40[12];
+	__be32	twaitsr0;	/* Thread Wait Status Register */
+	u8	res_50[96];
+	__be32	pcph15sr;	/* Physical Core PH15 Status Register */
+	__be32	pcph15setr;	/* Physical Core PH15 Set Control Register */
+	__be32	pcph15clrr;	/* Physical Core PH15 Clear Control Register */
+	__be32	pcph15psr;	/* Physical Core PH15 Prev Status Register */
+	u8	res_c0[16];
+	__be32	pcph20sr;	/* Physical Core PH20 Status Register */
+	__be32	pcph20setr;	/* Physical Core PH20 Set Control Register */
+	__be32	pcph20clrr;	/* Physical Core PH20 Clear Control Register */
+	__be32	pcph20psr;	/* Physical Core PH20 Prev Status Register */
+	__be32	pcpw20sr;	/* Physical Core PW20 Status Register */
+	u8	res_e0[12];
+	__be32	pcph30sr;	/* Physical Core PH30 Status Register */
+	__be32	pcph30setr;	/* Physical Core PH30 Set Control Register */
+	__be32	pcph30clrr;	/* Physical Core PH30 Clear Control Register */
+	__be32	pcph30psr;	/* Physical Core PH30 Prev Status Register */
+	u8	res_100[32];
+	__be32	ippwrgatecr;	/* IP Power Gating Control Register */
+	u8	res_124[12];
+	__be32	powmgtcsr;	/* Power Management Control & Status Reg */
+#define RCPM_POWMGTCSR_LPM20_RQ		0x00100000
+#define RCPM_POWMGTCSR_LPM20_ST		0x00000200
+#define RCPM_POWMGTCSR_P_LPM20_ST	0x00000100
+	u8	res_134[12];
+	__be32	ippdexpcr[4];	/* IP Powerdown Exception Control Reg */
+	u8	res_150[12];
+	__be32	tpmimr0;	/* Thread PM Interrupt Mask Reg */
+	u8	res_160[12];
+	__be32	tpmcimr0;	/* Thread PM Crit Interrupt Mask Reg */
+	u8	res_170[12];
+	__be32	tpmmcmr0;	/* Thread PM Machine Check Interrupt Mask Reg */
+	u8	res_180[12];
+	__be32	tpmnmimr0;	/* Thread PM NMI Mask Reg */
+	u8	res_190[12];
+	__be32	tmcpmaskcr0;	/* Thread Machine Check Mask Control Reg */
+	__be32	pctbenr;	/* Physical Core Time Base Enable Reg */
+	__be32	pctbclkselr;	/* Physical Core Time Base Clock Select */
+	__be32	tbclkdivr;	/* Time Base Clock Divider Register */
+	u8	res_1ac[4];
+	__be32	ttbhltcr[4];	/* Thread Time Base Halt Control Register */
+	__be32	clpcl10sr;	/* Cluster PCL10 Status Register */
+	__be32	clpcl10setr;	/* Cluster PCL30 Set Control Register */
+	__be32	clpcl10clrr;	/* Cluster PCL30 Clear Control Register */
+	__be32	clpcl10psr;	/* Cluster PCL30 Prev Status Register */
+	__be32	cddslpsetr;	/* Core Domain Deep Sleep Set Register */
+	__be32	cddslpclrr;	/* Core Domain Deep Sleep Clear Register */
+	__be32	cdpwroksetr;	/* Core Domain Power OK Set Register */
+	__be32	cdpwrokclrr;	/* Core Domain Power OK Clear Register */
+	__be32	cdpwrensr;	/* Core Domain Power Enable Status Register */
+	__be32	cddslsr;	/* Core Domain Deep Sleep Status Register */
+	u8	res_1e8[8];
+	__be32	dslpcntcr[8];	/* Deep Sleep Counter Cfg Register */
+	u8	res_300[3568];
+};
+
 #endif
 #endif
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 0000000..3a36622
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,50 @@
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+#ifdef __KERNEL__
+
+#define E500_PM_PH10	1
+#define E500_PM_PH15	2
+#define E500_PM_PH20	3
+#define E500_PM_PH30	4
+#define E500_PM_DOZE	E500_PM_PH10
+#define E500_PM_NAP	E500_PM_PH15
+
+#define PLAT_PM_SLEEP	20
+#define PLAT_PM_LPM20	30
+
+#define FSL_PM_SLEEP		(1 << 0)
+#define FSL_PM_DEEP_SLEEP	(1 << 1)
+
+struct fsl_pm_ops {
+	/* mask pending interrupts to the RCPM from MPIC */
+	void (*irq_mask)(int cpu);
+
+	/* unmask pending interrupts to the RCPM from MPIC */
+	void (*irq_unmask)(int cpu);
+	void (*cpu_enter_state)(int cpu, int state);
+	void (*cpu_exit_state)(int cpu, int state);
+	void (*cpu_up_prepare)(int cpu);
+	void (*cpu_die)(int cpu);
+	int (*plat_enter_sleep)(void);
+	void (*freeze_time_base)(bool freeze);
+
+	/* keep the power of IP blocks during sleep/deep sleep */
+	void (*set_ip_power)(bool enable, u32 mask);
+
+	/* get platform supported power management modes */
+	unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+#endif /* __KERNEL__ */
+#endif /* __PPC_FSL_PM_H */
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 97915fe..e626461 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -8,6 +8,7 @@ menuconfig FSL_SOC_BOOKE
 	select FSL_PCI if PCI
 	select SERIAL_8250_EXTENDED if SERIAL_8250
 	select SERIAL_8250_SHARE_IRQ if SERIAL_8250
+	select FSL_CORENET_RCPM if PPC_E500MC
 	default y
 
 if FSL_SOC_BOOKE
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 7bfb9b1..91475f5 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -10,10 +10,13 @@
 #include <linux/of_platform.h>
 
 #include <asm/qe.h>
+#include <asm/fsl_pm.h>
 #include <sysdev/cpm2_pic.h>
 
 #include "mpc85xx.h"
 
+const struct fsl_pm_ops *qoriq_pm_ops;
+
 static const struct of_device_id mpc85xx_common_ids[] __initconst = {
 	{ .type = "soc", },
 	{ .compatible = "soc", },
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index a19332a..52dc165 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -40,3 +40,8 @@ config SCOM_DEBUGFS
 config GE_FPGA
 	bool
 	default n
+
+config FSL_CORENET_RCPM
+	bool
+	help
+	  This option enables support for RCPM (Run Control/Power Management).
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5b492a6..d0e8a43 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MMIO_NVRAM)	+= mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)		+= fsl_soc.o fsl_mpic_err.o
 obj-$(CONFIG_FSL_PCI)		+= fsl_pci.o $(fsl-msi-obj-y)
 obj-$(CONFIG_FSL_PMC)		+= fsl_pmc.o
+obj-$(CONFIG_FSL_CORENET_RCPM)	+= fsl_rcpm.o
 obj-$(CONFIG_FSL_LBC)		+= fsl_lbc.o
 obj-$(CONFIG_FSL_GTM)		+= fsl_gtm.o
 obj-$(CONFIG_FSL_85XX_CACHE_SRAM)	+= fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
diff --git a/arch/powerpc/sysdev/fsl_rcpm.c b/arch/powerpc/sysdev/fsl_rcpm.c
new file mode 100644
index 0000000..ed59881
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rcpm.c
@@ -0,0 +1,390 @@
+/*
+ * RCPM (Run Control/Power Management) support
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * Author: Chenhui Zhao <chenhui.zhao@freescale.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <asm/fsl_guts.h>
+#include <asm/cputhreads.h>
+#include <asm/fsl_pm.h>
+
+static struct ccsr_rcpm_v1 __iomem *rcpm_v1_regs;
+static struct ccsr_rcpm_v2 __iomem *rcpm_v2_regs;
+static unsigned int fsl_supported_pm_modes;
+
+static void rcpm_v1_irq_mask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	setbits32(&rcpm_v1_regs->cpmimr, mask);
+	setbits32(&rcpm_v1_regs->cpmcimr, mask);
+	setbits32(&rcpm_v1_regs->cpmmcmr, mask);
+	setbits32(&rcpm_v1_regs->cpmnmimr, mask);
+}
+
+static void rcpm_v2_irq_mask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	setbits32(&rcpm_v2_regs->tpmimr0, mask);
+	setbits32(&rcpm_v2_regs->tpmcimr0, mask);
+	setbits32(&rcpm_v2_regs->tpmmcmr0, mask);
+	setbits32(&rcpm_v2_regs->tpmnmimr0, mask);
+}
+
+static void rcpm_v1_irq_unmask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	clrbits32(&rcpm_v1_regs->cpmimr, mask);
+	clrbits32(&rcpm_v1_regs->cpmcimr, mask);
+	clrbits32(&rcpm_v1_regs->cpmmcmr, mask);
+	clrbits32(&rcpm_v1_regs->cpmnmimr, mask);
+}
+
+static void rcpm_v2_irq_unmask(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	clrbits32(&rcpm_v2_regs->tpmimr0, mask);
+	clrbits32(&rcpm_v2_regs->tpmcimr0, mask);
+	clrbits32(&rcpm_v2_regs->tpmmcmr0, mask);
+	clrbits32(&rcpm_v2_regs->tpmnmimr0, mask);
+}
+
+static void rcpm_v1_set_ip_power(bool enable, u32 mask)
+{
+	if (enable)
+		setbits32(&rcpm_v1_regs->ippdexpcr, mask);
+	else
+		clrbits32(&rcpm_v1_regs->ippdexpcr, mask);
+}
+
+static void rcpm_v2_set_ip_power(bool enable, u32 mask)
+{
+	if (enable)
+		setbits32(&rcpm_v2_regs->ippdexpcr[0], mask);
+	else
+		clrbits32(&rcpm_v2_regs->ippdexpcr[0], mask);
+}
+
+static void rcpm_v1_cpu_enter_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	switch (state) {
+	case E500_PM_PH10:
+		setbits32(&rcpm_v1_regs->cdozcr, mask);
+		break;
+	case E500_PM_PH15:
+		setbits32(&rcpm_v1_regs->cnapcr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+		break;
+	}
+}
+
+static void rcpm_v2_cpu_enter_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+	switch (state) {
+	case E500_PM_PH10:
+		/* one bit corresponds to one thread for PH10 of e6500 */
+		setbits32(&rcpm_v2_regs->tph10setr0, 1 << hw_cpu);
+		break;
+	case E500_PM_PH15:
+		setbits32(&rcpm_v2_regs->pcph15setr, mask);
+		break;
+	case E500_PM_PH20:
+		setbits32(&rcpm_v2_regs->pcph20setr, mask);
+		break;
+	case E500_PM_PH30:
+		setbits32(&rcpm_v2_regs->pcph30setr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+	}
+}
+
+static void rcpm_v1_cpu_die(int cpu)
+{
+	rcpm_v1_cpu_enter_state(cpu, E500_PM_PH15);
+}
+
+#ifdef CONFIG_PPC_BOOK3E
+static void qoriq_disable_thread(int cpu)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	int thread = cpu_thread_in_core(hw_cpu);
+
+	mtspr(SPRN_TENC, TEN_THREAD(thread));
+}
+#endif
+
+static void rcpm_v2_cpu_die(int cpu)
+{
+#ifdef CONFIG_PPC_BOOK3E
+	int primary;
+
+	if (threads_per_core == 2) {
+		primary = cpu_first_thread_sibling(cpu);
+		if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
+			/* if both threads are offline, put the cpu in PH20 */
+			rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
+		} else {
+			/* if only one thread is offline, disable the thread */
+			qoriq_disable_thread(cpu);
+		}
+	}
+#endif
+
+	if (threads_per_core == 1) {
+		rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
+		return;
+	}
+}
+
+static void rcpm_v1_cpu_exit_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	unsigned int mask = 1 << hw_cpu;
+
+	switch (state) {
+	case E500_PM_PH10:
+		clrbits32(&rcpm_v1_regs->cdozcr, mask);
+		break;
+	case E500_PM_PH15:
+		clrbits32(&rcpm_v1_regs->cnapcr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+		break;
+	}
+}
+
+static void rcpm_v1_cpu_up_prepare(int cpu)
+{
+	rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
+	rcpm_v1_irq_unmask(cpu);
+}
+
+static void rcpm_v2_cpu_exit_state(int cpu, int state)
+{
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+	switch (state) {
+	case E500_PM_PH10:
+		setbits32(&rcpm_v2_regs->tph10clrr0, 1 << hw_cpu);
+		break;
+	case E500_PM_PH15:
+		setbits32(&rcpm_v2_regs->pcph15clrr, mask);
+		break;
+	case E500_PM_PH20:
+		setbits32(&rcpm_v2_regs->pcph20clrr, mask);
+		break;
+	case E500_PM_PH30:
+		setbits32(&rcpm_v2_regs->pcph30clrr, mask);
+		break;
+	default:
+		pr_warn("Unknown cpu PM state (%d)\n", state);
+	}
+}
+
+static void rcpm_v2_cpu_up_prepare(int cpu)
+{
+	rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20);
+	rcpm_v2_irq_unmask(cpu);
+}
+
+static int rcpm_v1_plat_enter_state(int state)
+{
+	u32 *pmcsr_reg = &rcpm_v1_regs->powmgtcsr;
+	int ret = 0;
+	int result;
+
+	switch (state) {
+	case PLAT_PM_SLEEP:
+		setbits32(pmcsr_reg, RCPM_POWMGTCSR_SLP);
+
+		/* Upon resume, wait for RCPM_POWMGTCSR_SLP bit to be clear. */
+		result = spin_event_timeout(
+		  !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_SLP), 10000, 10);
+		if (!result) {
+			pr_err("timeout waiting for SLP bit to be cleared\n");
+			ret = -ETIMEDOUT;
+		}
+		break;
+	default:
+		pr_warn("Unknown platform PM state (%d)", state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int rcpm_v2_plat_enter_state(int state)
+{
+	u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
+	int ret = 0;
+	int result;
+
+	switch (state) {
+	case PLAT_PM_LPM20:
+		/* clear previous LPM20 status */
+		setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
+		/* enter LPM20 status */
+		setbits32(pmcsr_reg, RCPM_POWMGTCSR_LPM20_RQ);
+
+		/* At this point, the device is in LPM20 status. */
+
+		/* resume ... */
+		result = spin_event_timeout(
+		  !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_LPM20_ST), 10000, 10);
+		if (!result) {
+			pr_err("timeout waiting for LPM20 bit to be cleared\n");
+			ret = -ETIMEDOUT;
+		}
+		break;
+	default:
+		pr_warn("Unknown platform PM state (%d)\n", state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int rcpm_v1_plat_enter_sleep(void)
+{
+	return rcpm_v1_plat_enter_state(PLAT_PM_SLEEP);
+}
+
+static int rcpm_v2_plat_enter_sleep(void)
+{
+	return rcpm_v2_plat_enter_state(PLAT_PM_LPM20);
+}
+
+static void rcpm_common_freeze_time_base(u32 *tben_reg, int freeze)
+{
+	static u32 mask;
+
+	if (freeze) {
+		mask = in_be32(tben_reg);
+		clrbits32(tben_reg, mask);
+	} else {
+		setbits32(tben_reg, mask);
+	}
+
+	/* read back to push the previous write */
+	in_be32(tben_reg);
+}
+
+static void rcpm_v1_freeze_time_base(bool freeze)
+{
+	rcpm_common_freeze_time_base(&rcpm_v1_regs->ctbenr, freeze);
+}
+
+static void rcpm_v2_freeze_time_base(bool freeze)
+{
+	rcpm_common_freeze_time_base(&rcpm_v2_regs->pctbenr, freeze);
+}
+
+static unsigned int rcpm_get_pm_modes(void)
+{
+	return fsl_supported_pm_modes;
+}
+
+static const struct fsl_pm_ops qoriq_rcpm_v1_ops = {
+	.irq_mask = rcpm_v1_irq_mask,
+	.irq_unmask = rcpm_v1_irq_unmask,
+	.cpu_enter_state = rcpm_v1_cpu_enter_state,
+	.cpu_exit_state = rcpm_v1_cpu_exit_state,
+	.cpu_up_prepare = rcpm_v1_cpu_up_prepare,
+	.cpu_die = rcpm_v1_cpu_die,
+	.plat_enter_sleep = rcpm_v1_plat_enter_sleep,
+	.set_ip_power = rcpm_v1_set_ip_power,
+	.freeze_time_base = rcpm_v1_freeze_time_base,
+	.get_pm_modes = rcpm_get_pm_modes,
+};
+
+static const struct fsl_pm_ops qoriq_rcpm_v2_ops = {
+	.irq_mask = rcpm_v2_irq_mask,
+	.irq_unmask = rcpm_v2_irq_unmask,
+	.cpu_enter_state = rcpm_v2_cpu_enter_state,
+	.cpu_exit_state = rcpm_v2_cpu_exit_state,
+	.cpu_up_prepare = rcpm_v2_cpu_up_prepare,
+	.cpu_die = rcpm_v2_cpu_die,
+	.plat_enter_sleep = rcpm_v2_plat_enter_sleep,
+	.set_ip_power = rcpm_v2_set_ip_power,
+	.freeze_time_base = rcpm_v2_freeze_time_base,
+	.get_pm_modes = rcpm_get_pm_modes,
+};
+
+static const struct of_device_id rcpm_matches[] = {
+	{
+		.compatible = "fsl,qoriq-rcpm-1.0",
+		.data = (void *)&qoriq_rcpm_v1_ops,
+	},
+	{
+		.compatible = "fsl,qoriq-rcpm-2.0",
+		.data = (void *)&qoriq_rcpm_v2_ops,
+	},
+	{
+		.compatible = "fsl,qoriq-rcpm-2.1",
+		.data = (void *)&qoriq_rcpm_v2_ops,
+	},
+	{},
+};
+
+int __init fsl_rcpm_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *match;
+	void __iomem *base;
+
+	np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
+	if (!np) {
+		pr_err("can't find the rcpm node.\n");
+		return -ENODEV;
+	}
+
+	base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!base) {
+		pr_err("of_iomap() error.\n");
+		return -ENOMEM;
+	}
+
+	rcpm_v1_regs = base;
+	rcpm_v2_regs = base;
+
+	/* support sleep by default */
+	fsl_supported_pm_modes = FSL_PM_SLEEP;
+
+	qoriq_pm_ops = match->data;
+
+	return 0;
+}
-- 
1.9.1


* [PATCH v2,3/5] Powerpc: mpc85xx: refactor the PM operations
  2015-08-26 12:09 [PATCH v2, 1/5] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
  2015-08-26 12:09 ` [PATCH v2,2/5] powerpc/rcpm: add RCPM driver Chenhui Zhao
@ 2015-08-26 12:09 ` Chenhui Zhao
  2015-08-26 12:09 ` [PATCH v2, 4/5] PowerPC/mpc85xx: Add hotplug support on E5500 and E500MC cores Chenhui Zhao
  2015-08-26 12:09 ` [PATCH v2,5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500 Chenhui Zhao
  3 siblings, 0 replies; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-26 12:09 UTC (permalink / raw)
  To: linuxppc-dev, scottwood; +Cc: linux-kernel, Jason.Jin

Freescale CoreNet-based and non-CoreNet-based platforms require
different PM operations. This patch extracts the existing PM operations
for non-CoreNet-based platforms into a new file that can accommodate
both platforms, making the PM code structurally clearer.
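
Concretely, smp.c no longer touches the guts registers directly; the
timebase freeze now goes through the ops table. A minimal sketch of the
new call path (the example_* wrapper is illustrative; the real code is
mpc85xx_give_timebase() in the smp.c hunk below):

	#include <asm/fsl_pm.h>

	static void example_give_timebase(void)
	{
		qoriq_pm_ops->freeze_time_base(true);	/* mpc85xx_freeze_time_base() here */
		/* ... existing give/take timebase handshake ... */
		qoriq_pm_ops->freeze_time_base(false);
	}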

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
major changes for v2:
* moved mpc85xx_cpu_die()
* added a qoriq_pm_ops->cpu_die() call in smp_85xx_mach_cpu_die(); it
  puts the dying cpu into a low power mode

 arch/powerpc/platforms/85xx/Makefile         |   1 +
 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c | 106 +++++++++++++++++++++++++++
 arch/powerpc/platforms/85xx/smp.c            |  74 +++++--------------
 arch/powerpc/platforms/85xx/smp.h            |   1 +
 4 files changed, 127 insertions(+), 55 deletions(-)
 create mode 100644 arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c

diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 1fe7fb9..7bc86da 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the PowerPC 85xx linux kernel.
 #
 obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_FSL_PMC)		  += mpc85xx_pm_ops.o
 
 obj-y += common.o
 
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
new file mode 100644
index 0000000..ed356dd
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
@@ -0,0 +1,106 @@
+/*
+ * MPC85xx PM operators
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/io.h>
+#include <asm/fsl_guts.h>
+#include <asm/fsl_pm.h>
+
+static struct ccsr_guts __iomem *guts;
+
+static void mpc85xx_irq_mask(int cpu)
+{
+
+}
+
+static void mpc85xx_irq_unmask(int cpu)
+{
+
+}
+
+static void mpc85xx_cpu_die(int cpu)
+{
+	u32 tmp;
+
+	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
+	mtspr(SPRN_HID0, tmp);
+
+	/* Enter NAP mode. */
+	tmp = mfmsr();
+	tmp |= MSR_WE;
+	asm volatile(
+		"msync\n"
+		"mtmsr %0\n"
+		"isync\n"
+		:
+		: "r" (tmp));
+}
+
+static void mpc85xx_cpu_up_prepare(int cpu)
+{
+
+}
+
+static void mpc85xx_freeze_time_base(bool freeze)
+{
+	uint32_t mask;
+
+	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
+	if (freeze)
+		setbits32(&guts->devdisr, mask);
+	else
+		clrbits32(&guts->devdisr, mask);
+
+	in_be32(&guts->devdisr);
+}
+
+static const struct of_device_id mpc85xx_smp_guts_ids[] = {
+	{ .compatible = "fsl,mpc8572-guts", },
+	{ .compatible = "fsl,p1020-guts", },
+	{ .compatible = "fsl,p1021-guts", },
+	{ .compatible = "fsl,p1022-guts", },
+	{ .compatible = "fsl,p1023-guts", },
+	{ .compatible = "fsl,p2020-guts", },
+	{ .compatible = "fsl,bsc9132-guts", },
+	{},
+};
+
+static const struct fsl_pm_ops mpc85xx_pm_ops = {
+	.freeze_time_base = mpc85xx_freeze_time_base,
+	.irq_mask = mpc85xx_irq_mask,
+	.irq_unmask = mpc85xx_irq_unmask,
+	.cpu_die = mpc85xx_cpu_die,
+	.cpu_up_prepare = mpc85xx_cpu_up_prepare,
+};
+
+int __init mpc85xx_setup_pmc(void)
+{
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
+	if (np) {
+		guts = of_iomap(np, 0);
+		of_node_put(np);
+		if (!guts) {
+			pr_err("Could not map guts node address\n");
+			return -ENOMEM;
+		}
+	}
+
+	qoriq_pm_ops = &mpc85xx_pm_ops;
+
+	return 0;
+}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 0b75e8e..f9552b8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
  * Author: Andy Fleming <afleming@freescale.com>
  * 	   Kumar Gala <galak@kernel.crashing.org>
  *
- * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -15,7 +15,6 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 #include <linux/kexec.h>
 #include <linux/highmem.h>
 #include <linux/cpu.h>
@@ -26,9 +25,9 @@
 #include <asm/mpic.h>
 #include <asm/cacheflush.h>
 #include <asm/dbell.h>
-#include <asm/fsl_guts.h>
 #include <asm/code-patching.h>
 #include <asm/cputhreads.h>
+#include <asm/fsl_pm.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/mpic.h>
@@ -43,24 +42,11 @@ struct epapr_spin_table {
 	u32	pir;
 };
 
-static struct ccsr_guts __iomem *guts;
+#ifdef CONFIG_HOTPLUG_CPU
 static u64 timebase;
 static int tb_req;
 static int tb_valid;
 
-static void mpc85xx_timebase_freeze(int freeze)
-{
-	uint32_t mask;
-
-	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
-	if (freeze)
-		setbits32(&guts->devdisr, mask);
-	else
-		clrbits32(&guts->devdisr, mask);
-
-	in_be32(&guts->devdisr);
-}
-
 static void mpc85xx_give_timebase(void)
 {
 	unsigned long flags;
@@ -71,7 +57,7 @@ static void mpc85xx_give_timebase(void)
 		barrier();
 	tb_req = 0;
 
-	mpc85xx_timebase_freeze(1);
+	qoriq_pm_ops->freeze_time_base(true);
 #ifdef CONFIG_PPC64
 	/*
 	 * e5500/e6500 have a workaround for erratum A-006958 in place
@@ -104,7 +90,7 @@ static void mpc85xx_give_timebase(void)
 	while (tb_valid)
 		barrier();
 
-	mpc85xx_timebase_freeze(0);
+	qoriq_pm_ops->freeze_time_base(false);
 
 	local_irq_restore(flags);
 }
@@ -126,31 +112,25 @@ static void mpc85xx_take_timebase(void)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void smp_85xx_mach_cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
-	u32 tmp;
 
 	local_irq_disable();
+	hard_irq_disable();
+	/* mask all irqs to prevent cpu wakeup */
+	qoriq_pm_ops->irq_mask(cpu);
+
 	idle_task_exit();
-	generic_set_cpu_dead(cpu);
-	mb();
 
 	mtspr(SPRN_TCR, 0);
+	mtspr(SPRN_TSR, mfspr(SPRN_TSR));
 
-	cur_cpu_spec->cpu_down_flush();
+	generic_set_cpu_dead(cpu);
 
-	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
-	mtspr(SPRN_HID0, tmp);
-	isync();
+	cur_cpu_spec->cpu_down_flush();
 
-	/* Enter NAP mode. */
-	tmp = mfmsr();
-	tmp |= MSR_WE;
-	mb();
-	mtmsr(tmp);
-	isync();
+	qoriq_pm_ops->cpu_die(cpu);
 
 	while (1)
 		;
@@ -398,16 +378,6 @@ static void smp_85xx_setup_cpu(int cpu_nr)
 	smp_85xx_basic_setup(cpu_nr);
 }
 
-static const struct of_device_id mpc85xx_smp_guts_ids[] = {
-	{ .compatible = "fsl,mpc8572-guts", },
-	{ .compatible = "fsl,p1020-guts", },
-	{ .compatible = "fsl,p1021-guts", },
-	{ .compatible = "fsl,p1022-guts", },
-	{ .compatible = "fsl,p1023-guts", },
-	{ .compatible = "fsl,p2020-guts", },
-	{},
-};
-
 void __init mpc85xx_smp_init(void)
 {
 	struct device_node *np;
@@ -431,22 +401,16 @@ void __init mpc85xx_smp_init(void)
 		smp_85xx_ops.probe = NULL;
 	}
 
-	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
-	if (np) {
-		guts = of_iomap(np, 0);
-		of_node_put(np);
-		if (!guts) {
-			pr_err("%s: Could not map guts node address\n",
-								__func__);
-			return;
-		}
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_FSL_PMC
+	mpc85xx_setup_pmc();
+#endif
+	if (qoriq_pm_ops) {
 		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
 		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
-#ifdef CONFIG_HOTPLUG_CPU
 		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
-#endif
 	}
-
+#endif
 	smp_ops = &smp_85xx_ops;
 
 #ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
index e2b4493..0b20ae3 100644
--- a/arch/powerpc/platforms/85xx/smp.h
+++ b/arch/powerpc/platforms/85xx/smp.h
@@ -5,6 +5,7 @@
 
 #ifdef CONFIG_SMP
 void __init mpc85xx_smp_init(void);
+int __init mpc85xx_setup_pmc(void);
 #else
 static inline void mpc85xx_smp_init(void)
 {
-- 
1.9.1


* [PATCH v2, 4/5] PowerPC/mpc85xx: Add hotplug support on E5500 and E500MC cores
  2015-08-26 12:09 [PATCH v2, 1/5] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
  2015-08-26 12:09 ` [PATCH v2,2/5] powerpc/rcpm: add RCPM driver Chenhui Zhao
  2015-08-26 12:09 ` [PATCH v2,3/5] Powerpc: mpc85xx: refactor the PM operations Chenhui Zhao
@ 2015-08-26 12:09 ` Chenhui Zhao
  2015-08-26 20:55   ` Scott Wood
  2015-08-26 12:09 ` [PATCH v2,5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500 Chenhui Zhao
  3 siblings, 1 reply; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-26 12:09 UTC (permalink / raw)
  To: linuxppc-dev, scottwood; +Cc: linux-kernel, Jason.Jin

Freescale E500MC and E5500 core-based platforms, such as P4080 and
T1040, support disabling/enabling CPUs dynamically.
This patch adds this feature on those platforms.

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
---
major changes for v2:
* factor out smp_85xx_start_cpu()
* move fsl_rcpm_init() into mpc85xx_smp_init() due to the init sequence
* add hard_irq_disable() after local_irq_save(); on platforms that
  implement lazy enabling/disabling of interrupts, hard_irq_disable()
  ensures interrupts are disabled physically (see the sketch after this
  list)
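
A minimal sketch of that interrupt-hardening pattern (the example_*
wrapper is illustrative; the pattern itself appears verbatim in the
hunks below):

	static void example_critical_section(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		hard_irq_disable();	/* force MSR[EE] off even with lazy irq disabling */
		/* ... timebase sync or spin-table access ... */
		local_irq_restore(flags);
	}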

 arch/powerpc/Kconfig              |   2 +-
 arch/powerpc/include/asm/smp.h    |   3 +
 arch/powerpc/kernel/smp.c         |   7 +-
 arch/powerpc/platforms/85xx/smp.c | 193 ++++++++++++++++++++++----------------
 arch/powerpc/platforms/85xx/smp.h |   1 +
 5 files changed, 122 insertions(+), 84 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5ef2711..dd9e252 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -386,7 +386,7 @@ config SWIOTLB
 config HOTPLUG_CPU
 	bool "Support for enabling/disabling CPUs"
 	depends on SMP && (PPC_PSERIES || \
-	PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
+	PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
 	---help---
 	  Say Y here to be able to disable and re-enable individual
 	  CPUs at runtime on SMP machines.
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 825663c..4ff5b71 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,9 @@ void generic_cpu_die(unsigned int cpu);
 void generic_set_cpu_dead(unsigned int cpu);
 void generic_set_cpu_up(unsigned int cpu);
 int generic_check_cpu_restart(unsigned int cpu);
+int check_cpu_dead(unsigned int cpu);
+#else
+#define generic_set_cpu_up(i)	do { } while (0)
 #endif
 
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..95111f2 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -427,7 +427,7 @@ void generic_cpu_die(unsigned int cpu)
 
 	for (i = 0; i < 100; i++) {
 		smp_rmb();
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
+		if (check_cpu_dead(cpu))
 			return;
 		msleep(100);
 	}
@@ -454,6 +454,11 @@ int generic_check_cpu_restart(unsigned int cpu)
 	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 }
 
+int check_cpu_dead(unsigned int cpu)
+{
+	return per_cpu(cpu_state, cpu) == CPU_DEAD;
+}
+
 static bool secondaries_inhibited(void)
 {
 	return kvm_hv_mode_active();
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index f9552b8..73eb994 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -10,6 +10,8 @@
  * option) any later version.
  */
 
+#define pr_fmt(fmt) "smp: %s: " fmt, __func__
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -52,6 +54,7 @@ static void mpc85xx_give_timebase(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	hard_irq_disable();
 
 	while (!tb_req)
 		barrier();
@@ -100,6 +103,7 @@ static void mpc85xx_take_timebase(void)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	hard_irq_disable();
 
 	tb_req = 1;
 	while (!tb_valid)
@@ -135,8 +139,31 @@ static void smp_85xx_mach_cpu_die(void)
 	while (1)
 		;
 }
+
+static void qoriq_cpu_kill(unsigned int cpu)
+{
+	int i;
+
+	for (i = 0; i < 500; i++) {
+		if (check_cpu_dead(cpu)) {
+#ifdef CONFIG_PPC64
+			paca[cpu].cpu_start = 0;
+#endif
+			return;
+		}
+		msleep(20);
+	}
+	pr_err("CPU%d didn't die...\n", cpu);
+}
 #endif
 
+/*
+ * To keep it compatible with old boot program which uses
+ * cache-inhibit spin table, we need to flush the cache
+ * before accessing spin table to invalidate any stale data.
+ * We also need to flush the cache after writing to spin
+ * table to push data out.
+ */
 static inline void flush_spin_table(void *spin_table)
 {
 	flush_dcache_range((ulong)spin_table,
@@ -168,51 +195,20 @@ static void wake_hw_thread(void *info)
 }
 #endif
 
-static int smp_85xx_kick_cpu(int nr)
+static int smp_85xx_start_cpu(int cpu)
 {
-	unsigned long flags;
-	const u64 *cpu_rel_addr;
-	__iomem struct epapr_spin_table *spin_table;
+	int ret = 0;
 	struct device_node *np;
-	int hw_cpu = get_hard_smp_processor_id(nr);
+	const u64 *cpu_rel_addr;
+	unsigned long flags;
 	int ioremappable;
-	int ret = 0;
-
-	WARN_ON(nr < 0 || nr >= NR_CPUS);
-	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
-
-	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
-
-#ifdef CONFIG_PPC64
-	/* Threads don't use the spin table */
-	if (cpu_thread_in_core(nr) != 0) {
-		int primary = cpu_first_thread_sibling(nr);
-
-		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
-			return -ENOENT;
-
-		if (cpu_thread_in_core(nr) != 1) {
-			pr_err("%s: cpu %d: invalid hw thread %d\n",
-			       __func__, nr, cpu_thread_in_core(nr));
-			return -ENOENT;
-		}
+	int hw_cpu = get_hard_smp_processor_id(cpu);
+	struct epapr_spin_table __iomem *spin_table;
 
-		if (!cpu_online(primary)) {
-			pr_err("%s: cpu %d: primary %d not online\n",
-			       __func__, nr, primary);
-			return -ENOENT;
-		}
-
-		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
-		return 0;
-	}
-#endif
-
-	np = of_get_cpu_node(nr, NULL);
+	np = of_get_cpu_node(cpu, NULL);
 	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
-
-	if (cpu_rel_addr == NULL) {
-		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+	if (!cpu_rel_addr) {
+		pr_err("No cpu-release-addr for cpu %d\n", cpu);
 		return -ENOENT;
 	}
 
@@ -232,28 +228,18 @@ static int smp_85xx_kick_cpu(int nr)
 		spin_table = phys_to_virt(*cpu_rel_addr);
 
 	local_irq_save(flags);
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_HOTPLUG_CPU
-	/* Corresponding to generic_set_cpu_dead() */
-	generic_set_cpu_up(nr);
+	hard_irq_disable();
 
-	if (system_state == SYSTEM_RUNNING) {
-		/*
-		 * To keep it compatible with old boot program which uses
-		 * cache-inhibit spin table, we need to flush the cache
-		 * before accessing spin table to invalidate any staled data.
-		 * We also need to flush the cache after writing to spin
-		 * table to push data out.
-		 */
-		flush_spin_table(spin_table);
-		out_be32(&spin_table->addr_l, 0);
-		flush_spin_table(spin_table);
+	if (qoriq_pm_ops)
+		qoriq_pm_ops->cpu_up_prepare(cpu);
 
+	/* if cpu is not spinning, reset it */
+	if (read_spin_table_addr_l(spin_table) != 1) {
 		/*
 		 * We don't set the BPTR register here since it already points
 		 * to the boot page properly.
 		 */
-		mpic_reset_core(nr);
+		mpic_reset_core(cpu);
 
 		/*
 		 * wait until core is ready...
@@ -263,40 +249,23 @@ static int smp_85xx_kick_cpu(int nr)
 		if (!spin_event_timeout(
 				read_spin_table_addr_l(spin_table) == 1,
 				10000, 100)) {
-			pr_err("%s: timeout waiting for core %d to reset\n",
-							__func__, hw_cpu);
-			ret = -ENOENT;
-			goto out;
+			pr_err("timeout waiting for cpu %d to reset\n",
+				hw_cpu);
+			ret = -EAGAIN;
+			goto err;
 		}
-
-		/*  clear the acknowledge status */
-		__secondary_hold_acknowledge = -1;
 	}
-#endif
-	flush_spin_table(spin_table);
-	out_be32(&spin_table->pir, hw_cpu);
-	out_be32(&spin_table->addr_l, __pa(__early_start));
-	flush_spin_table(spin_table);
-
-	/* Wait a bit for the CPU to ack. */
-	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
-					10000, 100)) {
-		pr_err("%s: timeout waiting for core %d to ack\n",
-						__func__, hw_cpu);
-		ret = -ENOENT;
-		goto out;
-	}
-out:
-#else
-	smp_generic_kick_cpu(nr);
 
 	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
+#ifdef CONFIG_PPC64
 	out_be64((u64 *)(&spin_table->addr_h),
 		__pa(ppc_function_entry(generic_secondary_smp_init)));
-	flush_spin_table(spin_table);
+#else
+	out_be32(&spin_table->addr_l, __pa(__early_start));
 #endif
-
+	flush_spin_table(spin_table);
+err:
 	local_irq_restore(flags);
 
 	if (ioremappable)
@@ -305,6 +274,61 @@ out:
 	return ret;
 }
 
+static int smp_85xx_kick_cpu(int nr)
+{
+	int ret = 0;
+#ifdef CONFIG_PPC64
+	int primary = nr;
+	int primary_hw = get_hard_smp_processor_id(primary);
+#endif
+
+	WARN_ON(nr < 0 || nr >= num_possible_cpus());
+
+	pr_debug("kick CPU #%d\n", nr);
+
+#ifdef CONFIG_PPC64
+	/* Threads don't use the spin table */
+	if (cpu_thread_in_core(nr) != 0) {
+		int primary = cpu_first_thread_sibling(nr);
+
+		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
+			return -ENOENT;
+
+		if (cpu_thread_in_core(nr) != 1) {
+			pr_err("%s: cpu %d: invalid hw thread %d\n",
+			       __func__, nr, cpu_thread_in_core(nr));
+			return -ENOENT;
+		}
+
+		if (!cpu_online(primary)) {
+			pr_err("%s: cpu %d: primary %d not online\n",
+			       __func__, nr, primary);
+			return -ENOENT;
+		}
+
+		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
+		return 0;
+	}
+
+	ret = smp_85xx_start_cpu(primary);
+	if (ret)
+		return ret;
+
+	paca[nr].cpu_start = 1;
+	generic_set_cpu_up(nr);
+
+	return ret;
+#else
+	ret = smp_85xx_start_cpu(nr);
+	if (ret)
+		return ret;
+
+	generic_set_cpu_up(nr);
+
+	return ret;
+#endif
+}
+
 struct smp_ops_t smp_85xx_ops = {
 	.kick_cpu = smp_85xx_kick_cpu,
 	.cpu_bootable = smp_generic_cpu_bootable,
@@ -402,6 +426,10 @@ void __init mpc85xx_smp_init(void)
 	}
 
 #ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_FSL_CORENET_RCPM
+	fsl_rcpm_init();
+#endif
+
 #ifdef CONFIG_FSL_PMC
 	mpc85xx_setup_pmc();
 #endif
@@ -409,6 +437,7 @@ void __init mpc85xx_smp_init(void)
 		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
 		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
 		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
 	}
 #endif
 	smp_ops = &smp_85xx_ops;
diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
index 0b20ae3..8ee19a3 100644
--- a/arch/powerpc/platforms/85xx/smp.h
+++ b/arch/powerpc/platforms/85xx/smp.h
@@ -6,6 +6,7 @@
 #ifdef CONFIG_SMP
 void __init mpc85xx_smp_init(void);
 int __init mpc85xx_setup_pmc(void);
+int __init fsl_rcpm_init(void);
 #else
 static inline void mpc85xx_smp_init(void)
 {
-- 
1.9.1


* [PATCH v2,5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500
  2015-08-26 12:09 [PATCH v2, 1/5] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
                   ` (2 preceding siblings ...)
  2015-08-26 12:09 ` [PATCH v2, 4/5] PowerPC/mpc85xx: Add hotplug support on E5500 and E500MC cores Chenhui Zhao
@ 2015-08-26 12:09 ` Chenhui Zhao
  2015-08-26 22:42   ` [PATCH v2, 5/5] " Scott Wood
  3 siblings, 1 reply; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-26 12:09 UTC (permalink / raw)
  To: linuxppc-dev, scottwood; +Cc: linux-kernel, Jason.Jin

Support Freescale E6500 core-based platforms, such as T4240.
Support disabling/enabling individual CPU threads dynamically.

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---
major changes for v2:
* start Thread1 from Thread0 when only Thread1 needs to be booted,
  replacing the earlier method of changing the cpu physical id (see the
  sketch below)
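
In C terms the new flow is roughly the following; this mirrors
wake_hw_thread() in the smp.c hunk (the example_* name is illustrative),
and the assembly counterpart is book3e_start_thread in head_64.S:

	/* sketch: runs on an already-online thread of the same core */
	static void example_wake_sibling(int cpu)
	{
		unsigned long entry = *(unsigned long *)fsl_secondary_thread_init;
		int hw_cpu = get_hard_smp_processor_id(cpu);

		book3e_start_thread(cpu_thread_in_core(hw_cpu), entry);
	}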

 arch/powerpc/include/asm/cputhreads.h |  9 +++++
 arch/powerpc/include/asm/smp.h        |  1 +
 arch/powerpc/kernel/head_64.S         | 69 ++++++++++++++++++++++++++++++++++-
 arch/powerpc/platforms/85xx/smp.c     | 53 ++++++++++++++-------------
 arch/powerpc/sysdev/fsl_rcpm.c        |  2 +-
 5 files changed, 106 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index ba42e46..9920f61 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_POWERPC_CPUTHREADS_H
 #define _ASM_POWERPC_CPUTHREADS_H
 
+#ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
 
 /*
@@ -95,6 +96,14 @@ static inline int cpu_last_thread_sibling(int cpu)
 }
 
 
+#ifdef CONFIG_PPC_BOOK3E
+void book3e_start_thread(int thread, unsigned long addr);
+void book3e_stop_thread(int thread);
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#define INVALID_THREAD_HWID	0x0fff
 
 #endif /* _ASM_POWERPC_CPUTHREADS_H */
 
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 4ff5b71..a1faa4c 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -200,6 +200,7 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+extern unsigned int booting_thread_hwid;
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d..6df2aa4 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,6 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 #include <asm/hw_irq.h>
+#include <asm/cputhreads.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -181,6 +182,44 @@ exception_marker:
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E
+	.globl	booting_thread_hwid
+booting_thread_hwid:
+	.long  INVALID_THREAD_HWID
+	.align 3
+/*
+ * start threads in the same cpu
+ * input parameters:
+ * r3 = the thread physical id
+ * r4 = the entry point where thread starts
+ */
+_GLOBAL(book3e_start_thread)
+	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
+	cmpi	0, r3, 0
+	bne	10f
+	mttmr	TMRN_IMSR0, r5
+	mttmr	TMRN_INIA0, r4
+	b	11f
+10:
+	mttmr	TMRN_IMSR1, r5
+	mttmr	TMRN_INIA1, r4
+11:
+	isync
+	li	r6, 1
+	sld	r6, r6, r3
+	mtspr	SPRN_TENS, r6
+	isync
+	blr
+
+/*
+ * r3 = the thread physical id
+ */
+_GLOBAL(book3e_stop_thread)
+	li	r4, 1
+	sld	r4, r4, r3
+	mtspr	SPRN_TENC, r4
+	isync
+	blr
+
 _GLOBAL(fsl_secondary_thread_init)
 	/* Enable branch prediction */
 	lis     r3,BUCSR_INIT@h
@@ -197,8 +236,10 @@ _GLOBAL(fsl_secondary_thread_init)
 	 * but the low bit right by two bits so that the cpu numbering is
 	 * continuous.
 	 */
-	mfspr	r3, SPRN_PIR
-	rlwimi	r3, r3, 30, 2, 30
+	bl	10f
+10:	mflr	r5
+	addi	r5,r5,(booting_thread_hwid - 10b)
+	lwz	r3,0(r5)
 	mtspr	SPRN_PIR, r3
 #endif
 
@@ -245,6 +286,30 @@ _GLOBAL(generic_secondary_smp_init)
 	mr	r3,r24
 	mr	r4,r25
 	bl	book3e_secondary_core_init
+
+/*
+ * If we want to boot Thread1, start Thread1 and stop Thread0.
+ * Note that only Thread0 will run the piece of code.
+ */
+	LOAD_REG_ADDR(r3, booting_thread_hwid)
+	lwz     r4, 0(r3)
+	cmpwi	r4, INVALID_THREAD_HWID
+	beq	20f
+	cmpw	r4, r24
+	beq	20f
+
+	/* start Thread1 */
+	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
+	ld	r4, 0(r5)
+	li	r3, 1
+	bl	book3e_start_thread
+
+	/* stop Thread0 */
+	li	r3, 0
+	bl	book3e_stop_thread
+10:
+	b	10b
+20:
 #endif
 
 generic_secondary_common_init:
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 73eb994..61f68ad 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -181,17 +181,11 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
 static void wake_hw_thread(void *info)
 {
 	void fsl_secondary_thread_init(void);
-	unsigned long imsr1, inia1;
-	int nr = *(const int *)info;
+	unsigned long inia;
+	int hw_cpu = get_hard_smp_processor_id(*(const int *)info);
 
-	imsr1 = MSR_KERNEL;
-	inia1 = *(unsigned long *)fsl_secondary_thread_init;
-
-	mttmr(TMRN_IMSR1, imsr1);
-	mttmr(TMRN_INIA1, inia1);
-	mtspr(SPRN_TENS, TEN_THREAD(1));
-
-	smp_generic_kick_cpu(nr);
+	inia = *(unsigned long *)fsl_secondary_thread_init;
+	book3e_start_thread(cpu_thread_in_core(hw_cpu), inia);
 }
 #endif
 
@@ -279,7 +273,6 @@ static int smp_85xx_kick_cpu(int nr)
 	int ret = 0;
 #ifdef CONFIG_PPC64
 	int primary = nr;
-	int primary_hw = get_hard_smp_processor_id(primary);
 #endif
 
 	WARN_ON(nr < 0 || nr >= num_possible_cpus());
@@ -287,33 +280,43 @@ static int smp_85xx_kick_cpu(int nr)
 	pr_debug("kick CPU #%d\n", nr);
 
 #ifdef CONFIG_PPC64
+	booting_thread_hwid = INVALID_THREAD_HWID;
 	/* Threads don't use the spin table */
-	if (cpu_thread_in_core(nr) != 0) {
-		int primary = cpu_first_thread_sibling(nr);
+	if (threads_per_core == 2) {
+		booting_thread_hwid = get_hard_smp_processor_id(nr);
+		primary = cpu_first_thread_sibling(nr);
 
 		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
 			return -ENOENT;
 
-		if (cpu_thread_in_core(nr) != 1) {
-			pr_err("%s: cpu %d: invalid hw thread %d\n",
-			       __func__, nr, cpu_thread_in_core(nr));
-			return -ENOENT;
-		}
-
-		if (!cpu_online(primary)) {
-			pr_err("%s: cpu %d: primary %d not online\n",
-			       __func__, nr, primary);
-			return -ENOENT;
+		/*
+		 * If either one of threads in the same core is online,
+		 * use the online one to start the other.
+		 */
+		if (qoriq_pm_ops)
+			qoriq_pm_ops->cpu_up_prepare(nr);
+
+		if (cpu_online(primary)) {
+			smp_call_function_single(primary,
+					wake_hw_thread, &nr, 1);
+			goto done;
+		} else if (cpu_online(primary + 1)) {
+			smp_call_function_single(primary + 1,
+					wake_hw_thread, &nr, 1);
+			goto done;
 		}
 
-		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
-		return 0;
+		/* If both threads are offline, continue to star primary cpu */
+	} else if (threads_per_core > 2) {
+		pr_err("Do not support more than 2 threads per CPU.");
+		return -EINVAL;
 	}
 
 	ret = smp_85xx_start_cpu(primary);
 	if (ret)
 		return ret;
 
+done:
 	paca[nr].cpu_start = 1;
 	generic_set_cpu_up(nr);
 
diff --git a/arch/powerpc/sysdev/fsl_rcpm.c b/arch/powerpc/sysdev/fsl_rcpm.c
index ed59881..f52d02a 100644
--- a/arch/powerpc/sysdev/fsl_rcpm.c
+++ b/arch/powerpc/sysdev/fsl_rcpm.c
@@ -140,7 +140,7 @@ static void qoriq_disable_thread(int cpu)
 	int hw_cpu = get_hard_smp_processor_id(cpu);
 	int thread = cpu_thread_in_core(hw_cpu);
 
-	mtspr(SPRN_TENC, TEN_THREAD(thread));
+	book3e_stop_thread(thread);
 }
 #endif
 
-- 
1.9.1


* Re: [PATCH v2,2/5] powerpc/rcpm: add RCPM driver
  2015-08-26 12:09 ` [PATCH v2,2/5] powerpc/rcpm: add RCPM driver Chenhui Zhao
@ 2015-08-26 20:35   ` Scott Wood
  2015-08-28  0:40     ` Scott Wood
  0 siblings, 1 reply; 13+ messages in thread
From: Scott Wood @ 2015-08-26 20:35 UTC (permalink / raw)
  To: Chenhui Zhao; +Cc: linuxppc-dev, linux-kernel, Jason.Jin

On Wed, Aug 26, 2015 at 08:09:45PM +0800, Chenhui Zhao wrote:
> +#ifdef CONFIG_PPC_BOOK3E
> +static void qoriq_disable_thread(int cpu)
> +{
> +	int hw_cpu = get_hard_smp_processor_id(cpu);
> +	int thread = cpu_thread_in_core(hw_cpu);
> +
> +	mtspr(SPRN_TENC, TEN_THREAD(thread));
> +}
> +#endif

This file is always used on book3e.  If the intent is to only build this
on 64-bit, use CONFIG_PPC64 rather than relying on the fact that this one
of the confusing mess of BOOKE/BOOK3E symbols is 64-bit-only.

> +static void rcpm_v2_cpu_die(int cpu)
> +{
> +#ifdef CONFIG_PPC_BOOK3E
> +	int primary;
> +
> +	if (threads_per_core == 2) {
> +		primary = cpu_first_thread_sibling(cpu);
> +		if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
> +			/* if both threads are offline, put the cpu in PH20 */
> +			rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
> +		} else {
> +			/* if only one thread is offline, disable the thread */
> +			qoriq_disable_thread(cpu);
> +		}
> +	}
> +#endif
> +
> +	if (threads_per_core == 1) {
> +		rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
> +		return;
> +	}
> +}

That "return;" adds nothing, and it's even more awkward having it on the
one-thread case but not the two-thread case.
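
With that and the CONFIG_PPC64 change above, the function could read
roughly like the following untested sketch (same calls as the patch,
just restructured):

static void rcpm_v2_cpu_die(int cpu)
{
#ifdef CONFIG_PPC64
	if (threads_per_core == 2) {
		int primary = cpu_first_thread_sibling(cpu);

		if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
			/* both threads offline, put the core in PH20 */
			rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
		} else {
			/* only one thread offline, disable just that thread */
			qoriq_disable_thread(cpu);
		}
	}
#endif

	if (threads_per_core == 1)
		rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
}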

> +static void rcpm_v1_cpu_up_prepare(int cpu)
> +{
> +	rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
> +	rcpm_v1_irq_unmask(cpu);
> +}
> +
> +static void rcpm_v2_cpu_exit_state(int cpu, int state)
> +{
> +	int hw_cpu = get_hard_smp_processor_id(cpu);
> +	u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);

Are you sure cpu_core_index_of_thread() is supposed to take a hardware
cpu id?  The only current user, pseries_energy.c, has the comment
"Convert logical cpu number to core number".

> +static const struct of_device_id rcpm_matches[] = {
> +	{
> +		.compatible = "fsl,qoriq-rcpm-1.0",
> +		.data = (void *)&qoriq_rcpm_v1_ops,
> +	},
> +	{
> +		.compatible = "fsl,qoriq-rcpm-2.0",
> +		.data = (void *)&qoriq_rcpm_v2_ops,
> +	},
> +	{
> +		.compatible = "fsl,qoriq-rcpm-2.1",
> +		.data = (void *)&qoriq_rcpm_v2_ops,
> +	},
> +	{},
> +};

Unnecessary (and const-unsafe) casts.
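
of_device_id.data is already a const void *, so the initializers can be
plain assignments (sketch; assumes the qoriq_rcpm_*_ops structures are,
or can be made, const):

static const struct of_device_id rcpm_matches[] = {
	{
		.compatible = "fsl,qoriq-rcpm-1.0",
		.data = &qoriq_rcpm_v1_ops,
	},
	{
		.compatible = "fsl,qoriq-rcpm-2.0",
		.data = &qoriq_rcpm_v2_ops,
	},
	{
		.compatible = "fsl,qoriq-rcpm-2.1",
		.data = &qoriq_rcpm_v2_ops,
	},
	{},
};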

> +
> +int __init fsl_rcpm_init(void)
> +{
> +	struct device_node *np;
> +	const struct of_device_id *match;
> +	void __iomem *base;
> +
> +	np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
> +	if (!np) {
> +		pr_err("can't find the rcpm node.\n");
> +		return -ENODEV;
> +	}

It's not an error for the device tree node to not have this.
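
Failing quietly would be enough, e.g. (sketch only, same lookup as in
the patch):

	np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
	if (!np)
		return -ENODEV;	/* no RCPM node on this board, nothing to do */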

-Scott


* Re: [PATCH v2, 4/5] PowerPC/mpc85xx: Add hotplug support on E5500 and E500MC cores
  2015-08-26 12:09 ` [PATCH v2, 4/5] PowerPC/mpc85xx: Add hotplug support on E5500 and E500MC cores Chenhui Zhao
@ 2015-08-26 20:55   ` Scott Wood
  2015-08-28  0:47     ` [PATCH v2,4/5] " Chenhui Zhao
  0 siblings, 1 reply; 13+ messages in thread
From: Scott Wood @ 2015-08-26 20:55 UTC (permalink / raw)
  To: Chenhui Zhao; +Cc: linuxppc-dev, linux-kernel, Jason.Jin

On Wed, Aug 26, 2015 at 08:09:47PM +0800, Chenhui Zhao wrote:
> +int check_cpu_dead(unsigned int cpu)
> +{
> +	return per_cpu(cpu_state, cpu) == CPU_DEAD;
> +}

I'm not sure this needs to be a function versus open-coded, but if you do
want to make it a function, make it more obvious from the caller side by
changing it to:

bool is_cpu_dead(unsigned int cpu);

Otherwise if I see "if (check_cpu_dead(cpu))" I don't know if the
if-block is executed if the CPU is dead or if it isn't.
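
Roughly something like this (untested sketch; the per-cpu cpu_state
bookkeeping from the patch is assumed, and the wait loop is only a
hypothetical caller):

bool is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

	/* at a call site the intent is then obvious */
	while (!is_cpu_dead(cpu))
		cpu_relax();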

> diff --git a/arch/powerpc/platforms/85xx/smp.h b/arch/powerpc/platforms/85xx/smp.h
> index 0b20ae3..8ee19a3 100644
> --- a/arch/powerpc/platforms/85xx/smp.h
> +++ b/arch/powerpc/platforms/85xx/smp.h
> @@ -6,6 +6,7 @@
>  #ifdef CONFIG_SMP
>  void __init mpc85xx_smp_init(void);
>  int __init mpc85xx_setup_pmc(void);
> +int __init fsl_rcpm_init(void);
>  #else

Why wasn't this added in the patch that added fsl_rcpm_init()?

-Scott


* Re: [PATCH v2, 5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500
  2015-08-26 12:09 ` [PATCH v2,5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500 Chenhui Zhao
@ 2015-08-26 22:42   ` Scott Wood
  2015-08-28  1:42     ` [PATCH v2,5/5] " Chenhui Zhao
  0 siblings, 1 reply; 13+ messages in thread
From: Scott Wood @ 2015-08-26 22:42 UTC (permalink / raw)
  To: Chenhui Zhao; +Cc: linuxppc-dev, linux-kernel, Jason.Jin

On Wed, Aug 26, 2015 at 08:09:48PM +0800, Chenhui Zhao wrote:
> +	.globl	booting_thread_hwid
> +booting_thread_hwid:
> +	.long  INVALID_THREAD_HWID
> +	.align 3

The commit message goes into no detail about the changes you're making to
thread handling, nor are there relevant comments.

> +/*
> + * r3 = the thread physical id
> + */
> +_GLOBAL(book3e_stop_thread)
> +	li	r4, 1
> +	sld	r4, r4, r3
> +	mtspr	SPRN_TENC, r4
> +	isync
> +	blr

Why did the C code not have an isync, if it's required here?

>  _GLOBAL(fsl_secondary_thread_init)
>  	/* Enable branch prediction */
>  	lis     r3,BUCSR_INIT@h
> @@ -197,8 +236,10 @@ _GLOBAL(fsl_secondary_thread_init)
>  	 * but the low bit right by two bits so that the cpu numbering is
>  	 * continuous.
>  	 */
> -	mfspr	r3, SPRN_PIR
> -	rlwimi	r3, r3, 30, 2, 30
> +	bl	10f
> +10:	mflr	r5
> +	addi	r5,r5,(booting_thread_hwid - 10b)
> +	lwz	r3,0(r5)
>  	mtspr	SPRN_PIR, r3
>  #endif

I assume the reason for this is that, unlike the kexec case, the cpu has
been reset so PIR has been reset?  Don't make me guess -- document.

> @@ -245,6 +286,30 @@ _GLOBAL(generic_secondary_smp_init)
>  	mr	r3,r24
>  	mr	r4,r25
>  	bl	book3e_secondary_core_init
> +
> +/*
> + * If we want to boot Thread1, start Thread1 and stop Thread0.
> + * Note that only Thread0 will run the piece of code.
> + */

What ensures that only thread 0 runs this?  Especially if we're entering
via kdump on thread 1?

s/the piece/this piece/

> +	LOAD_REG_ADDR(r3, booting_thread_hwid)
> +	lwz     r4, 0(r3)
> +	cmpwi	r4, INVALID_THREAD_HWID
> +	beq	20f
> +	cmpw	r4, r24
> +	beq	20f

Do all cores get released from the spin table before the first thread
gets kicked?

> +
> +	/* start Thread1 */
> +	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
> +	ld	r4, 0(r5)
> +	li	r3, 1
> +	bl	book3e_start_thread
> +
> +	/* stop Thread0 */
> +	li	r3, 0
> +	bl	book3e_stop_thread
> +10:
> +	b	10b
> +20:
>  #endif
>  
>  generic_secondary_common_init:
> diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
> index 73eb994..61f68ad 100644
> --- a/arch/powerpc/platforms/85xx/smp.c
> +++ b/arch/powerpc/platforms/85xx/smp.c
> @@ -181,17 +181,11 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
>  static void wake_hw_thread(void *info)
>  {
>  	void fsl_secondary_thread_init(void);
> -	unsigned long imsr1, inia1;
> -	int nr = *(const int *)info;
> +	unsigned long inia;
> +	int hw_cpu = get_hard_smp_processor_id(*(const int *)info);
>  
> -	imsr1 = MSR_KERNEL;
> -	inia1 = *(unsigned long *)fsl_secondary_thread_init;
> -
> -	mttmr(TMRN_IMSR1, imsr1);
> -	mttmr(TMRN_INIA1, inia1);
> -	mtspr(SPRN_TENS, TEN_THREAD(1));
> -
> -	smp_generic_kick_cpu(nr);
> +	inia = *(unsigned long *)fsl_secondary_thread_init;
> +	book3e_start_thread(cpu_thread_in_core(hw_cpu), inia);
>  }
>  #endif
>  
> @@ -279,7 +273,6 @@ static int smp_85xx_kick_cpu(int nr)
>  	int ret = 0;
>  #ifdef CONFIG_PPC64
>  	int primary = nr;
> -	int primary_hw = get_hard_smp_processor_id(primary);
>  #endif
>  
>  	WARN_ON(nr < 0 || nr >= num_possible_cpus());
> @@ -287,33 +280,43 @@ static int smp_85xx_kick_cpu(int nr)
>  	pr_debug("kick CPU #%d\n", nr);
>  
>  #ifdef CONFIG_PPC64
> +	booting_thread_hwid = INVALID_THREAD_HWID;
>  	/* Threads don't use the spin table */
> -	if (cpu_thread_in_core(nr) != 0) {
> -		int primary = cpu_first_thread_sibling(nr);
> +	if (threads_per_core == 2) {
> +		booting_thread_hwid = get_hard_smp_processor_id(nr);

What does setting booting_thread_hwid to INVALID_THREAD_HWID here
accomplish?  If threads_per_core != 2 it would never have been set to
anything else, and if threads_per_core == 2 you immediately overwrite it.

> +		primary = cpu_first_thread_sibling(nr);
>  
>  		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
>  			return -ENOENT;
>  
> -		if (cpu_thread_in_core(nr) != 1) {
> -			pr_err("%s: cpu %d: invalid hw thread %d\n",
> -			       __func__, nr, cpu_thread_in_core(nr));
> -			return -ENOENT;
> -		}
> -
> -		if (!cpu_online(primary)) {
> -			pr_err("%s: cpu %d: primary %d not online\n",
> -			       __func__, nr, primary);
> -			return -ENOENT;
> +		/*
> +		 * If either one of threads in the same core is online,
> +		 * use the online one to start the other.
> +		 */
> +		if (qoriq_pm_ops)
> +			qoriq_pm_ops->cpu_up_prepare(nr);

cpu_up_prepare does rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20).  How do
you know the cpu is already in PH20?  What if this is initial boot?  Are
you relying on it being a no-op in that case?

> +
> +		if (cpu_online(primary)) {
> +			smp_call_function_single(primary,
> +					wake_hw_thread, &nr, 1);
> +			goto done;
> +		} else if (cpu_online(primary + 1)) {
> +			smp_call_function_single(primary + 1,
> +					wake_hw_thread, &nr, 1);
> +			goto done;
>  		}
>  
> -		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
> -		return 0;
> +		/* If both threads are offline, continue to star primary cpu */

s/star/start/

> +	} else if (threads_per_core > 2) {
> +		pr_err("Do not support more than 2 threads per CPU.");

WARN_ONCE(1, "More than 2 threads per core not supported: %d\n",
	  threads_per_core);

-Scott


* Re: [PATCH v2,2/5] powerpc/rcpm: add RCPM driver
  2015-08-26 20:35   ` Scott Wood
@ 2015-08-28  0:40     ` Scott Wood
  2015-08-28  5:18       ` Scott Wood
  0 siblings, 1 reply; 13+ messages in thread
From: Scott Wood @ 2015-08-28  0:40 UTC (permalink / raw)
  To: Scott Wood; +Cc: linuxppc-dev, linux-kernel, Jason.Jin



On Thu, Aug 27, 2015 at 4:35 AM, Scott Wood <scottwood@freescale.com> 
wrote:
> On Wed, Aug 26, 2015 at 08:09:45PM +0800, Chenhui Zhao wrote:
>>  +#ifdef CONFIG_PPC_BOOK3E
>>  +static void qoriq_disable_thread(int cpu)
>>  +{
>>  +	int hw_cpu = get_hard_smp_processor_id(cpu);
>>  +	int thread = cpu_thread_in_core(hw_cpu);
>>  +
>>  +	mtspr(SPRN_TENC, TEN_THREAD(thread));
>>  +}
>>  +#endif
> 
> This file is always used on book3e.  If the intent is to only build 
> this
> on 64-bit, use CONFIG_PPC64 rather than relying on the fact that this 
> one
> of the confusing mess of BOOKE/BOOK3E symbols is 64-bit-only.

OK. Will use CONFIG_PPC64.

> 
>>  +static void rcpm_v2_cpu_die(int cpu)
>>  +{
>>  +#ifdef CONFIG_PPC_BOOK3E
>>  +	int primary;
>>  +
>>  +	if (threads_per_core == 2) {
>>  +		primary = cpu_first_thread_sibling(cpu);
>>  +		if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
>>  +			/* if both threads are offline, put the cpu in PH20 */
>>  +			rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
>>  +		} else {
>>  +			/* if only one thread is offline, disable the thread */
>>  +			qoriq_disable_thread(cpu);
>>  +		}
>>  +	}
>>  +#endif
>>  +
>>  +	if (threads_per_core == 1) {
>>  +		rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
>>  +		return;
>>  +	}
>>  +}
> 
> That "return;" adds nothing, and it's even more awkward having it on 
> the
> one-thread case but not the two-thread case.

Will get rid of it.

> 
> 
>>  +static void rcpm_v1_cpu_up_prepare(int cpu)
>>  +{
>>  +	rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
>>  +	rcpm_v1_irq_unmask(cpu);
>>  +}
>>  +
>>  +static void rcpm_v2_cpu_exit_state(int cpu, int state)
>>  +{
>>  +	int hw_cpu = get_hard_smp_processor_id(cpu);
>>  +	u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);
> 
> Are you sure cpu_core_index_of_thread() is supposed to take a hardware
> cpu id?  The only current user, pseries_energy.c, has the comment
> "Convert logical cpu number to core number".

Here, the method of getting the core index of a thread is the same for
physical and logical IDs.
So I used this existing function to do the job.

> 
>>  +static const struct of_device_id rcpm_matches[] = {
>>  +	{
>>  +		.compatible = "fsl,qoriq-rcpm-1.0",
>>  +		.data = (void *)&qoriq_rcpm_v1_ops,
>>  +	},
>>  +	{
>>  +		.compatible = "fsl,qoriq-rcpm-2.0",
>>  +		.data = (void *)&qoriq_rcpm_v2_ops,
>>  +	},
>>  +	{
>>  +		.compatible = "fsl,qoriq-rcpm-2.1",
>>  +		.data = (void *)&qoriq_rcpm_v2_ops,
>>  +	},
>>  +	{},
>>  +};
> 
> Unnecessary (and const-unsafe) casts.
> 
> 
>>  +
>>  +int __init fsl_rcpm_init(void)
>>  +{
>>  +	struct device_node *np;
>>  +	const struct of_device_id *match;
>>  +	void __iomem *base;
>>  +
>>  +	np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
>>  +	if (!np) {
>>  +		pr_err("can't find the rcpm node.\n");
>>  +		return -ENODEV;
>>  +	}
> 
> It's not an error for the device tree node to not have this.
> 
> -Scott

Thanks.

-Chenhui


* Re: [PATCH v2,4/5] PowerPC/mpc85xx: Add hotplug support on E5500 and E500MC cores
  2015-08-26 20:55   ` Scott Wood
@ 2015-08-28  0:47     ` Chenhui Zhao
  0 siblings, 0 replies; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-28  0:47 UTC (permalink / raw)
  To: Scott Wood; +Cc: linuxppc-dev, linux-kernel, Jason.Jin



On Thu, Aug 27, 2015 at 4:55 AM, Scott Wood <scottwood@freescale.com> 
wrote:
> On Wed, Aug 26, 2015 at 08:09:47PM +0800, Chenhui Zhao wrote:
>>  +int check_cpu_dead(unsigned int cpu)
>>  +{
>>  +	return per_cpu(cpu_state, cpu) == CPU_DEAD;
>>  +}
> 
> I'm not sure this needs to be a function versus open-coded, but if 
> you do
> want to make it a function, make it more obvious from the caller side 
> by
> changing it to:
> 
> bool is_cpu_dead(unsigned int cpu);
> 
> Otherwise if I see "if (check_cpu_dead(cpu))" I don't know if the
> if-block is executed if the CPU is dead or if it isn't.

OK.

> 
> 
>>  diff --git a/arch/powerpc/platforms/85xx/smp.h 
>> b/arch/powerpc/platforms/85xx/smp.h
>>  index 0b20ae3..8ee19a3 100644
>>  --- a/arch/powerpc/platforms/85xx/smp.h
>>  +++ b/arch/powerpc/platforms/85xx/smp.h
>>  @@ -6,6 +6,7 @@
>>   #ifdef CONFIG_SMP
>>   void __init mpc85xx_smp_init(void);
>>   int __init mpc85xx_setup_pmc(void);
>>  +int __init fsl_rcpm_init(void);
>>   #else
> 
> Why wasn't this added in the patch that added fsl_rcpm_init()?
> 
> -Scott

Will move it there.

Thanks,
-Chenhui


* Re: [PATCH v2,5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500
  2015-08-26 22:42   ` [PATCH v2, 5/5] " Scott Wood
@ 2015-08-28  1:42     ` Chenhui Zhao
  2015-08-28  5:13       ` Scott Wood
  0 siblings, 1 reply; 13+ messages in thread
From: Chenhui Zhao @ 2015-08-28  1:42 UTC (permalink / raw)
  To: Scott Wood; +Cc: linuxppc-dev, linux-kernel, Jason.Jin



On Thu, Aug 27, 2015 at 6:42 AM, Scott Wood <scottwood@freescale.com> 
wrote:
> On Wed, Aug 26, 2015 at 08:09:48PM +0800, Chenhui Zhao wrote:
>>  +	.globl	booting_thread_hwid
>>  +booting_thread_hwid:
>>  +	.long  INVALID_THREAD_HWID
>>  +	.align 3
> 
> The commit message goes into no detail about the changes you're 
> making to
> thread handling, nor are there relevant comments.

OK. Will add some comments.

> 
>>  +/*
>>  + * r3 = the thread physical id
>>  + */
>>  +_GLOBAL(book3e_stop_thread)
>>  +	li	r4, 1
>>  +	sld	r4, r4, r3
>>  +	mtspr	SPRN_TENC, r4
>>  +	isync
>>  +	blr
> 
> Why did the C code not have an isync, if it's required here?

Just make sure "mtspr" has completed before the routine returns.

> 
> 
>>   _GLOBAL(fsl_secondary_thread_init)
>>   	/* Enable branch prediction */
>>   	lis     r3,BUCSR_INIT@h
>>  @@ -197,8 +236,10 @@ _GLOBAL(fsl_secondary_thread_init)
>>   	 * but the low bit right by two bits so that the cpu numbering is
>>   	 * continuous.
>>   	 */
>>  -	mfspr	r3, SPRN_PIR
>>  -	rlwimi	r3, r3, 30, 2, 30
>>  +	bl	10f
>>  +10:	mflr	r5
>>  +	addi	r5,r5,(booting_thread_hwid - 10b)
>>  +	lwz	r3,0(r5)
>>   	mtspr	SPRN_PIR, r3
>>   #endif
> 
> I assume the reason for this is that, unlike the kexec case, the cpu 
> has
> been reset so PIR has been reset?  Don't make me guess -- document.

We cannot rely on the value saved in SPRN_PIR. Each time
fsl_secondary_thread_init runs, SPRN_PIR may not hold the reset value.
Using booting_thread_hwid ensures SPRN_PIR gets the correct value.

> 
> 
>>  @@ -245,6 +286,30 @@ _GLOBAL(generic_secondary_smp_init)
>>   	mr	r3,r24
>>   	mr	r4,r25
>>   	bl	book3e_secondary_core_init
>>  +
>>  +/*
>>  + * If we want to boot Thread1, start Thread1 and stop Thread0.
>>  + * Note that only Thread0 will run the piece of code.
>>  + */
> 
> What ensures that only thread 0 runs this?  Especially if we're 
> entering
> via kdump on thread 1?

This piece of code will be executed only when the core resets (Thread0
will start first). Thread1 will run fsl_secondary_thread_init() to start.

How can kdump run this on Thread1? I know little about kexec.

> 
> 
> s/the piece/this piece/
> 
>>  +	LOAD_REG_ADDR(r3, booting_thread_hwid)
>>  +	lwz     r4, 0(r3)
>>  +	cmpwi	r4, INVALID_THREAD_HWID
>>  +	beq	20f
>>  +	cmpw	r4, r24
>>  +	beq	20f
> 
> Do all cores get released from the spin table before the first thread
> gets kicked?

Yes.

> 
> 
>>  +
>>  +	/* start Thread1 */
>>  +	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
>>  +	ld	r4, 0(r5)
>>  +	li	r3, 1
>>  +	bl	book3e_start_thread
>>  +
>>  +	/* stop Thread0 */
>>  +	li	r3, 0
>>  +	bl	book3e_stop_thread
>>  +10:
>>  +	b	10b
>>  +20:
>>   #endif
>> 
>>   generic_secondary_common_init:
>>  diff --git a/arch/powerpc/platforms/85xx/smp.c 
>> b/arch/powerpc/platforms/85xx/smp.c
>>  index 73eb994..61f68ad 100644
>>  --- a/arch/powerpc/platforms/85xx/smp.c
>>  +++ b/arch/powerpc/platforms/85xx/smp.c
>>  @@ -181,17 +181,11 @@ static inline u32 read_spin_table_addr_l(void 
>> *spin_table)
>>   static void wake_hw_thread(void *info)
>>   {
>>   	void fsl_secondary_thread_init(void);
>>  -	unsigned long imsr1, inia1;
>>  -	int nr = *(const int *)info;
>>  +	unsigned long inia;
>>  +	int hw_cpu = get_hard_smp_processor_id(*(const int *)info);
>> 
>>  -	imsr1 = MSR_KERNEL;
>>  -	inia1 = *(unsigned long *)fsl_secondary_thread_init;
>>  -
>>  -	mttmr(TMRN_IMSR1, imsr1);
>>  -	mttmr(TMRN_INIA1, inia1);
>>  -	mtspr(SPRN_TENS, TEN_THREAD(1));
>>  -
>>  -	smp_generic_kick_cpu(nr);
>>  +	inia = *(unsigned long *)fsl_secondary_thread_init;
>>  +	book3e_start_thread(cpu_thread_in_core(hw_cpu), inia);
>>   }
>>   #endif
>> 
>>  @@ -279,7 +273,6 @@ static int smp_85xx_kick_cpu(int nr)
>>   	int ret = 0;
>>   #ifdef CONFIG_PPC64
>>   	int primary = nr;
>>  -	int primary_hw = get_hard_smp_processor_id(primary);
>>   #endif
>> 
>>   	WARN_ON(nr < 0 || nr >= num_possible_cpus());
>>  @@ -287,33 +280,43 @@ static int smp_85xx_kick_cpu(int nr)
>>   	pr_debug("kick CPU #%d\n", nr);
>> 
>>   #ifdef CONFIG_PPC64
>>  +	booting_thread_hwid = INVALID_THREAD_HWID;
>>   	/* Threads don't use the spin table */
>>  -	if (cpu_thread_in_core(nr) != 0) {
>>  -		int primary = cpu_first_thread_sibling(nr);
>>  +	if (threads_per_core == 2) {
>>  +		booting_thread_hwid = get_hard_smp_processor_id(nr);
> 
> What does setting booting_thread_hwid to INVALID_THREAD_HWID here
> accomplish?  If threads_per_core != 2 it would never have been set to
> anything else, and if threads_per_core == 2 you immediately overwrite 
> it.

booting_thread_hwid is valid only when a core has two threads (e6500).
For e5500 and e500mc, each core has a single thread, so
"booting_thread_hwid" stays invalid.

"booting_thread_hwid" determines which thread to start in
generic_secondary_smp_init().

> 
>>  +		primary = cpu_first_thread_sibling(nr);
>> 
>>   		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
>>   			return -ENOENT;
>> 
>>  -		if (cpu_thread_in_core(nr) != 1) {
>>  -			pr_err("%s: cpu %d: invalid hw thread %d\n",
>>  -			       __func__, nr, cpu_thread_in_core(nr));
>>  -			return -ENOENT;
>>  -		}
>>  -
>>  -		if (!cpu_online(primary)) {
>>  -			pr_err("%s: cpu %d: primary %d not online\n",
>>  -			       __func__, nr, primary);
>>  -			return -ENOENT;
>>  +		/*
>>  +		 * If either one of threads in the same core is online,
>>  +		 * use the online one to start the other.
>>  +		 */
>>  +		if (qoriq_pm_ops)
>>  +			qoriq_pm_ops->cpu_up_prepare(nr);
> 
> cpu_up_prepare does rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20).  How do
> you know the cpu is already in PH20?  What if this is initial boot?  
> Are
> you relying on it being a no-op in that case?

Yes. If the cpu is in PH20, it will exit that state; if not,
cpu_up_prepare() is effectively a no-op.

> 
> 
>>  +
>>  +		if (cpu_online(primary)) {
>>  +			smp_call_function_single(primary,
>>  +					wake_hw_thread, &nr, 1);
>>  +			goto done;
>>  +		} else if (cpu_online(primary + 1)) {
>>  +			smp_call_function_single(primary + 1,
>>  +					wake_hw_thread, &nr, 1);
>>  +			goto done;
>>   		}
>> 
>>  -		smp_call_function_single(primary, wake_hw_thread, &nr, 0);
>>  -		return 0;
>>  +		/* If both threads are offline, continue to star primary cpu */
> 
> s/star/start/
> 
>>  +	} else if (threads_per_core > 2) {
>>  +		pr_err("Do not support more than 2 threads per CPU.");
> 
> WARN_ONCE(1, "More than 2 threads per core not supported: %d\n",
> 	  threads_per_core);
> 
> -Scott

OK

-Chenhui


* Re: [PATCH v2,5/5] PowerPC/mpc85xx: Add CPU hotplug support for E6500
  2015-08-28  1:42     ` [PATCH v2,5/5] " Chenhui Zhao
@ 2015-08-28  5:13       ` Scott Wood
  0 siblings, 0 replies; 13+ messages in thread
From: Scott Wood @ 2015-08-28  5:13 UTC (permalink / raw)
  To: Chenhui Zhao; +Cc: linuxppc-dev, linux-kernel, Jason.Jin

On Fri, 2015-08-28 at 09:42 +0800, Chenhui Zhao wrote:
> On Thu, Aug 27, 2015 at 6:42 AM, Scott Wood <scottwood@freescale.com> 
> wrote:
> > On Wed, Aug 26, 2015 at 08:09:48PM +0800, Chenhui Zhao wrote:
> > >  +        .globl  booting_thread_hwid
> > >  +booting_thread_hwid:
> > >  +        .long  INVALID_THREAD_HWID
> > >  +        .align 3
> > 
> > The commit message goes into no detail about the changes you're 
> > making to
> > thread handling, nor are there relevant comments.
> 
> OK. Will add some comments.
> 
> > 
> > >  +/*
> > >  + * r3 = the thread physical id
> > >  + */
> > >  +_GLOBAL(book3e_stop_thread)
> > >  +        li      r4, 1
> > >  +        sld     r4, r4, r3
> > >  +        mtspr   SPRN_TENC, r4
> > >  +        isync
> > >  +        blr
> > 
> > Why did the C code not have an isync, if it's required here?
> 
> Just make sure "mtspr" has completed before the routine returns.
> 
> > 
> > 
> > >   _GLOBAL(fsl_secondary_thread_init)
> > >           /* Enable branch prediction */
> > >           lis     r3,BUCSR_INIT@h
> > >  @@ -197,8 +236,10 @@ _GLOBAL(fsl_secondary_thread_init)
> > >            * but the low bit right by two bits so that the cpu numbering is
> > >            * continuous.
> > >            */
> > >  -        mfspr   r3, SPRN_PIR
> > >  -        rlwimi  r3, r3, 30, 2, 30
> > >  +        bl      10f
> > >  +10:     mflr    r5
> > >  +        addi    r5,r5,(booting_thread_hwid - 10b)
> > >  +        lwz     r3,0(r5)
> > >           mtspr   SPRN_PIR, r3
> > >   #endif
> > 
> > I assume the reason for this is that, unlike the kexec case, the cpu 
> > has
> > been reset so PIR has been reset?  Don't make me guess -- document.
> 
> We cannot rely on the value saved in SPRN_PIR. Each time
> fsl_secondary_thread_init runs, SPRN_PIR may not hold the reset value.
> Using booting_thread_hwid ensures SPRN_PIR gets the correct value.

But when will the cpu ever be in a state other than "reset PIR value and 
reset BUCSR value" or "Software-desired PIR value and BUCSR value"?

> > >  @@ -245,6 +286,30 @@ _GLOBAL(generic_secondary_smp_init)
> > >           mr      r3,r24
> > >           mr      r4,r25
> > >           bl      book3e_secondary_core_init
> > >  +
> > >  +/*
> > >  + * If we want to boot Thread1, start Thread1 and stop Thread0.
> > >  + * Note that only Thread0 will run the piece of code.
> > >  + */
> > 
> > What ensures that only thread 0 runs this?  Especially if we're 
> > entering
> > via kdump on thread 1?
> 
> This piece of code will be executed only when the core resets (Thread0
> will start first).

This is not true with kexec/kdump.

> Thread1 will run fsl_secondary_thread_init() to start.
> 
> How can kdump run this on Thread1? I know little about kexec.

kexec/kdump involves booting a new kernel image without resetting the 
hardware.

> > >  +        /* start Thread1 */
> > >  +        LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
> > >  +        ld      r4, 0(r5)
> > >  +        li      r3, 1
> > >  +        bl      book3e_start_thread
> > >  +
> > >  +        /* stop Thread0 */
> > >  +        li      r3, 0
> > >  +        bl      book3e_stop_thread
> > >  +10:
> > >  +        b       10b
> > >  +20:
> > >   #endif
> > > 
> > >   generic_secondary_common_init:
> > >  diff --git a/arch/powerpc/platforms/85xx/smp.c 
> > > b/arch/powerpc/platforms/85xx/smp.c
> > >  index 73eb994..61f68ad 100644
> > >  --- a/arch/powerpc/platforms/85xx/smp.c
> > >  +++ b/arch/powerpc/platforms/85xx/smp.c
> > >  @@ -181,17 +181,11 @@ static inline u32 read_spin_table_addr_l(void 
> > > *spin_table)
> > >   static void wake_hw_thread(void *info)
> > >   {
> > >           void fsl_secondary_thread_init(void);
> > >  -        unsigned long imsr1, inia1;
> > >  -        int nr = *(const int *)info;
> > >  +        unsigned long inia;
> > >  +        int hw_cpu = get_hard_smp_processor_id(*(const int *)info);
> > > 
> > >  -        imsr1 = MSR_KERNEL;
> > >  -        inia1 = *(unsigned long *)fsl_secondary_thread_init;
> > >  -
> > >  -        mttmr(TMRN_IMSR1, imsr1);
> > >  -        mttmr(TMRN_INIA1, inia1);
> > >  -        mtspr(SPRN_TENS, TEN_THREAD(1));
> > >  -
> > >  -        smp_generic_kick_cpu(nr);
> > >  +        inia = *(unsigned long *)fsl_secondary_thread_init;
> > >  +        book3e_start_thread(cpu_thread_in_core(hw_cpu), inia);
> > >   }
> > >   #endif
> > > 
> > >  @@ -279,7 +273,6 @@ static int smp_85xx_kick_cpu(int nr)
> > >           int ret = 0;
> > >   #ifdef CONFIG_PPC64
> > >           int primary = nr;
> > >  -        int primary_hw = get_hard_smp_processor_id(primary);
> > >   #endif
> > > 
> > >           WARN_ON(nr < 0 || nr >= num_possible_cpus());
> > >  @@ -287,33 +280,43 @@ static int smp_85xx_kick_cpu(int nr)
> > >           pr_debug("kick CPU #%d\n", nr);
> > > 
> > >   #ifdef CONFIG_PPC64
> > >  +        booting_thread_hwid = INVALID_THREAD_HWID;
> > >           /* Threads don't use the spin table */
> > >  -        if (cpu_thread_in_core(nr) != 0) {
> > >  -                int primary = cpu_first_thread_sibling(nr);
> > >  +        if (threads_per_core == 2) {
> > >  +                booting_thread_hwid = get_hard_smp_processor_id(nr);
> > 
> > What does setting booting_thread_hwid to INVALID_THREAD_HWID here
> > accomplish?  If threads_per_core != 2 it would never have been set to
> > anything else, and if threads_per_core == 2 you immediately overwrite 
> > it.
> 
> booting_thread_hwid is valid only when a core has two threads (e6500).
> For e5500 and e500mc, each core has a single thread, so
> "booting_thread_hwid" stays invalid.
> 
> "booting_thread_hwid" determines which thread to start in
> generic_secondary_smp_init().

You didn't answer my question.

> > > +         /*
> > >  +                 * If either one of threads in the same core is online,
> > >  +                 * use the online one to start the other.
> > >  +                 */
> > >  +                if (qoriq_pm_ops)
> > >  +                        qoriq_pm_ops->cpu_up_prepare(nr);
> > 
> > cpu_up_prepare does rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20).  How do
> > you know the cpu is already in PH20?  What if this is initial boot?  
> > Are
> > you relying on it being a no-op in that case?
> 
> Yes. If the cpu is in PH20, it will exit that state; if not,
> cpu_up_prepare() is effectively a no-op.

This warrants a comment.
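
Something along these lines would do (wording is only a suggestion):

	/*
	 * If the cpu was put into PH20 when it was offlined, bring it
	 * back out.  On initial boot the cpu is not in PH20 and this
	 * is effectively a no-op.
	 */
	if (qoriq_pm_ops)
		qoriq_pm_ops->cpu_up_prepare(nr);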

-Scott


* Re: [PATCH v2,2/5] powerpc/rcpm: add RCPM driver
  2015-08-28  0:40     ` Scott Wood
@ 2015-08-28  5:18       ` Scott Wood
  0 siblings, 0 replies; 13+ messages in thread
From: Scott Wood @ 2015-08-28  5:18 UTC (permalink / raw)
  To: Chenhui Zhao; +Cc: linuxppc-dev, linux-kernel, Jason.Jin

On Fri, 2015-08-28 at 08:40 +0800, Scott Wood wrote:
> On Thu, Aug 27, 2015 at 4:35 AM, Scott Wood <scottwood@freescale.com> 
> wrote:
> > On Wed, Aug 26, 2015 at 08:09:45PM +0800, Chenhui Zhao wrote:

I didn't write this e-mail.  Please fix your mail client.

> > >  +static void rcpm_v1_cpu_up_prepare(int cpu)
> > >  +{
> > >  +        rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
> > >  +        rcpm_v1_irq_unmask(cpu);
> > >  +}
> > >  +
> > >  +static void rcpm_v2_cpu_exit_state(int cpu, int state)
> > >  +{
> > >  +        int hw_cpu = get_hard_smp_processor_id(cpu);
> > >  +        u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);
> > 
> > Are you sure cpu_core_index_of_thread() is supposed to take a hardware
> > cpu id?  The only current user, pseries_energy.c, has the comment
> > "Convert logical cpu number to core number".
> 
> Here, the method of getting the core index of a thread is the same for
> physical and logical IDs.
> So I used this existing function to do the job.

I realize that you get the same result either way, but that doesn't make it 
OK.  You have "cpu"; pass that instead of "hw_cpu".
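
I.e. roughly (sketch; the rest of the function stays as posted):

static void rcpm_v2_cpu_exit_state(int cpu, int state)
{
	u32 mask = 1 << cpu_core_index_of_thread(cpu);

	/* ... program the RCPM exit-state registers with mask/state as before ... */
}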

-Scott


