* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
@ 2005-02-09 17:53 ` Ashok Raj
2005-02-09 18:19 ` Alex Williamson
` (8 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Ashok Raj @ 2005-02-09 17:53 UTC (permalink / raw)
To: linux-ia64
On Wed, Feb 09, 2005 at 09:40:28AM -0800, Alex Williamson wrote:
Hi Alex
In fact i did submit a patch for this about a month ago. I was sharing some
code from mca side for tlb purge, and this code has been in the swamp for
several weeks now. I hope they are settled now, and i will re submit my
patches once again.
link from old post
http://marc.theaimsgroup.com/?l=linux-ia64&m=110239954713260&w=2
I will repost to match what's there in Tony's test/release tree asap.
ashok
>
> When a CPU is sent offline, it currently goes into a dummy spin
> loop
> and pretends to be gone. This patch returns the CPU back to SAL via
> the
> mechanism described in the SAL spec. The state of secondary CPUs is
> saved off to a dynamically allocated stack for use on return to SAL.
> I've munged the _start code in head.S to avoid trampling over some of
> the preserved registers before we get a chance to save them. The
> assembly could probably use some optimizations, but these are hardly
> performance paths. It seems to work reliably on zx1 and sx1000 boxes,
> but needs some exposure on others. Patch against current bk. Thanks,
>
> Alex
>
> --
> Signed-off-by: Alex Williamson <alex.williamson@hp.com>
>
> === arch/ia64/kernel/head.S 1.31 vs edited ===
> --- 1.31/arch/ia64/kernel/head.S 2005-01-28 16:50:13 -07:00
> +++ edited/arch/ia64/kernel/head.S 2005-02-09 09:32:04 -07:00
> @@ -63,6 +63,14 @@
> ;;
> srlz.i
> ;;
> +
> + /*
> + * Store SAL gp, sp and tp so we don't clobber them
> + */
> + mov r31=r1
> + mov r30=r12
> + mov r29=r13
> +
> /*
> * Initialize kernel region registers:
> * rr[0]: VHPT enabled, page size = PAGE_SHIFT
> @@ -76,31 +84,65 @@
> * We initialize all of them to prevent inadvertently assuming
> * something about the state of address translation early in
> boot.
> */
> - mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> - movl r7=(0<<61)
> - mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> - movl r9=(1<<61)
> - mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> - movl r11=(2<<61)
> + mov r9=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> + mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> + mov r11=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> - movl r13=(3<<61)
> - mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> - movl r15=(4<<61)
> - mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> - movl r17=(5<<61)
> - mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) |
> (IA64_GRANULE_SHIFT << 2))
> - movl r19=(6<<61)
> - mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) |
> (IA64_GRANULE_SHIFT << 2))
> - movl r21=(7<<61)
> - ;;
> - mov rr[r7]=r6
> - mov rr[r9]=r8
> - mov rr[r11]=r10
> - mov rr[r13]=r12
> - mov rr[r15]=r14
> - mov rr[r17]=r16
> - mov rr[r19]=r18
> - mov rr[r21]=r20
> + mov r13=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> + mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) |
> (PAGE_SHIFT << 2) | 1)
> + mov r15=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) |
> (IA64_GRANULE_SHIFT << 2))
> + mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) |
> (IA64_GRANULE_SHIFT << 2))
> +
> + /*
> + * Original SAL RRs stored in r8-r15 for later use
> + */
> + movl r2=(0<<61)
> + ;;
> + mov r8=rr[r2]
> + ;;
> + mov rr[r2]=r9
> + ;;
> + movl r2=(1<<61)
> + ;;
> + mov r9=rr[r2]
> + ;;
> + mov rr[r2]=r10
> + ;;
> + movl r2=(2<<61)
> + ;;
> + mov r10=rr[r2]
> + ;;
> + mov rr[r2]=r11
> + ;;
> + movl r2=(3<<61)
> + ;;
> + mov r11=rr[r2]
> + ;;
> + mov rr[r2]=r12
> + ;;
> + movl r2=(4<<61)
> + ;;
> + mov r12=rr[r2]
> + ;;
> + mov rr[r2]=r13
> + ;;
> + movl r2=(5<<61)
> + ;;
> + mov r13=rr[r2]
> + ;;
> + mov rr[r2]=r14
> + ;;
> + movl r2=(6<<61)
> + ;;
> + mov r14=rr[r2]
> + ;;
> + mov rr[r2]=r15
> + ;;
> + movl r2=(7<<61)
> + ;;
> + mov r15=rr[r2]
> + ;;
> + mov rr[r2]=r16
> ;;
> /*
> * Now pin mappings into the TLB for kernel text and data
> @@ -141,6 +183,12 @@
> rfi
> ;;
> 1: // now we are in virtual mode
> +
> + /*
> + * Preserved CR/ARs for return to SAL before clobbering
> + */
> + mov r27=cr.iva
> + mov r26=ar.fpsr
>
> // set IVT entry point---can't access I/O ports without it
> movl r3=ia64_ivt
> @@ -154,8 +202,8 @@
> mov ar.fpsr=r2
> ;;
>
> -#define isAP p2 // are we an Application Processor?
> -#define isBP p3 // are we the Bootstrap Processor?
> +#define isAP p6 // are we an Application Processor?
> +#define isBP p7 // are we the Bootstrap Processor?
>
> #ifdef CONFIG_SMP
> /*
> @@ -170,6 +218,169 @@
> cmp.eq isBP,isAP=r3,r0
> ;;
> (isAP) mov r2=r3
> +#ifdef CONFIG_HOTPLUG_CPU
> + /*
> + * Save SAL off information for possible return to SAL
> + */
> +(isAP) movl r3=sal_handoff_state
> + ;;
> +(isAP) ld8 r3=[r3]
> + ;;
> +(isBP) movl r3=0
> + ;;
> + cmp.eq p8,p9=r3,r0 // Saving state predicated on p9
> + ;;
> + /* Branch registers 1-5 are preserved, 0 contains SAL re-entry
> point */
> +(p9) mov r16=b0
> + ;;
> +(p9) st8 [r3]=r16,8 // b0
> + ;;
> +(p9) mov r16=b1
> + ;;
> +(p9) st8 [r3]=r16,8 // b1
> + ;;
> +(p9) mov r16=b2
> + ;;
> +(p9) st8 [r3]=r16,8 // b2
> + ;;
> +(p9) mov r16=b3
> + ;;
> +(p9) st8 [r3]=r16,8 // b3
> + ;;
> +(p9) mov r16=b4
> + ;;
> +(p9) st8 [r3]=r16,8 // b4
> + ;;
> +(p9) mov r16=b5
> + ;;
> +(p9) st8 [r3]=r16,8 // b5
> + ;;
> + /* Region registers are preserved */
> +(p9) st8 [r3]=r8,8 // rr0
> + ;;
> +(p9) st8 [r3]=r9,8 // rr1
> + ;;
> +(p9) st8 [r3]=r10,8 // rr2
> + ;;
> +(p9) st8 [r3]=r11,8 // rr3
> + ;;
> +(p9) st8 [r3]=r12,8 // rr4
> + ;;
> +(p9) st8 [r3]=r13,8 // rr5
> + ;;
> +(p9) st8 [r3]=r14,8 // rr6
> + ;;
> +(p9) st8 [r3]=r15,8 // rr7
> +(p9) mov r16=pr
> + ;;
> +(p9) st8 [r3]=r16,8 // predicates
> + ;;
> +(p9) mov r16=ar.bspstore
> + ;;
> +(p9) st8 [r3]=r16,8 // ar.bspstore
> + ;;
> +(p9) mov r16=ar.rnat
> + ;;
> +(p9) st8 [r3]=r16,8 // ar.rnat
> + ;;
> +(p9) mov r16=ar.unat
> + ;;
> +(p9) st8 [r3]=r16,8 // ar.unat
> + ;;
> +(p9) st8 [r3]=r26,8 // ar.fpsr
> + ;;
> +(p9) mov r16=ar.pfs
> + ;;
> +(p9) st8 [r3]=r16,8 // ar.pfs
> + ;;
> +(p9) mov r16=ar.lc
> + ;;
> +(p9) st8 [r3]=r16,8 // ar.lc
> + ;;
> +(p9) mov r16=cr.dcr
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.dcr
> + ;;
> +(p9) st8 [r3]=r27,8 // cr.iva
> + ;;
> +(p9) mov r16=cr.pta
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.pta
> + ;;
> +(p9) mov r16=cr.itv
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.itv
> + ;;
> +(p9) mov r16=cr.pmv
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.pmv
> + ;;
> +(p9) mov r16=cr.cmcv
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.cmcv
> + ;;
> +(p9) mov r16=cr.lrr0
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.lrr0
> + ;;
> +(p9) mov r16=cr.lrr1
> + ;;
> +(p9) st8 [r3]=r16,8 // cr.lrr1
> + ;;
> +(p9) st8 [r3]=r31,8 // gp
> + ;;
> +(p9) st8 [r3]=r30,8 // sp
> + ;;
> +(p9) st8 [r3]=r29,8 // tp
> + ;;
> +(p9) st8 [r3]=r4,8 // gr4
> + ;;
> +(p9) st8 [r3]=r5,8 // gr5
> + ;;
> +(p9) st8 [r3]=r6,8 // gr6
> + ;;
> +(p9) st8 [r3]=r7,8 // gr7
> + ;;
> +(p9) stf.spill.nta [r3]=f2,16
> + ;;
> +(p9) stf.spill.nta [r3]=f3,16
> + ;;
> +(p9) stf.spill.nta [r3]=f4,16
> + ;;
> +(p9) stf.spill.nta [r3]=f5,16
> + ;;
> +(p9) stf.spill.nta [r3]=f16,16
> + ;;
> +(p9) stf.spill.nta [r3]=f17,16
> + ;;
> +(p9) stf.spill.nta [r3]=f18,16
> + ;;
> +(p9) stf.spill.nta [r3]=f19,16
> + ;;
> +(p9) stf.spill.nta [r3]=f20,16
> + ;;
> +(p9) stf.spill.nta [r3]=f21,16
> + ;;
> +(p9) stf.spill.nta [r3]=f22,16
> + ;;
> +(p9) stf.spill.nta [r3]=f23,16
> + ;;
> +(p9) stf.spill.nta [r3]=f24,16
> + ;;
> +(p9) stf.spill.nta [r3]=f25,16
> + ;;
> +(p9) stf.spill.nta [r3]=f26,16
> + ;;
> +(p9) stf.spill.nta [r3]=f27,16
> + ;;
> +(p9) stf.spill.nta [r3]=f28,16
> + ;;
> +(p9) stf.spill.nta [r3]=f29,16
> + ;;
> +(p9) stf.spill.nta [r3]=f30,16
> + ;;
> +(p9) stf.spill.nta [r3]=f31,16
> +#endif /* CONFIG_HOTPLUG_CPU */
> #else
> movl r2=init_task
> cmp.eq isBP,isAP=r0,r0
> @@ -256,6 +467,263 @@
> self: hint @pause
> br.sptk.many self // endless loop
> END(_start)
> +
> +#ifdef CONFIG_HOTPLUG_CPU
> +GLOBAL_ENTRY(ia64_return_to_sal)
> + alloc r16=ar.pfs,2,0,0,0
> + ;;
> + mov ar.rsc=0 // place RSE in enforced lazy mode
> + ;;
> + flushrs
> + ;;
> + movl r16=(IA64_PSR_AC|IA64_PSR_BN)
> + ;;
> + br.call.sptk.many rp=ia64_switch_mode_phys // physical mode
> +1:
> + /* Purge kernel TRs */
> + movl r16=KERNEL_START
> + mov r18=KERNEL_TR_PAGE_SHIFT<<2
> + ;;
> + ptr.i r16,r18
> + ptr.d r16,r18
> + ;;
> + srlz.i
> + ;;
> + srlz.d
> + ;;
> + /* Purge percpu TR */
> + movl r16=PERCPU_ADDR
> + mov r18=PERCPU_PAGE_SHIFT<<2
> + ;;
> + ptr.d r16,r18
> + ;;
> + srlz.d
> + ;;
> + /* Purge PAL TR - purge before getting here */
> + /* Purge stack TR */
> + mov r16=IA64_KR(CURRENT_STACK)
> + ;;
> + shl r16=r16,IA64_GRANULE_SHIFT
> + movl r19=PAGE_OFFSET
> + ;;
> + add r16=r19,r16
> + mov r18=IA64_GRANULE_SHIFT<<2
> + ;;
> + ptr.d r16,r18
> + ;;
> + srlz.i
> + ;;
> + mov r3=r32
> + ;;
> + /* Branch registers 1-5 are preserved, 0 contains SAL re-entry
> point */
> + ld8 r2=[r3],8 // b0
> + ;;
> + mov b0=r2
> + ;;
> + ld8 r2=[r3],8 // b1
> + ;;
> + mov b1=r2
> + ;;
> + ld8 r2=[r3],8 // b2
> + ;;
> + mov b2=r2
> + ;;
> + ld8 r2=[r3],8 // b3
> + ;;
> + mov b3=r2
> + ;;
> + ld8 r2=[r3],8 // b4
> + ;;
> + mov b4=r2
> + ;;
> + ld8 r2=[r3],8 // b5
> + ;;
> + mov b5=r2
> + ;;
> + /* Region registers are preserved */
> + ld8 r2=[r3],8 // rr0
> + movl r16=(0<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr1
> + movl r16=(1<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr2
> + movl r16=(2<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr3
> + movl r16=(3<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr4
> + movl r16=(4<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr5
> + movl r16=(5<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr6
> + movl r16=(6<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // rr7
> + movl r16=(7<<61)
> + ;;
> + mov rr[r16]=r2
> + ;;
> + srlz.d
> + ;;
> + ld8 r2=[r3],8 // predicates
> + ;;
> + mov pr=r2,-1
> + ;;
> + ld8 r2=[r3],8 // ar.bspstore
> + ;;
> + mov ar.bspstore=r2
> + ;;
> + ld8 r2=[r3],8 // ar.rnat
> + ;;
> + mov ar.rnat=r2
> + ;;
> + ld8 r2=[r3],8 // ar.unat
> + ;;
> + mov ar.unat=r2
> + ;;
> + ld8 r2=[r3],8 // ar.fpsr
> + ;;
> + mov ar.fpsr=r2
> + ;;
> + ld8 r2=[r3],8 // ar.pfs
> + ;;
> + mov ar.pfs=r2
> + ;;
> + ld8 r2=[r3],8 // ar.lc
> + ;;
> + mov ar.lc=r2
> + ;;
> + ld8 r2=[r3],8 // cr.dcr
> + ;;
> + mov cr.dcr=r2
> + ;;
> + ld8 r2=[r3],8 // cr.iva
> + ;;
> + mov cr.iva=r2
> + ;;
> + ld8 r2=[r3],8 // cr.pta
> + ;;
> + mov cr.pta=r2
> + ;;
> + ld8 r2=[r3],8 // cr.itv
> + ;;
> + mov cr.itv=r2
> + ;;
> + ld8 r2=[r3],8 // cr.pmv
> + ;;
> + mov cr.pmv=r2
> + ;;
> + ld8 r2=[r3],8 // cr.cmcv
> + ;;
> + mov cr.cmcv=r2
> + ;;
> + ld8 r2=[r3],8 // cr.lrr0
> + ;;
> + mov cr.lrr0=r2
> + ;;
> + ld8 r2=[r3],8 // cr.lrr1
> + ;;
> + mov cr.lrr1=r2
> + ;;
> + ld8 gp=[r3],8 // gp
> + ;;
> + ld8 r12=[r3],8 // sp
> + ;;
> + ld8 r13=[r3],8 // tp
> + ;;
> + ld8 r4=[r3],8 // gr4
> + ;;
> + ld8 r5=[r3],8 // gr5
> + ;;
> + ld8 r6=[r3],8 // gr6
> + ;;
> + ld8 r7=[r3],8 // gr7
> + ;;
> + ldf.fill.nta f2=[r3],16
> + ;;
> + ldf.fill.nta f3=[r3],16
> + ;;
> + ldf.fill.nta f4=[r3],16
> + ;;
> + ldf.fill.nta f5=[r3],16
> + ;;
> + ldf.fill.nta f16=[r3],16
> + ;;
> + ldf.fill.nta f17=[r3],16
> + ;;
> + ldf.fill.nta f18=[r3],16
> + ;;
> + ldf.fill.nta f19=[r3],16
> + ;;
> + ldf.fill.nta f20=[r3],16
> + ;;
> + ldf.fill.nta f21=[r3],16
> + ;;
> + ldf.fill.nta f22=[r3],16
> + ;;
> + ldf.fill.nta f23=[r3],16
> + ;;
> + ldf.fill.nta f24=[r3],16
> + ;;
> + ldf.fill.nta f25=[r3],16
> + ;;
> + ldf.fill.nta f26=[r3],16
> + ;;
> + ldf.fill.nta f27=[r3],16
> + ;;
> + ldf.fill.nta f28=[r3],16
> + ;;
> + ldf.fill.nta f29=[r3],16
> + ;;
> + ldf.fill.nta f30=[r3],16
> + ;;
> + ldf.fill.nta f31=[r3],16
> + ;;
> + ssm psr.ic // SAL wants ic=1
> + srlz.d
> + ;;
> +#define CPU_DEAD 0x0007
> + mov r3=CPU_DEAD
> + ;;
> + st4 [r33]=r3 // Ack Going offline
> + ;;
> + mf
> + ;;
> + br.ret.sptk.many b0
> +END(ia64_return_to_sal)
> +#endif /* CONFIG_HOTPLUG_CPU */
>
> GLOBAL_ENTRY(ia64_save_debug_regs)
> alloc r16=ar.pfs,1,0,0,0
> === arch/ia64/kernel/process.c 1.73 vs edited ===
> --- 1.73/arch/ia64/kernel/process.c 2005-01-22 15:19:21 -07:00
> +++ edited/arch/ia64/kernel/process.c 2005-02-09 10:02:11 -07:00
> @@ -199,28 +199,31 @@
> /* We don't actually take CPU down, just spin without interrupts. */
> static inline void play_dead(void)
> {
> - extern void ia64_cpu_local_tick (void);
> - /* Ack it */
> - __get_cpu_var(cpu_state) = CPU_DEAD;
> + void *pal_vaddr = efi_get_pal_addr();
> +
> + extern void ia64_return_to_sal (sal_handoff_state_t *, int *);
> + extern sal_handoff_state_t *sal_handoff_state;
>
> - /* We shouldn't have to disable interrupts while dead, but
> - * some interrupts just don't seem to go away, and this makes
> - * it "work" for testing purposes. */
> max_xtp();
> local_irq_disable();
> - /* Death loop */
> - while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
> - cpu_relax();
> -
> - /*
> - * Enable timer interrupts from now on
> - * Not required if we put processor in SAL_BOOT_RENDEZ mode.
> - */
> local_flush_tlb_all();
> - cpu_set(smp_processor_id(), cpu_online_map);
> - wmb();
> - ia64_cpu_local_tick ();
> - local_irq_enable();
> +
> + if (pal_vaddr) {
> + /*
> + * Easier to purge PAL TR here
> + */
> + ia64_clear_ic();
> + ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned
> long)pal_vaddr),
> + IA64_GRANULE_SHIFT);
> + ia64_srlz_i();
> + }
> +
> + ia64_return_to_sal((sal_handoff_state_t
> *)__pa(sal_handoff_state),
> + (int *)__pa(&__get_cpu_var(cpu_state)));
> +
> + printk(KERN_ERR "CPU%d didn't die\n", smp_processor_id());
> + for (;;)
> + cpu_relax();
> }
> #else
> static inline void play_dead(void)
> === arch/ia64/kernel/smpboot.c 1.65 vs edited ===
> --- 1.65/arch/ia64/kernel/smpboot.c 2005-01-22 14:13:47 -07:00
> +++ edited/arch/ia64/kernel/smpboot.c 2005-02-09 09:32:05 -07:00
> @@ -22,6 +22,7 @@
> #include <linux/irq.h>
> #include <linux/kernel.h>
> #include <linux/kernel_stat.h>
> +#include <linux/list.h>
> #include <linux/mm.h>
> #include <linux/notifier.h>
> #include <linux/smp.h>
> @@ -79,6 +80,14 @@
>
> task_t *task_for_booting_cpu;
>
> +#ifdef CONFIG_HOTPLUG_CPU
> +/*
> + * Info for return to SAL
> + */
> +sal_handoff_state_t *sal_handoff_state;
> +static LIST_HEAD(sal_handoff_list);
> +#endif
> +
> /*
> * State for each CPU
> */
> @@ -297,6 +306,8 @@
> cpu_set(cpuid, cpu_online_map);
> unlock_ipi_calllock();
>
> + __get_cpu_var(cpu_state)=CPU_ONLINE;
> +
> smp_setup_percpu_timer();
>
> ia64_mca_cmc_vector_setup(); /* Setup vector on AP */
> @@ -398,6 +409,15 @@
> panic("failed fork for CPU %d", cpu);
> task_for_booting_cpu = c_idle.idle;
>
> +#ifdef CONFIG_HOTPLUG_CPU
> + sal_handoff_state = kmalloc(sizeof(sal_handoff_state_t),
> GFP_KERNEL);
> + if (!sal_handoff_state)
> + printk(KERN_ERR "Processor 0x%x/0x%x cannot save SAL
> handoff "
> + "state\n", cpu, sapicid);
> + else
> + memset(sal_handoff_state, 0,
> sizeof(sal_handoff_state_t));
> +#endif
> +
> Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n",
> ap_wakeup_vector, cpu, sapicid);
>
> platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
> @@ -419,6 +439,10 @@
> cpu_clear(cpu, cpu_online_map); /* was set in
> smp_callin() */
> return -EINVAL;
> }
> +#ifdef CONFIG_HOTPLUG_CPU
> + list_add(&sal_handoff_state->list, &sal_handoff_list);
> + sal_handoff_state = NULL;
> +#endif
> return 0;
> }
>
> @@ -590,6 +614,15 @@
> if (cpu == 0)
> return -EBUSY;
>
> + /*
> + * Need a SAL state to restore
> + */
> + if (list_empty(&sal_handoff_list))
> + return -EBUSY;
> +
> + sal_handoff_state = list_entry(sal_handoff_list.next,
> + sal_handoff_state_t, list);
> + list_del(&sal_handoff_state->list);
> fixup_irqs();
> local_flush_tlb_all();
> printk ("Disabled cpu %u\n", smp_processor_id());
> @@ -604,12 +637,14 @@
> /* They ack this in play_dead by setting CPU_DEAD */
> if (per_cpu(cpu_state, cpu) == CPU_DEAD)
> {
> - /*
> - * TBD: Enable this when physical removal
> - * or when we put the processor is put in
> - * SAL_BOOT_RENDEZ mode
> - * cpu_clear(cpu, cpu_callin_map);
> - */
> + cpu_clear(cpu, cpu_callin_map);
> + if (sal_handoff_state) {
> + kfree(sal_handoff_state);
> + sal_handoff_state = NULL;
> + } else {
> + printk(KERN_ERR "CPU %u had no SAL
> handoff "
> + "info\n", cpu);
> + }
> return;
> }
> msleep(100);
> === include/asm-ia64/sal.h 1.27 vs edited ===
> --- 1.27/include/asm-ia64/sal.h 2005-01-22 15:57:26 -07:00
> +++ edited/include/asm-ia64/sal.h 2005-02-09 09:32:06 -07:00
> @@ -640,6 +640,36 @@
> u8 oem_data_pad[1024];
> } ia64_err_rec_t;
>
> +/* Return to SAL state and info */
> +typedef struct sal_handoff_state {
> + u64 br[6]; /* restore 1-5, 0 is SAL entry point */
> + u64 rr[8];
> + u64 preds;
> + /* ARs */
> + u64 bspstore;
> + u64 rnat;
> + u64 unat;
> + u64 fpsr;
> + u64 pfs;
> + u64 lc;
> + /* CRs */
> + u64 dcr;
> + u64 iva;
> + u64 pta;
> + u64 itv;
> + u64 pmv;
> + u64 cmcv;
> + u64 lrr[2];
> + /* GRs */
> + u64 gp;
> + u64 sp;
> + u64 tp;
> + u64 gr[4];
> + /* FPs */
> + struct ia64_fpreg fp[20];
> + struct list_head list;
> +} sal_handoff_state_t;
> +
> /*
> * Now define a couple of inline functions for improved type checking
> * and convenience.
>
> -
> To unsubscribe from this list: send the line "unsubscribe linux-ia64"
> in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at [1]http://vger.kernel.org/majordomo-info.html
>
> References
>
> 1. http://vger.kernel.org/majordomo-info.html
--
Cheers,
Ashok Raj
- Linux OS & Technology Team
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
2005-02-09 17:53 ` Ashok Raj
@ 2005-02-09 18:19 ` Alex Williamson
2005-02-09 19:26 ` Ashok Raj
` (7 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Alex Williamson @ 2005-02-09 18:19 UTC (permalink / raw)
To: linux-ia64
Hi Ashok,
Sorry I missed your patch. Your assembly is certainly cleaner than
mine. We seem to have several differences in the state that actually
gets saved and restored though. For instance, I see you're saving k0,
which is listed as scratch in the spec, but none of the fp, predicate,
branch registers, region registers, or preserved general registers.
Shouldn't a few more of those be preserved under "standard calling
conventions"? Also, what do you think about treating the saved state as
a stack? This could eventually allow the BSP to be sent off spinning in
SAL. Thanks,
Alex
On Wed, 2005-02-09 at 09:53 -0800, Ashok Raj wrote:
> On Wed, Feb 09, 2005 at 09:40:28AM -0800, Alex Williamson wrote:
>
> Hi Alex
>
> In fact i did submit a patch for this about a month ago. I was sharing some
> code from mca side for tlb purge, and this code has been in the swamp for
> several weeks now. I hope they are settled now, and i will re submit my
> patches once again.
>
> link from old post
>
> http://marc.theaimsgroup.com/?l=linux-ia64&m=110239954713260&w=2
>
> I will repost to match what's there in Tony's test/release tree asap.
>
> ashok
> >
> > When a CPU is sent offline, it currently goes into a dummy spin
> > loop
> > and pretends to be gone. This patch returns the CPU back to SAL via
> > the
> > mechanism described in the SAL spec. The state of secondary CPUs is
> > saved off to a dynamically allocated stack for use on return to SAL.
> > I've munged the _start code in head.S to avoid trampling over some of
> > the preserved registers before we get a chance to save them. The
> > assembly could probably use some optimizations, but these are hardly
> > performance paths. It seems to work reliably on zx1 and sx1000 boxes,
> > but needs some exposure on others. Patch against current bk. Thanks,
> >
> > Alex
--
Alex Williamson HP Linux & Open Source Lab
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
2005-02-09 17:53 ` Ashok Raj
2005-02-09 18:19 ` Alex Williamson
@ 2005-02-09 19:26 ` Ashok Raj
2005-02-09 19:44 ` Alex Williamson
` (6 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Ashok Raj @ 2005-02-09 19:26 UTC (permalink / raw)
To: linux-ia64
On Wed, Feb 09, 2005 at 11:19:43AM -0700, Alex Williamson wrote:
> Hi Ashok,
>
> Sorry I missed your patch. Your assembly is certainly cleaner than
> mine. We seem to have several differences in the state that actually
> gets saved and restored though. For instance, I see you're saving k0,
Section 3.2.4 seemed to indicate that SAL versions executing IA32 BIOS code
would have the IA32 I/O PORT block. That prompted me to save that even though
it was listed as scratch in the following section. (Maybe just required for
BSP?)
> which is listed as scratch in the spec, but none of the fp, predicate,
> branch registers, region registers, or preserved general registers.
> Shouldn't a few more of those be preserved under "standard calling
True. When i was testing, i ran into an issue with restoring region registers
Dont remember quite what it was, that prompted me to not do it for
that round of testing. It didnt seem to affect anything and appeared to
work fine on the tiger4 systems.
That result may not be sufficient for other platforms, so yes, we must
save those others that i missed in the first round. I am waiting for my
bk to syncup, i will post the revised patch on top of keith's mca fixes soon.
I noticed you are not preserving idle threads for re-use, there will be leaks
otherwise, since you will be re-creating new threads in __cpu_up() otherwise.
> conventions"? Also, what do you think about treating the saved state as
> a stack? This could eventually allow the BSP to be sent off spinning in
> SAL. Thanks,
Technically the same method should work for BSP as well, but since BSP ran
the bootloader, unless he saves it and exposes it in a standard way to OS
we cannot restore. (Agreed very UGLY)
Longer term, it would be ideal to have a SAL call, and hope SAL would preserve
anything necessary for this calling cpu, and so its not the responsibility
of OS to preserve every register state. THen we dont need to have a distinction
between AP/BSP as well.
>
> Alex
>
> On Wed, 2005-02-09 at 09:53 -0800, Ashok Raj wrote:
> > On Wed, Feb 09, 2005 at 09:40:28AM -0800, Alex Williamson wrote:
> >
> > Hi Alex
> >
> > In fact i did submit a patch for this about a month ago. I was sharing some
> > code from mca side for tlb purge, and this code has been in the swamp for
> > several weeks now. I hope they are settled now, and i will re submit my
> > patches once again.
> >
> > link from old post
> >
> > http://marc.theaimsgroup.com/?l=linux-ia64&m=110239954713260&w=2
> >
> > I will repost to match what's there in Tony's test/release tree asap.
> >
> > ashok
> > >
> > > When a CPU is sent offline, it currently goes into a dummy spin
> > > loop
> > > and pretends to be gone. This patch returns the CPU back to SAL via
> > > the
> > > mechanism described in the SAL spec. The state of secondary CPUs is
> > > saved off to a dynamically allocated stack for use on return to SAL.
> > > I've munged the _start code in head.S to avoid trampling over some of
> > > the preserved registers before we get a chance to save them. The
> > > assembly could probably use some optimizations, but these are hardly
> > > performance paths. It seems to work reliably on zx1 and sx1000 boxes,
> > > but needs some exposure on others. Patch against current bk. Thanks,
> > >
> > > Alex
>
> --
> Alex Williamson HP Linux & Open Source Lab
>
--
Cheers,
Ashok Raj
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (2 preceding siblings ...)
2005-02-09 19:26 ` Ashok Raj
@ 2005-02-09 19:44 ` Alex Williamson
2005-02-09 19:51 ` Luck, Tony
` (5 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Alex Williamson @ 2005-02-09 19:44 UTC (permalink / raw)
To: linux-ia64
On Wed, 2005-02-09 at 11:26 -0800, Ashok Raj wrote:
>
> Section 3.2.4 seemed to indicate that SAL versions executing IA32 BIOS code
> would have the IA32 I/O PORT block. That prompted me to save that even though
> it was listed as scratch in the following section. (Maybe just required for
> BSP?)
Yeah, I think we've already got all the information for I/O port
base, so it should only be an issue of SAL needing that. According to
the spec, it shouldn't.
> True. When i was testing, i ran into an issue with restoring region registers
> Dont remember quite what it was, that prompted me to not do it for
> that round of testing. It didnt seem to affect anything and appeared to
> work fine on the tiger4 systems.
FWIW, I had to add the srlz.d to prevent a RAW #RR warning. Perhaps
that was the issue.
> > Also, what do you think about treating the saved state as
> > a stack? This could eventually allow the BSP to be sent off spinning in
> > SAL. Thanks,
>
> Technically the same method should work for BSP as well, but since BSP ran
> the bootloader, unless he saves it and exposes it in a standard way to OS
> we cannot restore. (Agreed very UGLY)
But the BSP doesn't need to save anything. We'll always have N-1 SAL
states saved and N-1 CPUs that can be taken offline. As long as we
don't hard link a state to a specific CPU, we're in good shape. I've
been testing on my boxes with an order that intentionally gives CPUs the
state saved off of another CPU on OS entry. I appear to be able to make
the BSP return to SAL as well, but I don't think the rest of the hotplug
code is ready for this (the other CPU doesn't seem to be getting
scheduled). Thanks,
Alex
--
Alex Williamson HP Linux & Open Source Lab
^ permalink raw reply [flat|nested] 11+ messages in thread* RE: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (3 preceding siblings ...)
2005-02-09 19:44 ` Alex Williamson
@ 2005-02-09 19:51 ` Luck, Tony
2005-02-09 20:03 ` Alex Williamson
` (4 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Luck, Tony @ 2005-02-09 19:51 UTC (permalink / raw)
To: linux-ia64
> But the BSP doesn't need to save anything. We'll always have N-1 SAL
>states saved and N-1 CPUs that can be taken offline. As long as we
>don't hard link a state to a specific CPU, we're in good shape. I've
>been testing on my boxes with an order that intentionally gives CPUs the
>state saved off of another CPU on OS entry. I appear to be able to make
>the BSP return to SAL as well, but I don't think the rest of the hotplug
>code is ready for this (the other CPU doesn't seem to be getting
>scheduled).
That sounds worrying ... it assumes that the SAL thinks that
cpus are fungible, which might not be true on ccNUMA systems.
-Tony
^ permalink raw reply [flat|nested] 11+ messages in thread* RE: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (4 preceding siblings ...)
2005-02-09 19:51 ` Luck, Tony
@ 2005-02-09 20:03 ` Alex Williamson
2005-02-09 22:38 ` Ashok Raj
` (3 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Alex Williamson @ 2005-02-09 20:03 UTC (permalink / raw)
To: linux-ia64
On Wed, 2005-02-09 at 11:51 -0800, Luck, Tony wrote:
> > But the BSP doesn't need to save anything. We'll always have N-1 SAL
> >states saved and N-1 CPUs that can be taken offline. As long as we
> >don't hard link a state to a specific CPU, we're in good shape. I've
> >been testing on my boxes with an order that intentionally gives CPUs the
> >state saved off of another CPU on OS entry. I appear to be able to make
> >the BSP return to SAL as well, but I don't think the rest of the hotplug
> >code is ready for this (the other CPU doesn't seem to be getting
> >scheduled).
>
> That sounds worrying ... it assumes that the SAL thinks that
> cpus are fungible, which might not be true on ccNUMA systems.
Ok, I suppose it could be interpreted that Table 3-2 defining cr.lid
as unchanged locks a state to a specific CPU... too bad.
Alex
--
Alex Williamson HP Linux & Open Source Lab
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (5 preceding siblings ...)
2005-02-09 20:03 ` Alex Williamson
@ 2005-02-09 22:38 ` Ashok Raj
2005-02-09 23:04 ` Alex Williamson
` (2 subsequent siblings)
9 siblings, 0 replies; 11+ messages in thread
From: Ashok Raj @ 2005-02-09 22:38 UTC (permalink / raw)
To: linux-ia64
On Wed, Feb 09, 2005 at 11:19:43AM -0700, Alex Williamson wrote:
> Hi Ashok,
>
> Sorry I missed your patch. Your assembly is certainly cleaner than
> mine. We seem to have several differences in the state that actually
> gets saved and restored though. For instance, I see you're saving k0,
> which is listed as scratch in the spec, but none of the fp, predicate,
> branch registers, region registers, or preserved general registers.
> Shouldn't a few more of those be preserved under "standard calling
> conventions"? Also, what do you think about treating the saved state as
> a stack? This could eventually allow the BSP to be sent off spinning in
> SAL. Thanks,
>
> Alex
Hi Alex
attached is the newly diffed patches that will work on top of tony's
linux-2.6.11-release bk tree. It has Keith's fixes as well and i use the
TLB purge portions from what the mca code is already doing.
TBD: save/restore missing registers that are not saved today. I have tested
on tiger with good stress. (For stress testing, you will need the fix for
CONFIG_GENERIC_HARDIRQ which is still not in tony's release tree. Guess it
will get merged after 2.6.11 release, since iam changing some generic files)
Could you give it a try on your systems and give an update.
In the meantime, i will try to incorporate the missing pieces as another update
to this patch.
---
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
This patch is required to support cpu removal for IPF systems. Existing code
just fakes the real offline by keeping it run the idle thread, and polling
for the bit to re-appear in the cpu_state to get out of the idle loop.
For the cpu-offline to work correctly, we need to pass control of this CPU
back to SAL so it can continue in the boot-rendez mode. This gives the
SAL control to not pick this cpu as the monarch processor for global MCA
events, and addition does not wait for this cpu to checkin with SAL
for global MCA events as well. The handoff is implemented as documented in
SAL specification section 3.2.5.1 "OS_BOOT_RENDEZ to SAL return State"
Once the processor is in this state, the cpu can be woken up again by sending
another wakeup IPI.
echo 0 > /sys/devices/system/cpu/cpu3/online
The above command will attempt to put cpu offline will handoff cpu to SAL
echo 1 > /sys/devices/system/cpu/cpu3/online
We will now issue a ipi to wakeup the processor using the cpu_up()
Handling idle threads
Idle threads are created upon demand if one is not available for that logical
cpu number. If say a logical cpu 2 is removed, and a new cpu is inserted
the platform ACPI code handling CPU hotplug would find a new logical cpu
number to use. In which case if the number was cpu 2, the existing idle
thread is re-used.
Testing Done:
Only on tiger4:
Stable 24+hrs of repeated cpu online/offline of 3 processors in a tiger4 system
with ltpstress, make -j's running.
Early firmware does not work well when a processor is handed off to SAL, and then
injecting a recoverable MCA event, (at least the tiger ones).
Without injecting MCA, the processors can be handed off to SAL and can be
brought back by another echo 1 to the appropriate online file.
---
release_work-araj/arch/ia64/kernel/head.S | 114 +++++++++++++++++++++++++++
release_work-araj/arch/ia64/kernel/mca_asm.S | 88 ++++++++++++--------
release_work-araj/arch/ia64/kernel/process.c | 22 +----
release_work-araj/arch/ia64/kernel/smpboot.c | 81 +++++++++++--------
release_work-araj/include/asm-ia64/sal.h | 31 +++++++
5 files changed, 256 insertions(+), 80 deletions(-)
diff -puN arch/ia64/kernel/head.S~nbrz_no_macro arch/ia64/kernel/head.S
--- release_work/arch/ia64/kernel/head.S~nbrz_no_macro 2005-02-09 13:35:47.122311635 -0800
+++ release_work-araj/arch/ia64/kernel/head.S 2005-02-09 13:35:47.151608510 -0800
@@ -15,6 +15,8 @@
* Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
* Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
* -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
+ * Support for CPU Hotplug
*/
#include <linux/config.h>
@@ -29,6 +31,58 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+#include <asm/mca_asm.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define SAL_PSR_BITS_TO_SET \
+ (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
+
+#define SAVE_FROM_REG(src, ptr, dest) \
+ mov dest=src;; \
+ st8 [ptr]=dest,0x08
+
+#define RESTORE_REG(reg, ptr, _tmp) \
+ ld8 _tmp=[ptr],0x08;; \
+ mov reg=_tmp
+
+#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
+ mov ar.lc=IA64_NUM_DBG_REGS-1;; \
+ mov _idx=0;; \
+1: \
+ SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \
+ add _idx=1,_idx;; \
+ br.cloop.sptk.many 1b
+
+#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
+ mov ar.lc=IA64_NUM_DBG_REGS-1;; \
+ mov _idx=0;; \
+_lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \
+ add _idx=1, _idx;; \
+ br.cloop.sptk.many _lbl
+
+
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_reg3,_reg4) \
+ movl _reg2=sal_state_for_booting_cpu;; \
+ ld8 _reg1=[_reg2];; \
+ SAVE_FROM_REG(b0,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.k0,_reg1,_reg2);; \
+ st8 [_reg1]=r1,0x08;; \
+ st8 [_reg1]=r12,0x08;; \
+ st8 [_reg1]=r13,0x08;; \
+ SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;
+#else
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2,a3,a4)
+#endif
.section __special_page_section,"ax"
@@ -142,6 +196,9 @@ start_ap:
;;
1: // now we are in virtual mode
+ SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r2,r3,r4,r5)
+ ;;
+
// set IVT entry point---can't access I/O ports without it
movl r3=ia64_ivt
;;
@@ -993,4 +1050,61 @@ END(ia64_spinlock_contention)
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+GLOBAL_ENTRY(ia64_jump_to_sal)
+ alloc r16=ar.pfs,1,0,0,0;;
+ rsm psr.i | psr.ic
+{
+ flushrs
+ srlz.i
+}
+ tpa r25=in0
+ movl r18=tlb_purge_done;;
+ DATA_VA_TO_PA(r18);;
+ mov b1=r18 // Return location
+ movl r18=ia64_do_tlb_purge;;
+ DATA_VA_TO_PA(r18);;
+ mov b2=r18 // doing tlb_flush work.
+ mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
+ movl r17=1f;;
+ DATA_VA_TO_PA(r17);;
+ mov cr.iip=r17
+ movl r16=SAL_PSR_BITS_TO_SET;;
+ mov cr.ipsr=r16
+ mov cr.ifs=r0;;
+ rfi;;
+1:
+ RESTORE_REG(b0, r25, r17);;
+ RESTORE_REG(ar.k0, r25, r17);;
+ ld8 r1=[r25],0x08;;
+ ld8 r12=[r25],0x08;;
+ ld8 r13=[r25],0x08;;
+ RESTORE_REG(ar.fpsr, r25, r17);;
+ RESTORE_REG(ar.rnat, r25, r17);;
+ RESTORE_REG(ar.bspstore, r25, r17);;
+ RESTORE_REG(cr.dcr, r25, r17);;
+ RESTORE_REG(cr.iva, r25, r17);;
+ RESTORE_REG(cr.pta, r25, r17);;
+ RESTORE_REG(cr.itv, r25, r17);;
+ RESTORE_REG(cr.pmv, r25, r17);;
+ RESTORE_REG(cr.cmcv, r25, r17);;
+ RESTORE_REG(cr.lrr0, r25, r17);;
+ RESTORE_REG(cr.lrr1, r25, r17);;
+ /*
+ * Invalidate all TLB data/inst
+ */
+
+ br.sptk.many b2;; // jump to tlb purge code
+
+tlb_purge_done:
+ /*
+ * Now that we have done all the register restores
+ * we are now ready for the big DIVE to SAL Land
+ */
+ ssm psr.ic;;
+ srlz.d;;
+ br.ret.sptk.many b0;;
+END(ia64_jump_to_sal)
+#endif /* CONFIG_HOTPLUG_CPU */
+
#endif /* CONFIG_SMP */
diff -puN arch/ia64/kernel/smpboot.c~nbrz_no_macro arch/ia64/kernel/smpboot.c
--- release_work/arch/ia64/kernel/smpboot.c~nbrz_no_macro 2005-02-09 13:35:47.126217885 -0800
+++ release_work-araj/arch/ia64/kernel/smpboot.c 2005-02-09 13:35:47.153561635 -0800
@@ -9,6 +9,7 @@
* 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
* smp_boot_cpus()/smp_commence() is replaced by
* smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
+ * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
*/
#include <linux/config.h>
@@ -58,6 +59,37 @@
#define Dprintk(x...)
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Store all idle threads, this can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+struct task_struct *idle_thread_array[NR_CPUS];
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+/*
+ * start_ap in head.S uses this to store current booting cpu
+ * info.
+ */
+struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
+
+#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
+
+#define get_idle_for_cpu(x) (idle_thread_array[(x)])
+#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
+
+#else
+
+#define get_idle_for_cpu(x) (NULL)
+#define set_idle_for_cpu(x,p)
+#define set_brendez_area(x)
+#endif
+
/*
* ITC synchronization related stuff:
@@ -345,7 +377,6 @@ start_secondary (void *unused)
{
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-
Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
efi_map_pal_code();
cpu_init();
@@ -384,6 +415,13 @@ do_boot_cpu (int sapicid, int cpu)
.done = COMPLETION_INITIALIZER(c_idle.done),
};
DECLARE_WORK(work, do_fork_idle, &c_idle);
+
+ c_idle.idle = get_idle_for_cpu(cpu);
+ if (c_idle.idle) {
+ init_idle(c_idle.idle, cpu);
+ goto do_rest;
+ }
+
/*
* We can't use kernel_thread since we must avoid to reschedule the child.
*/
@@ -396,10 +434,15 @@ do_boot_cpu (int sapicid, int cpu)
if (IS_ERR(c_idle.idle))
panic("failed fork for CPU %d", cpu);
+
+ set_idle_for_cpu(cpu, c_idle.idle);
+
+do_rest:
task_for_booting_cpu = c_idle.idle;
Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
+ set_brendez_area(cpu);
platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
/*
@@ -570,16 +613,6 @@ void __devinit smp_prepare_boot_cpu(void
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(void);
/* must be called with cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
-{
- per_cpu(cpu_state,cpu) = CPU_UP_PREPARE;
- wmb();
-
- while (!cpu_online(cpu))
- cpu_relax();
- return 0;
-}
-
int __cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -592,7 +625,7 @@ int __cpu_disable(void)
fixup_irqs();
local_flush_tlb_all();
- printk ("Disabled cpu %u\n", smp_processor_id());
+ cpu_clear(cpu, cpu_callin_map);
return 0;
}
@@ -604,12 +637,7 @@ void __cpu_die(unsigned int cpu)
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD)
{
- /*
- * TBD: Enable this when physical removal
- * or when we put the processor is put in
- * SAL_BOOT_RENDEZ mode
- * cpu_clear(cpu, cpu_callin_map);
- */
+ printk ("CPU %d is now offline\n", cpu);
return;
}
msleep(100);
@@ -617,11 +645,6 @@ void __cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* !CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_enable(unsigned int cpu)
-{
- return 0;
-}
-
int __cpu_disable(void)
{
return -ENOSYS;
@@ -663,16 +686,12 @@ __cpu_up (unsigned int cpu)
return -EINVAL;
/*
- * Already booted.. just enable and get outa idle lool
+ * Already booted cpu? not valid anymore since we dont
+ * do idle loop tightspin anymore.
*/
if (cpu_isset(cpu, cpu_callin_map))
- {
- cpu_enable(cpu);
- local_irq_enable();
- while (!cpu_isset(cpu, cpu_online_map))
- mb();
- return 0;
- }
+ return -EINVAL;
+
/* Processor goes to start_secondary(), sets online flag */
ret = do_boot_cpu(sapicid, cpu);
if (ret < 0)
diff -puN arch/ia64/kernel/process.c~nbrz_no_macro arch/ia64/kernel/process.c
--- release_work/arch/ia64/kernel/process.c~nbrz_no_macro 2005-02-09 13:35:47.131100698 -0800
+++ release_work-araj/arch/ia64/kernel/process.c 2005-02-09 13:35:47.154538197 -0800
@@ -3,6 +3,7 @@
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
+ * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
*/
#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
#include <linux/config.h>
@@ -200,27 +201,20 @@ default_idle (void)
static inline void play_dead(void)
{
extern void ia64_cpu_local_tick (void);
+ unsigned int this_cpu = smp_processor_id();
+
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
- /* We shouldn't have to disable interrupts while dead, but
- * some interrupts just don't seem to go away, and this makes
- * it "work" for testing purposes. */
max_xtp();
local_irq_disable();
- /* Death loop */
- while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
- cpu_relax();
-
+ idle_task_exit();
+ ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
/*
- * Enable timer interrupts from now on
- * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+ * The above is a point of no-return, the processor is
+ * expected to be in SAL loop now.
*/
- local_flush_tlb_all();
- cpu_set(smp_processor_id(), cpu_online_map);
- wmb();
- ia64_cpu_local_tick ();
- local_irq_enable();
+ BUG();
}
#else
static inline void play_dead(void)
diff -puN arch/ia64/kernel/mca_asm.S~nbrz_no_macro arch/ia64/kernel/mca_asm.S
--- release_work/arch/ia64/kernel/mca_asm.S~nbrz_no_macro 2005-02-09 13:35:47.135983510 -0800
+++ release_work-araj/arch/ia64/kernel/mca_asm.S 2005-02-09 13:35:47.155514760 -0800
@@ -110,46 +110,19 @@
.global ia64_os_mca_dispatch_end
.global ia64_sal_to_os_handoff_state
.global ia64_os_to_sal_handoff_state
+ .global ia64_do_tlb_purge
.text
.align 16
-ia64_os_mca_dispatch:
-
- // Serialize all MCA processing
- mov r3=1;;
- LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
- xchg8 r4=[r2],r3;;
- cmp.ne p6,p0=r4,r0
-(p6) br ia64_os_mca_spin
-
- // Save the SAL to OS MCA handoff state as defined
- // by SAL SPEC 3.0
- // NOTE : The order in which the state gets saved
- // is dependent on the way the C-structure
- // for ia64_mca_sal_to_os_state_t has been
- // defined in include/asm/mca.h
- SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
- ;;
-
- // LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
- br ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
- LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
- ;;
- ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
- ;;
- tbit.nz p6,p7=r18,60
-(p7) br.spnt done_tlb_purge_and_reload
-
- // The following code purges TC and TR entries. Then reload all TC entries.
- // Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
+/*
+ * Just the TLB purge part is moved to a separate function
+ * so we can re-use the code for cpu hotplug code as well
+ * Caller should now setup b1, so we can branch once the
+ * tlb flush is complete.
+ */
+ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET
GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
@@ -230,6 +203,51 @@ begin_tlb_purge_and_reload:
;;
srlz.i
;;
+ // Now branch away to caller.
+ br.sptk.many b1
+ ;;
+
+ia64_os_mca_dispatch:
+
+ // Serialize all MCA processing
+ mov r3=1;;
+ LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
+ia64_os_mca_spin:
+ xchg8 r4=[r2],r3;;
+ cmp.ne p6,p0=r4,r0
+(p6) br ia64_os_mca_spin
+
+ // Save the SAL to OS MCA handoff state as defined
+ // by SAL SPEC 3.0
+ // NOTE : The order in which the state gets saved
+ // is dependent on the way the C-structure
+ // for ia64_mca_sal_to_os_state_t has been
+ // defined in include/asm/mca.h
+ SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+ ;;
+
+ // LOG PROCESSOR STATE INFO FROM HERE ON..
+begin_os_mca_dump:
+ br ia64_os_mca_proc_state_dump;;
+
+ia64_os_mca_done_dump:
+
+ LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+ ;;
+ ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
+ ;;
+ tbit.nz p6,p7=r18,60
+(p7) br.spnt done_tlb_purge_and_reload
+
+ // The following code purges TC and TR entries. Then reload all TC entries.
+ // Purge percpu data TC entries.
+begin_tlb_purge_and_reload:
+ movl r18=ia64_reload_tr;;
+ LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
+ mov b1=r18;;
+ br.sptk.many ia64_do_tlb_purge;;
+
+ia64_reload_tr:
// Finally reload the TR registers.
// 1. Reload DTR/ITR registers for kernel.
mov r18=KERNEL_TR_PAGE_SHIFT<<2
diff -puN include/asm-ia64/sal.h~nbrz_no_macro include/asm-ia64/sal.h
--- release_work/include/asm-ia64/sal.h~nbrz_no_macro 2005-02-09 13:35:47.139889760 -0800
+++ release_work-araj/include/asm-ia64/sal.h 2005-02-09 13:35:47.156491322 -0800
@@ -832,6 +832,37 @@ extern int ia64_sal_oemcall_nolock(struc
u64, u64, u64, u64, u64);
extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ */
+struct sal_to_os_boot {
+ u64 br0; /* return addr into SAL boot rendez routine */
+ u64 k0;
+ u64 gr1; /* SAL:GP */
+ u64 gr12; /* SAL:SP */
+ u64 gr13; /* SAL: Task Pointer */
+ u64 fpsr;
+ u64 rnat;
+ u64 bspstore;
+ u64 dcr; /* Default Control Register */
+ u64 iva;
+ u64 pta;
+ u64 itv;
+ u64 pmv;
+ u64 cmcv;
+ u64 lrr0;
+ u64 lrr1;
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
extern void ia64_sal_handler_init(void *entry_point, void *gpval);
_
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (6 preceding siblings ...)
2005-02-09 22:38 ` Ashok Raj
@ 2005-02-09 23:04 ` Alex Williamson
2005-02-09 23:12 ` Ashok Raj
2005-02-11 21:38 ` Ashok Raj
9 siblings, 0 replies; 11+ messages in thread
From: Alex Williamson @ 2005-02-09 23:04 UTC (permalink / raw)
To: linux-ia64
On Wed, 2005-02-09 at 14:38 -0800, Ashok Raj wrote:
> Could you give it a try on your systems and give an update.
>
> In the meantime, i will try to incorporate the missing pieces as another update
> to this patch.
Ashok,
I'm running it in a loop (not under load) on both a zx1 and an sx1000
based system now. Looks good on both. We should definitely try to be
architecturally complete on the registers that get preserved, but we
don't seem to have any obvious dependency on the missing ones. Thanks,
Alex
--
Alex Williamson HP Linux & Open Source Lab
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (7 preceding siblings ...)
2005-02-09 23:04 ` Alex Williamson
@ 2005-02-09 23:12 ` Ashok Raj
2005-02-11 21:38 ` Ashok Raj
9 siblings, 0 replies; 11+ messages in thread
From: Ashok Raj @ 2005-02-09 23:12 UTC (permalink / raw)
To: linux-ia64
On Wed, Feb 09, 2005 at 04:04:14PM -0700, Alex Williamson wrote:
> On Wed, 2005-02-09 at 14:38 -0800, Ashok Raj wrote:
>
> > Could you give it a try on your systems and give an update.
> >
> > In the meantime, i will try to incorporate the missing pieces as another update
> > to this patch.
>
> Ashok,
>
> I'm running it in a loop (not under load) on both a zx1 and an sx1000
> based system now. Looks good on both. We should definitely try to be
> architecturally complete on the registers that get preserved, but we
> don't seem to have any obvious dependency on the missing ones. Thanks,
>
I will work on the missing registers save/restore, but I am glad it's not
breaking anything.
Thanks a ton!
--
Cheers,
Ashok Raj
- Linux OS & Technology Team
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH] CPU hotplug returns CPUs to SAL
2005-02-09 17:40 [PATCH] CPU hotplug returns CPUs to SAL Alex Williamson
` (8 preceding siblings ...)
2005-02-09 23:12 ` Ashok Raj
@ 2005-02-11 21:38 ` Ashok Raj
9 siblings, 0 replies; 11+ messages in thread
From: Ashok Raj @ 2005-02-11 21:38 UTC (permalink / raw)
To: linux-ia64
Hi Tony
Here are updated patches for CPU SAL handoff. I have updated the patch
i sent earlier to account for some of the registers i missed
saving.
It should apply to the 2.6.11 release bk tree.
Thanks to Alex Williamson (HP) for offering to test the patches on HP
systems. It appears to run without any regressions so far on tiger4/zx1/zx1000
systems.
We have not yet tested them on other systems like sn2 yet. It would be great
if we can get some coverage there.
---
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
This patch is required to support cpu removal for IPF systems. Existing code
just fakes the real offline by keeping it run the idle thread, and polling
for the bit to re-appear in the cpu_state to get out of the idle loop.
For the cpu-offline to work correctly, we need to pass control of this CPU
back to SAL so it can continue in the boot-rendez mode. This gives the
SAL control to not pick this cpu as the monarch processor for global MCA
events, and in addition does not wait for this cpu to checkin with SAL
for global MCA events as well. The handoff is implemented as documented in
SAL specification section 3.2.5.1 "OS_BOOT_RENDEZ to SAL return State"
Once the processor is in this state, the cpu can be woken up again by sending
another wakeup IPI.
echo 0 > /sys/devices/system/cpu/cpu3/online
The above command will attempt to put the cpu offline and will hand off the cpu to SAL
echo 1 > /sys/devices/system/cpu/cpu3/online
We will now issue a ipi to wakeup the processor using the cpu_up()
Handling idle threads
Idle threads are created upon demand if one is not available for that logical
cpu number. If say a logical cpu 2 is removed, and a new cpu is inserted
the platform ACPI code handling CPU hotplug would find a new logical cpu
number to use. In which case if the number was cpu 2, the existing idle
thread is re-used.
Testing Done:
Only on tiger4 for stress.
Also tested by Alex Williamson (HP) on ZX1 and ZX1000.
Stable 24+hrs of repeated cpu online/offline of 3 processors in a
tiger4 system with ltpstress, make -j's running.
Early firmware does not work well when a processor is handed off to SAL,
and then injecting a recoverable MCA event, (at least the tiger ones).
The processors can be handed off to SAL and can be
brought back by another echo 1 to the appropriate online file.
---
release_work-araj/arch/ia64/kernel/head.S | 280 ++++++++++++++++++++++++---
release_work-araj/arch/ia64/kernel/mca_asm.S | 88 +++++---
release_work-araj/arch/ia64/kernel/process.c | 22 --
release_work-araj/arch/ia64/kernel/smpboot.c | 81 ++++---
release_work-araj/include/asm-ia64/sal.h | 38 +++
5 files changed, 399 insertions(+), 110 deletions(-)
diff -puN arch/ia64/kernel/head.S~nbrz_no_macro arch/ia64/kernel/head.S
--- release_work/arch/ia64/kernel/head.S~nbrz_no_macro 2005-02-09 13:35:47.000000000 -0800
+++ release_work-araj/arch/ia64/kernel/head.S 2005-02-11 11:24:36.161306879 -0800
@@ -15,6 +15,8 @@
* Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
* Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
* -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
+ * Support for CPU Hotplug
*/
#include <linux/config.h>
@@ -29,6 +31,134 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+#include <asm/mca_asm.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define SAL_PSR_BITS_TO_SET \
+ (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
+
+#define SAVE_FROM_REG(src, ptr, dest) \
+ mov dest=src;; \
+ st8 [ptr]=dest,0x08
+
+#define RESTORE_REG(reg, ptr, _tmp) \
+ ld8 _tmp=[ptr],0x08;; \
+ mov reg=_tmp
+
+#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\
+ mov ar.lc=IA64_NUM_DBG_REGS-1;; \
+ mov _idx=0;; \
+1: \
+ SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \
+ add _idx=1,_idx;; \
+ br.cloop.sptk.many 1b
+
+#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
+ mov ar.lc=IA64_NUM_DBG_REGS-1;; \
+ mov _idx=0;; \
+_lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \
+ add _idx=1, _idx;; \
+ br.cloop.sptk.many _lbl
+
+#define SAVE_ONE_RR(num, _reg, _tmp) \
+ movl _tmp=(num<<61);; \
+ mov _reg=rr[_tmp]
+
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+ SAVE_ONE_RR(0,_r0, _tmp);; \
+ SAVE_ONE_RR(1,_r1, _tmp);; \
+ SAVE_ONE_RR(2,_r2, _tmp);; \
+ SAVE_ONE_RR(3,_r3, _tmp);; \
+ SAVE_ONE_RR(4,_r4, _tmp);; \
+ SAVE_ONE_RR(5,_r5, _tmp);; \
+ SAVE_ONE_RR(6,_r6, _tmp);; \
+ SAVE_ONE_RR(7,_r7, _tmp);;
+
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+ st8 [ptr]=_r0, 8;; \
+ st8 [ptr]=_r1, 8;; \
+ st8 [ptr]=_r2, 8;; \
+ st8 [ptr]=_r3, 8;; \
+ st8 [ptr]=_r4, 8;; \
+ st8 [ptr]=_r5, 8;; \
+ st8 [ptr]=_r6, 8;; \
+ st8 [ptr]=_r7, 8;;
+
+#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \
+ mov ar.lc=0x08-1;; \
+ movl _idx1=0x00;; \
+RestRR: \
+ dep.z _idx2=_idx1,61,3;; \
+ ld8 _tmp=[ptr],8;; \
+ mov rr[_idx2]=_tmp;; \
+ srlz.d;; \
+ add _idx1=1,_idx1;; \
+ br.cloop.sptk.few RestRR
+
+/*
+ * Adjust region registers saved before starting to save
+ * break regs and rest of the states that need to be preserved.
+ */
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \
+ SAVE_FROM_REG(b0,_reg1,_reg2);; \
+ SAVE_FROM_REG(b1,_reg1,_reg2);; \
+ SAVE_FROM_REG(b2,_reg1,_reg2);; \
+ SAVE_FROM_REG(b3,_reg1,_reg2);; \
+ SAVE_FROM_REG(b4,_reg1,_reg2);; \
+ SAVE_FROM_REG(b5,_reg1,_reg2);; \
+ st8 [_reg1]=r1,0x08;; \
+ st8 [_reg1]=r12,0x08;; \
+ st8 [_reg1]=r13,0x08;; \
+ SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \
+ SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \
+ SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);; \
+ st8 [_reg1]=r4,0x08;; \
+ st8 [_reg1]=r5,0x08;; \
+ st8 [_reg1]=r6,0x08;; \
+ st8 [_reg1]=r7,0x08;; \
+ st8 [_reg1]=_pred,0x08;; \
+ SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \
+ stf.spill.nta [_reg1]=f2,16;; \
+ stf.spill.nta [_reg1]=f3,16;; \
+ stf.spill.nta [_reg1]=f4,16;; \
+ stf.spill.nta [_reg1]=f5,16;; \
+ stf.spill.nta [_reg1]=f16,16;; \
+ stf.spill.nta [_reg1]=f17,16;; \
+ stf.spill.nta [_reg1]=f18,16;; \
+ stf.spill.nta [_reg1]=f19,16;; \
+ stf.spill.nta [_reg1]=f20,16;; \
+ stf.spill.nta [_reg1]=f21,16;; \
+ stf.spill.nta [_reg1]=f22,16;; \
+ stf.spill.nta [_reg1]=f23,16;; \
+ stf.spill.nta [_reg1]=f24,16;; \
+ stf.spill.nta [_reg1]=f25,16;; \
+ stf.spill.nta [_reg1]=f26,16;; \
+ stf.spill.nta [_reg1]=f27,16;; \
+ stf.spill.nta [_reg1]=f28,16;; \
+ stf.spill.nta [_reg1]=f29,16;; \
+ stf.spill.nta [_reg1]=f30,16;; \
+ stf.spill.nta [_reg1]=f31,16;;
+
+#else
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2)
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#endif
+
+#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
+ movl _tmp1=(num << 61);; \
+ mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
+ mov rr[_tmp1]=_tmp2
.section __special_page_section,"ax"
@@ -64,6 +194,12 @@ start_ap:
srlz.i
;;
/*
+ * Save the region registers, predicate before they get clobbered
+ */
+ SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
+ mov r25=pr;;
+
+ /*
* Initialize kernel region registers:
* rr[0]: VHPT enabled, page size = PAGE_SHIFT
* rr[1]: VHPT enabled, page size = PAGE_SHIFT
@@ -76,32 +212,14 @@ start_ap:
* We initialize all of them to prevent inadvertently assuming
* something about the state of address translation early in boot.
*/
- mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
- movl r7=(0<<61)
- mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
- movl r9=(1<<61)
- mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
- movl r11=(2<<61)
- mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
- movl r13=(3<<61)
- mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
- movl r15=(4<<61)
- mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
- movl r17=(5<<61)
- mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
- movl r19=(6<<61)
- mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
- movl r21=(7<<61)
- ;;
- mov rr[r7]=r6
- mov rr[r9]=r8
- mov rr[r11]=r10
- mov rr[r13]=r12
- mov rr[r15]=r14
- mov rr[r17]=r16
- mov rr[r19]=r18
- mov rr[r21]=r20
- ;;
+ SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
+ SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
+ SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
+ SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
+ SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
+ SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
+ SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
+ SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
/*
* Now pin mappings into the TLB for kernel text and data
*/
@@ -142,6 +260,13 @@ start_ap:
;;
1: // now we are in virtual mode
+ movl r2=sal_state_for_booting_cpu;;
+ ld8 r16=[r2];;
+
+ STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
+ SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
+ ;;
+
// set IVT entry point---can't access I/O ports without it
movl r3=ia64_ivt
;;
@@ -211,12 +336,13 @@ start_ap:
mov IA64_KR(CURRENT_STACK)=r16
mov r13=r2
/*
- * Reserve space at the top of the stack for "struct pt_regs". Kernel threads
- * don't store interesting values in that structure, but the space still needs
- * to be there because time-critical stuff such as the context switching can
- * be implemented more efficiently (for example, __switch_to()
+ * Reserve space at the top of the stack for "struct pt_regs". Kernel
+ * threads don't store interesting values in that structure, but the space
+ * still needs to be there because time-critical stuff such as the context
+ * switching can be implemented more efficiently (for example, __switch_to()
* always sets the psr.dfh bit of the task it is switching to).
*/
+
addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE
mov ar.rsc=0 // place RSE in enforced lazy mode
@@ -993,4 +1119,98 @@ END(ia64_spinlock_contention)
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+GLOBAL_ENTRY(ia64_jump_to_sal)
+ alloc r16=ar.pfs,1,0,0,0;;
+ rsm psr.i | psr.ic
+{
+ flushrs
+ srlz.i
+}
+ tpa r25=in0
+ movl r18=tlb_purge_done;;
+ DATA_VA_TO_PA(r18);;
+ mov b1=r18 // Return location
+ movl r18=ia64_do_tlb_purge;;
+ DATA_VA_TO_PA(r18);;
+ mov b2=r18 // doing tlb_flush work
+ mov ar.rsc=0 // Put RSE in enforced lazy, LE mode
+ movl r17=1f;;
+ DATA_VA_TO_PA(r17);;
+ mov cr.iip=r17
+ movl r16=SAL_PSR_BITS_TO_SET;;
+ mov cr.ipsr=r16
+ mov cr.ifs=r0;;
+ rfi;;
+1:
+ /*
+ * Invalidate all TLB data/inst
+ */
+ br.sptk.many b2;; // jump to tlb purge code
+
+tlb_purge_done:
+ RESTORE_REGION_REGS(r25, r17,r18,r19);;
+ RESTORE_REG(b0, r25, r17);;
+ RESTORE_REG(b1, r25, r17);;
+ RESTORE_REG(b2, r25, r17);;
+ RESTORE_REG(b3, r25, r17);;
+ RESTORE_REG(b4, r25, r17);;
+ RESTORE_REG(b5, r25, r17);;
+ ld8 r1=[r25],0x08;;
+ ld8 r12=[r25],0x08;;
+ ld8 r13=[r25],0x08;;
+ RESTORE_REG(ar.fpsr, r25, r17);;
+ RESTORE_REG(ar.pfs, r25, r17);;
+ RESTORE_REG(ar.rnat, r25, r17);;
+ RESTORE_REG(ar.unat, r25, r17);;
+ RESTORE_REG(ar.bspstore, r25, r17);;
+ RESTORE_REG(cr.dcr, r25, r17);;
+ RESTORE_REG(cr.iva, r25, r17);;
+ RESTORE_REG(cr.pta, r25, r17);;
+ RESTORE_REG(cr.itv, r25, r17);;
+ RESTORE_REG(cr.pmv, r25, r17);;
+ RESTORE_REG(cr.cmcv, r25, r17);;
+ RESTORE_REG(cr.lrr0, r25, r17);;
+ RESTORE_REG(cr.lrr1, r25, r17);;
+ ld8 r4=[r25],0x08;;
+ ld8 r5=[r25],0x08;;
+ ld8 r6=[r25],0x08;;
+ ld8 r7=[r25],0x08;;
+ ld8 r17=[r25],0x08;;
+ mov pr=r17,-1;;
+ RESTORE_REG(ar.lc, r25, r17);;
+ /*
+ * Now Restore floating point regs
+ */
+ ldf.fill.nta f2=[r25],16;;
+ ldf.fill.nta f3=[r25],16;;
+ ldf.fill.nta f4=[r25],16;;
+ ldf.fill.nta f5=[r25],16;;
+ ldf.fill.nta f16=[r25],16;;
+ ldf.fill.nta f17=[r25],16;;
+ ldf.fill.nta f18=[r25],16;;
+ ldf.fill.nta f19=[r25],16;;
+ ldf.fill.nta f20=[r25],16;;
+ ldf.fill.nta f21=[r25],16;;
+ ldf.fill.nta f22=[r25],16;;
+ ldf.fill.nta f23=[r25],16;;
+ ldf.fill.nta f24=[r25],16;;
+ ldf.fill.nta f25=[r25],16;;
+ ldf.fill.nta f26=[r25],16;;
+ ldf.fill.nta f27=[r25],16;;
+ ldf.fill.nta f28=[r25],16;;
+ ldf.fill.nta f29=[r25],16;;
+ ldf.fill.nta f30=[r25],16;;
+ ldf.fill.nta f31=[r25],16;;
+
+ /*
+ * Now that we have done all the register restores
+ * we are now ready for the big DIVE to SAL Land
+ */
+ ssm psr.ic;;
+ srlz.d;;
+ br.ret.sptk.many b0;;
+END(ia64_jump_to_sal)
+#endif /* CONFIG_HOTPLUG_CPU */
+
#endif /* CONFIG_SMP */
diff -puN arch/ia64/kernel/smpboot.c~nbrz_no_macro arch/ia64/kernel/smpboot.c
--- release_work/arch/ia64/kernel/smpboot.c~nbrz_no_macro 2005-02-09 13:35:47.000000000 -0800
+++ release_work-araj/arch/ia64/kernel/smpboot.c 2005-02-11 11:21:33.965020049 -0800
@@ -9,6 +9,7 @@
* 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
* smp_boot_cpus()/smp_commence() is replaced by
* smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
+ * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
*/
#include <linux/config.h>
@@ -58,6 +59,37 @@
#define Dprintk(x...)
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Store all idle threads, this can be reused instead of creating
+ * a new thread. Also avoids complicated thread destroy functionality
+ * for idle threads.
+ */
+struct task_struct *idle_thread_array[NR_CPUS];
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+/*
+ * start_ap in head.S uses this to store current booting cpu
+ * info.
+ */
+struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
+
+#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
+
+#define get_idle_for_cpu(x) (idle_thread_array[(x)])
+#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
+
+#else
+
+#define get_idle_for_cpu(x) (NULL)
+#define set_idle_for_cpu(x,p)
+#define set_brendez_area(x)
+#endif
+
/*
* ITC synchronization related stuff:
@@ -345,7 +377,6 @@ start_secondary (void *unused)
{
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-
Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
efi_map_pal_code();
cpu_init();
@@ -384,6 +415,13 @@ do_boot_cpu (int sapicid, int cpu)
.done = COMPLETION_INITIALIZER(c_idle.done),
};
DECLARE_WORK(work, do_fork_idle, &c_idle);
+
+ c_idle.idle = get_idle_for_cpu(cpu);
+ if (c_idle.idle) {
+ init_idle(c_idle.idle, cpu);
+ goto do_rest;
+ }
+
/*
* We can't use kernel_thread since we must avoid to reschedule the child.
*/
@@ -396,10 +434,15 @@ do_boot_cpu (int sapicid, int cpu)
if (IS_ERR(c_idle.idle))
panic("failed fork for CPU %d", cpu);
+
+ set_idle_for_cpu(cpu, c_idle.idle);
+
+do_rest:
task_for_booting_cpu = c_idle.idle;
Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
+ set_brendez_area(cpu);
platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
/*
@@ -570,16 +613,6 @@ void __devinit smp_prepare_boot_cpu(void
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(void);
/* must be called with cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
-{
- per_cpu(cpu_state,cpu) = CPU_UP_PREPARE;
- wmb();
-
- while (!cpu_online(cpu))
- cpu_relax();
- return 0;
-}
-
int __cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -592,7 +625,7 @@ int __cpu_disable(void)
fixup_irqs();
local_flush_tlb_all();
- printk ("Disabled cpu %u\n", smp_processor_id());
+ cpu_clear(cpu, cpu_callin_map);
return 0;
}
@@ -604,12 +637,7 @@ void __cpu_die(unsigned int cpu)
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD)
{
- /*
- * TBD: Enable this when physical removal
- * or when we put the processor is put in
- * SAL_BOOT_RENDEZ mode
- * cpu_clear(cpu, cpu_callin_map);
- */
+ printk ("CPU %d is now offline\n", cpu);
return;
}
msleep(100);
@@ -617,11 +645,6 @@ void __cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* !CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_enable(unsigned int cpu)
-{
- return 0;
-}
-
int __cpu_disable(void)
{
return -ENOSYS;
@@ -663,16 +686,12 @@ __cpu_up (unsigned int cpu)
return -EINVAL;
/*
- * Already booted.. just enable and get outa idle lool
+ * Already booted cpu? not valid anymore since we dont
+ * do idle loop tightspin anymore.
*/
if (cpu_isset(cpu, cpu_callin_map))
- {
- cpu_enable(cpu);
- local_irq_enable();
- while (!cpu_isset(cpu, cpu_online_map))
- mb();
- return 0;
- }
+ return -EINVAL;
+
/* Processor goes to start_secondary(), sets online flag */
ret = do_boot_cpu(sapicid, cpu);
if (ret < 0)
diff -puN arch/ia64/kernel/process.c~nbrz_no_macro arch/ia64/kernel/process.c
--- release_work/arch/ia64/kernel/process.c~nbrz_no_macro 2005-02-09 13:35:47.000000000 -0800
+++ release_work-araj/arch/ia64/kernel/process.c 2005-02-09 13:35:47.154538197 -0800
@@ -3,6 +3,7 @@
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
+ * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
*/
#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
#include <linux/config.h>
@@ -200,27 +201,20 @@ default_idle (void)
static inline void play_dead(void)
{
extern void ia64_cpu_local_tick (void);
+ unsigned int this_cpu = smp_processor_id();
+
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
- /* We shouldn't have to disable interrupts while dead, but
- * some interrupts just don't seem to go away, and this makes
- * it "work" for testing purposes. */
max_xtp();
local_irq_disable();
- /* Death loop */
- while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
- cpu_relax();
-
+ idle_task_exit();
+ ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
/*
- * Enable timer interrupts from now on
- * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+ * The above is a point of no-return, the processor is
+ * expected to be in SAL loop now.
*/
- local_flush_tlb_all();
- cpu_set(smp_processor_id(), cpu_online_map);
- wmb();
- ia64_cpu_local_tick ();
- local_irq_enable();
+ BUG();
}
#else
static inline void play_dead(void)
diff -puN arch/ia64/kernel/mca_asm.S~nbrz_no_macro arch/ia64/kernel/mca_asm.S
--- release_work/arch/ia64/kernel/mca_asm.S~nbrz_no_macro 2005-02-09 13:35:47.000000000 -0800
+++ release_work-araj/arch/ia64/kernel/mca_asm.S 2005-02-09 13:35:47.155514760 -0800
@@ -110,46 +110,19 @@
.global ia64_os_mca_dispatch_end
.global ia64_sal_to_os_handoff_state
.global ia64_os_to_sal_handoff_state
+ .global ia64_do_tlb_purge
.text
.align 16
-ia64_os_mca_dispatch:
-
- // Serialize all MCA processing
- mov r3=1;;
- LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
- xchg8 r4=[r2],r3;;
- cmp.ne p6,p0=r4,r0
-(p6) br ia64_os_mca_spin
-
- // Save the SAL to OS MCA handoff state as defined
- // by SAL SPEC 3.0
- // NOTE : The order in which the state gets saved
- // is dependent on the way the C-structure
- // for ia64_mca_sal_to_os_state_t has been
- // defined in include/asm/mca.h
- SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
- ;;
-
- // LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
- br ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
- LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
- ;;
- ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
- ;;
- tbit.nz p6,p7=r18,60
-(p7) br.spnt done_tlb_purge_and_reload
-
- // The following code purges TC and TR entries. Then reload all TC entries.
- // Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
+/*
+ * Just the TLB purge part is moved to a separate function
+ * so we can re-use the code for cpu hotplug code as well
+ * Caller should now setup b1, so we can branch once the
+ * tlb flush is complete.
+ */
+ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET
GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
@@ -230,6 +203,51 @@ begin_tlb_purge_and_reload:
;;
srlz.i
;;
+ // Now branch away to caller.
+ br.sptk.many b1
+ ;;
+
+ia64_os_mca_dispatch:
+
+ // Serialize all MCA processing
+ mov r3=1;;
+ LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
+ia64_os_mca_spin:
+ xchg8 r4=[r2],r3;;
+ cmp.ne p6,p0=r4,r0
+(p6) br ia64_os_mca_spin
+
+ // Save the SAL to OS MCA handoff state as defined
+ // by SAL SPEC 3.0
+ // NOTE : The order in which the state gets saved
+ // is dependent on the way the C-structure
+ // for ia64_mca_sal_to_os_state_t has been
+ // defined in include/asm/mca.h
+ SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+ ;;
+
+ // LOG PROCESSOR STATE INFO FROM HERE ON..
+begin_os_mca_dump:
+ br ia64_os_mca_proc_state_dump;;
+
+ia64_os_mca_done_dump:
+
+ LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+ ;;
+ ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
+ ;;
+ tbit.nz p6,p7=r18,60
+(p7) br.spnt done_tlb_purge_and_reload
+
+ // The following code purges TC and TR entries. Then reload all TC entries.
+ // Purge percpu data TC entries.
+begin_tlb_purge_and_reload:
+ movl r18=ia64_reload_tr;;
+ LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
+ mov b1=r18;;
+ br.sptk.many ia64_do_tlb_purge;;
+
+ia64_reload_tr:
// Finally reload the TR registers.
// 1. Reload DTR/ITR registers for kernel.
mov r18=KERNEL_TR_PAGE_SHIFT<<2
diff -puN include/asm-ia64/sal.h~nbrz_no_macro include/asm-ia64/sal.h
--- release_work/include/asm-ia64/sal.h~nbrz_no_macro 2005-02-09 13:35:47.000000000 -0800
+++ release_work-araj/include/asm-ia64/sal.h 2005-02-11 11:24:36.159353754 -0800
@@ -832,6 +832,44 @@ extern int ia64_sal_oemcall_nolock(struc
u64, u64, u64, u64, u64);
extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S _start. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+ u64 rr[8]; /* Region Registers */
+ u64 br[6]; /* br0: return addr into SAL boot rendez routine */
+ u64 gr1; /* SAL:GP */
+ u64 gr12; /* SAL:SP */
+ u64 gr13; /* SAL: Task Pointer */
+ u64 fpsr;
+ u64 pfs;
+ u64 rnat;
+ u64 unat;
+ u64 bspstore;
+ u64 dcr; /* Default Control Register */
+ u64 iva;
+ u64 pta;
+ u64 itv;
+ u64 pmv;
+ u64 cmcv;
+ u64 lrr[2];
+ u64 gr[4];
+ u64 pr; /* Predicate registers */
+ u64 lc; /* Loop Count */
+ struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
extern void ia64_sal_handler_init(void *entry_point, void *gpval);
_
^ permalink raw reply [flat|nested] 11+ messages in thread