public inbox for linux-ia64@vger.kernel.org
 help / color / mirror / Atom feed
* [patch] per cpu MCA/INIT fixes.
@ 2005-01-06  3:08 Russ Anderson
  2005-01-06 17:14 ` Jesse Barnes
                   ` (10 more replies)
  0 siblings, 11 replies; 12+ messages in thread
From: Russ Anderson @ 2005-01-06  3:08 UTC (permalink / raw)
  To: linux-ia64

[patch] per cpu MCA/INIT fixes.

Tree: test-2.6.11

High level description:

  Testing uncovered problems in the per cpu MCA code.  
  The problems fixed in this patch are:

  * K.3 was not getting set on all cpus.

  * The pointer to each cpu's mca save area was getting incremented
    before being set, with the result that the last cpu's pointer
    was wrong.

  * Made contig.c changes corresponding to earlier discontig.c changes.

  * An offset into cpuinfo_ia64 structure was wrong in mca_asm.S.

  Special thanks to Keith Owens for helping test and identify problems.

Detailed description:

  include/asm-ia64/mca.h

	Defined PERCPU_MCA_SIZE.

  arch/ia64/mm/init.c

	Move the setting of k.3 into its own function, set_mca_pointer().

  arch/ia64/mm/discontig.c

	Save the paddr of the cpu mca save area before incrementing.

  arch/ia64/kernel/efi.c 

	Clean up the setting of PAL addresses as requested by Tony.

  arch/ia64/kernel/setup.c

	Call set_mca_pointer() so k.3 is set on all cpus.

  arch/ia64/mm/contig.c 

	Allocate per cpu MCA save areas.

  include/asm-ia64/percpu.h

	Save the paddr of the cpu mca save area, so k.3 is set consistently.

  arch/ia64/kernel/mca_asm.S

	Corrected offset into cpuinfo_ia64 structure.
	Removed hardcoded offsets.

  arch/ia64/kernel/asm-offsets.c

	Defined offset into cpuinfo_ia64.

  include/asm-ia64/mca_asm.h

	Defined macro to find offset into cpuinfo_ia64.

Testing:

  	Tested on SGI Altix by injecting memory multibit errors.  
	Additional testing on other platforms is welcome.

Signed-off-by: Russ Anderson <rja@sgi.com>

-----------------------------------------------------------------
Index: linux/include/asm-ia64/mca.h
===================================================================
--- linux.orig/include/asm-ia64/mca.h	2005-01-05 15:26:42.454994485 -0600
+++ linux/include/asm-ia64/mca.h	2005-01-05 15:33:50.641114843 -0600
@@ -61,6 +61,8 @@
 
 } ia64_mc_info_t;
 
+#define PERCPU_MCA_SIZE sizeof(struct ia64_mc_info_s)
+
 typedef struct ia64_mca_sal_to_os_state_s {
 	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
 	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
Index: linux/arch/ia64/mm/init.c
===================================================================
--- linux.orig/arch/ia64/mm/init.c	2005-01-05 15:26:42.455970944 -0600
+++ linux/arch/ia64/mm/init.c	2005-01-05 15:33:50.643067761 -0600
@@ -287,12 +287,32 @@
 	ia64_patch_gate();
 }
 
+void
+set_mca_pointer(struct cpuinfo_ia64 *cpuinfo, void *cpu_data)
+{
+	void *my_cpu_data = ia64_imva(cpu_data);
+
+        /*
+         * The MCA info structure was allocated earlier and a physical address pointer
+         * saved in __per_cpu_mca[cpu].  Move that pointer into the cpuinfo structure.
+         */
+
+        cpuinfo->ia64_pa_mca_data = (__u64 *)__per_cpu_mca[smp_processor_id()];
+
+        cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
+        ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(cpuinfo));
+
+        /*
+         * Set pal_base and pal_paddr in cpuinfo structure.
+         */
+        efi_get_pal_addr();
+}
+
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
 	unsigned long psr, pta, impl_va_bits;
 	extern void __devinit tlb_init (void);
-	struct cpuinfo_ia64 *cpuinfo;
 
 #ifdef CONFIG_DISABLE_VHPT
 #	define VHPT_ENABLE_BIT	0
@@ -357,22 +377,6 @@
 	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
 	ia64_srlz_d();
 #endif
-
-	/*
-	 * The MCA info structure was allocated earlier and a physical address pointer
-	 * saved in k3.  Move that pointer into the cpuinfo structure and save
-	 * the physical address of the cpuinfo structure in k3.
-	 */
-	cpuinfo = (struct cpuinfo_ia64 *)my_cpu_data;
-	cpuinfo->ia64_pa_mca_data = (__u64 *)ia64_get_kr(IA64_KR_PA_CPU_INFO);
-
-	cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
-	ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(my_cpu_data));
-
-	/*
-	 * Set pal_base and pal_paddr in cpuinfo structure.
-	 */
-	efi_get_pal_addr();
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
Index: linux/arch/ia64/mm/discontig.c
===================================================================
--- linux.orig/arch/ia64/mm/discontig.c	2005-01-05 15:26:42.455970944 -0600
+++ linux/arch/ia64/mm/discontig.c	2005-01-05 15:33:50.644044221 -0600
@@ -348,12 +348,12 @@
 		mem_data[node].node_data = __va(pernode);
 		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-		mca_data_phys = (void *)pernode;
-		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
-
 		mem_data[node].pgdat->bdata = bdp;
 		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
+		mca_data_phys = (void *)pernode;
+		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
+
 		/*
 		 * Copy the static per-cpu data into the region we
 		 * just set aside and then setup __per_cpu_offset
@@ -368,13 +368,11 @@
 					 * The memory for the cpuinfo structure is allocated
 					 * here, but the data in the structure is initialized
 					 * later.  Save the physical address of the MCA save
-					 * area in IA64_KR_PA_CPU_INFO.  When the cpuinfo struct 
-					 * is initialized, the value in IA64_KR_PA_CPU_INFO
-					 * will be put in the cpuinfo structure and 
-					 * IA64_KR_PA_CPU_INFO will be set to the physical
-					 * addresss of the cpuinfo structure.
+					 * area in __per_cpu_mca[cpu].  When the cpuinfo struct 
+					 * is initialized, the value in __per_cpu_mca[cpu]
+					 * will be put in the cpuinfo structure.
 					 */
-					ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(mca_data_phys));
+					__per_cpu_mca[cpu] = __pa(mca_data_phys);
 					mca_data_phys += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t));
 				}
 				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
Index: linux/arch/ia64/kernel/efi.c
===================================================================
--- linux.orig/arch/ia64/kernel/efi.c	2005-01-05 15:26:42.456947403 -0600
+++ linux/arch/ia64/kernel/efi.c	2005-01-05 15:33:50.645997139 -0600
@@ -414,15 +414,15 @@
  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
  * Abstraction Layer chapter 11 in ADAG
  */
-void
-efi_map_pal_code (void)
+
+static efi_memory_desc_t *
+pal_code_memdesc (void)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
 	u64 efi_desc_size;
 	int pal_code_count = 0;
-	u64 mask, psr;
-	u64 vaddr;
+	u64 vaddr, mask;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -466,91 +466,58 @@
 		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
 			panic("Woah!  PAL code size bigger than a granule!");
 
-		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 #if EFI_DEBUG
+		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
 		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
-		       smp_processor_id(), md->phys_addr,
-		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+			smp_processor_id(), md->phys_addr,
+			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
-
-		/*
-		 * Cannot write to CRx with PSR.ic=1
-		 */
-		psr = ia64_clear_ic();
-		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
-			 IA64_GRANULE_SHIFT);
-		ia64_set_psr(psr);		/* restore psr */
-		ia64_srlz_i();
-
+		return md;
 	}
+
+	return NULL;
 }
 
-/* 
- * Put pal_base and pal_paddr in the cpuinfo structure.
- */
 void
-efi_get_pal_addr(void)
+efi_get_pal_addr (void)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size;
-	int pal_code_count = 0;
-	u64 mask;
-	u64 vaddr;
+	efi_memory_desc_t *md = pal_code_memdesc();
+	u64 vaddr, mask;
 	struct cpuinfo_ia64 *cpuinfo;
 
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-		if (md->type != EFI_PAL_CODE)
-			continue;
+	if (md != NULL) {
 
-		if (++pal_code_count > 1) {
-			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
-			       md->phys_addr);
-			continue;
-		}
-		/*
-		 * The only ITLB entry in region 7 that is used is the one installed by
-		 * __start().  That entry covers a 64MB range.
-		 */
-		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
 		vaddr = PAGE_OFFSET + md->phys_addr;
-
-		/*
-		 * We must check that the PAL mapping won't overlap with the kernel
-		 * mapping.
-		 *
-		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
-		 * 256KB and that only one ITR is needed to map it. This implies that the
-		 * PAL code is always aligned on its size, i.e., the closest matching page
-		 * size supported by the TLB. Therefore PAL code is guaranteed never to
-		 * cross a 64MB unless it is bigger than 64MB (very unlikely!).  So for
-		 * now the following test is enough to determine whether or not we need a
-		 * dedicated ITR for the PAL code.
-		 */
-		if ((vaddr & mask) == (KERNEL_START & mask)) {
-			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
-			       __FUNCTION__);
-			continue;
-		}
-
-		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
-			panic("Woah!  PAL code size bigger than a granule!");
-
 		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 
-		/* insert this TR into our list for MCA recovery purposes */
 		cpuinfo = (struct cpuinfo_ia64 *)__va(ia64_get_kr(IA64_KR_PA_CPU_INFO));
 		cpuinfo->pal_base = vaddr & mask;
 		cpuinfo->pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
-		printk(KERN_INFO "CPU %d: late efi pal_base 0x%lx pal_paddr 0x%lx\n",
-			smp_processor_id(), cpuinfo->pal_base, cpuinfo->pal_paddr);
+	}
+}
+
+void
+efi_map_pal_code (void)
+{
+	efi_memory_desc_t *md = pal_code_memdesc();
+	u64 vaddr, mask, psr;
+
+	if (md != NULL) {
+
+		vaddr = PAGE_OFFSET + md->phys_addr;
+		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
+		/*
+		 * Cannot write to CRx with PSR.ic=1
+		 */
+		psr = ia64_clear_ic();
+		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
+			pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+			IA64_GRANULE_SHIFT);
+		ia64_set_psr(psr);		/* restore psr */
+		ia64_srlz_i();
 	}
 }
 
Index: linux/arch/ia64/kernel/setup.c
===================================================================
--- linux.orig/arch/ia64/kernel/setup.c	2005-01-05 15:26:42.456947403 -0600
+++ linux/arch/ia64/kernel/setup.c	2005-01-05 15:33:50.646973598 -0600
@@ -58,6 +58,7 @@
 
 #ifdef CONFIG_SMP
 unsigned long __per_cpu_offset[NR_CPUS];
+unsigned long __per_cpu_mca[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
@@ -574,6 +575,7 @@
 cpu_init (void)
 {
 	extern void __devinit ia64_mmu_init (void *);
+	extern void set_mca_pointer (struct cpuinfo_ia64 *, void *);
 	unsigned long num_phys_stacked;
 	pal_vm_info_2_u_t vmi;
 	unsigned int max_ctx;
@@ -628,6 +630,7 @@
 		BUG();
 
 	ia64_mmu_init(ia64_imva(cpu_data));
+	set_mca_pointer(cpu_info, cpu_data);
 
 #ifdef CONFIG_IA32_SUPPORT
 	ia32_cpu_init();
Index: linux/arch/ia64/mm/contig.c
===================================================================
--- linux.orig/arch/ia64/mm/contig.c	2005-01-05 15:26:42.456947403 -0600
+++ linux/arch/ia64/mm/contig.c	2005-01-05 15:33:50.646973598 -0600
@@ -25,6 +25,10 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 
+#ifdef CONFIG_SMP
+#include <asm/mca.h>
+#endif
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
 #endif
@@ -177,7 +181,7 @@
 void *
 per_cpu_init (void)
 {
-	void *cpu_data;
+	void *cpu_data, *mca_data;
 	int cpu;
 
 	/*
@@ -188,11 +192,15 @@
 	if (smp_processor_id() == 0) {
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+		mca_data = __alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS,
+					   PERCPU_MCA_SIZE, __pa(MAX_DMA_ADDRESS));
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
 			cpu_data += PERCPU_PAGE_SIZE;
 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+			__per_cpu_mca[cpu] = (unsigned long)mca_data;
+			mca_data += PERCPU_MCA_SIZE;
 		}
 	}
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
Index: linux/include/asm-ia64/percpu.h
===================================================================
--- linux.orig/include/asm-ia64/percpu.h	2005-01-05 15:26:42.454994485 -0600
+++ linux/include/asm-ia64/percpu.h	2005-01-05 15:33:50.647950057 -0600
@@ -37,6 +37,7 @@
 #ifdef CONFIG_SMP
 
 extern unsigned long __per_cpu_offset[NR_CPUS];
+extern unsigned long __per_cpu_mca[NR_CPUS];
 
 /* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
 DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
Index: linux/arch/ia64/kernel/mca_asm.S
===================================================================
--- linux.orig/arch/ia64/kernel/mca_asm.S	2005-01-05 15:26:42.468664912 -0600
+++ linux/arch/ia64/kernel/mca_asm.S	2005-01-05 19:44:39.210989487 -0600
@@ -147,7 +147,6 @@
 	GET_PERCPU_PADDR(r2)	// paddr of percpu_paddr in cpuinfo struct
 	;;
 	mov	r17=r2
-	mov 	r23=r2		// save current ia64_mca_percpu_info addr pointer.
 	;;
 	adds r17=8,r17
 	;;
@@ -247,7 +246,9 @@
 	srlz.d
 	;;
 	// 2. Reload DTR register for PERCPU data.
-	mov r17=r23
+	GET_PERCPU_PADDR(r2)		// paddr of percpu_paddr in cpuinfo struct
+	;;
+	mov r17=r2
 	movl r16=PERCPU_ADDR		// vaddr
 	movl r18=PERCPU_PAGE_SHIFT<<2
 	;;
@@ -262,7 +263,9 @@
 	srlz.d
 	;;
 	// 3. Reload ITR for PAL code.
-	adds r17=40,r23
+	GET_CPUINFO_PAL_PADDR(r2)	// paddr of pal_paddr in cpuinfo struct
+	;;
+	mov r17=r2
 	;;
 	ld8 r18=[r17],8			// pte
 	;;
Index: linux/arch/ia64/kernel/asm-offsets.c
===================================================================
--- linux.orig/arch/ia64/kernel/asm-offsets.c	2004-12-19 16:54:37.192672402 -0600
+++ linux/arch/ia64/kernel/asm-offsets.c	2005-01-05 19:37:59.838371971 -0600
@@ -205,6 +205,7 @@
 	BLANK();
 	/* used by arch/ia64/kernel/mca_asm.S */
 	DEFINE(IA64_CPUINFO_PERCPU_PADDR, offsetof (struct cpuinfo_ia64, percpu_paddr));
+	DEFINE(IA64_CPUINFO_PAL_PADDR, offsetof (struct cpuinfo_ia64, pal_paddr));
 	DEFINE(IA64_CPUINFO_PA_MCA_INFO, offsetof (struct cpuinfo_ia64, ia64_pa_mca_data));
 	DEFINE(IA64_MCA_PROC_STATE_DUMP, offsetof (struct ia64_mca_cpu_s, ia64_mca_proc_state_dump));
 	DEFINE(IA64_MCA_STACK, offsetof (struct ia64_mca_cpu_s, ia64_mca_stack));
Index: linux/include/asm-ia64/mca_asm.h
===================================================================
--- linux.orig/include/asm-ia64/mca_asm.h	2004-12-19 16:54:52.890254006 -0600
+++ linux/include/asm-ia64/mca_asm.h	2005-01-05 19:44:43.369738007 -0600
@@ -53,6 +53,10 @@
 	mov	reg	= ar.k3;;						\
 	addl	reg	= IA64_CPUINFO_PERCPU_PADDR,reg
 
+#define GET_CPUINFO_PAL_PADDR(reg)						\
+	mov	reg	= ar.k3;;						\
+	addl	reg	= IA64_CPUINFO_PAL_PADDR,reg
+
 /*
  * This macro gets the physical address of this cpu's MCA save structure.
  */
-----------------------------------------------------------------
-- 
Russ Anderson, OS RAS/Partitioning Project Lead  
SGI - Silicon Graphics Inc          rja@sgi.com

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
@ 2005-01-06 17:14 ` Jesse Barnes
  2005-01-06 18:21 ` Russ Anderson
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Jesse Barnes @ 2005-01-06 17:14 UTC (permalink / raw)
  To: linux-ia64

On Wednesday, January 5, 2005 7:08 pm, Russ Anderson wrote:
>  #ifdef CONFIG_VIRTUAL_MEM_MAP
> Index: linux/arch/ia64/mm/discontig.c
> ===================================================================
> --- linux.orig/arch/ia64/mm/discontig.c 2005-01-05 15:26:42.455970944 -0600
> +++ linux/arch/ia64/mm/discontig.c 2005-01-05 15:33:50.644044221 -0600
> @@ -348,12 +348,12 @@
>    mem_data[node].node_data = __va(pernode);
>    pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
>
> -  mca_data_phys = (void *)pernode;
> -  pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
> -
>    mem_data[node].pgdat->bdata = bdp;
>    pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
>
> +  mca_data_phys = (void *)pernode;
> +  pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
> +

The above will probably conflict with Jack's recent change to stagger the 
per-node structures by one cacheline.  You may have to rediff against one of 
Tony's test trees.

> +#ifdef CONFIG_SMP
> +#include <asm/mca.h>
> +#endif

Just a nit, header files should always be safe to include unconditionally.  If 
they're not, the headers need fixing (barring out of tree stuff like kdb of 
course).

Jesse


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
  2005-01-06 17:14 ` Jesse Barnes
@ 2005-01-06 18:21 ` Russ Anderson
  2005-01-06 18:28 ` Luck, Tony
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Russ Anderson @ 2005-01-06 18:21 UTC (permalink / raw)
  To: linux-ia64

Jesse Barnes wrote:
> On Wednesday, January 5, 2005 7:08 pm, Russ Anderson wrote:
> > Index: linux/arch/ia64/mm/discontig.c
> > ===================================================================
> > --- linux.orig/arch/ia64/mm/discontig.c 2005-01-05 15:26:42.455970944 -0600
> > +++ linux/arch/ia64/mm/discontig.c 2005-01-05 15:33:50.644044221 -0600
> > @@ -348,12 +348,12 @@
> >    mem_data[node].node_data = __va(pernode);
> >    pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
> >
> > -  mca_data_phys = (void *)pernode;
> > -  pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
> > -
> >    mem_data[node].pgdat->bdata = bdp;
> >    pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
> >
> > +  mca_data_phys = (void *)pernode;
> > +  pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
> 
> The above will probably conflict with Jack's recent change to stagger the 
> per-node structures by one cacheline.  You may have to rediff against one of 
> Tony's test trees.

Yea, I wasn't quite sure what to do about that.  Jack's change had not
been accepted, so I didn't want to make my change dependent on it.
His change is only two lines, so it's easy to change my patch if needed.

FWIW, the lines don't conflict and my patch applies correctly on top of
Jack's change.

patching file arch/ia64/mm/discontig.c
Hunk #1 succeeded at 350 (offset 2 lines).
Hunk #2 succeeded at 370 (offset 2 lines).


-- 
Russ Anderson, OS RAS/Partitioning Project Lead  
SGI - Silicon Graphics Inc          rja@sgi.com

^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
  2005-01-06 17:14 ` Jesse Barnes
  2005-01-06 18:21 ` Russ Anderson
@ 2005-01-06 18:28 ` Luck, Tony
  2005-01-06 18:36 ` Jesse Barnes
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Luck, Tony @ 2005-01-06 18:28 UTC (permalink / raw)
  To: linux-ia64

>The above will probably conflict with Jack's recent change to 
>stagger the per-node structures by one cacheline.  You may have to rediff 
>against one of Tony's test trees.

I just applied and pushed Jack's patch to the test-2.6.11 tree, Russ, can
you fix up your patch to apply cleanly on top of Jack's.

>> +#ifdef CONFIG_SMP
>> +#include <asm/mca.h>
>> +#endif
>
>Just a nit, header files should always be safe to include 
>unconditionally.  If they're not, the headers need fixing (barring out of tree 
>stuff like kdb of course).

Agreed ... this looks really ugly ... surely we want to support MCA
on uni-processors as well as SMP!  What's the issue that provoked
adding the #ifdef here?

-Tony

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (2 preceding siblings ...)
  2005-01-06 18:28 ` Luck, Tony
@ 2005-01-06 18:36 ` Jesse Barnes
  2005-01-06 20:50 ` Russ Anderson
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Jesse Barnes @ 2005-01-06 18:36 UTC (permalink / raw)
  To: linux-ia64

On Thursday, January 6, 2005 10:21 am, Russ Anderson wrote:
> Yea, I wasn't quite sure what to do about that.  Jack's change had not
> been accepted, so I didn't want to make my change dependent on it.
> His change is only two lines, so it's easy to change my patch if needed.
>
> FWIW, the lines don't conflict and my patch applies correctly on top of
> Jack's change.
>
> patching file arch/ia64/mm/discontig.c
> Hunk #1 succeeded at 350 (offset 2 lines).
> Hunk #2 succeeded at 370 (offset 2 lines).

Oh, ok, that looks good then.  I guess you just need to fix the #ifdef around 
#include <asm/mca.h> then.

Thanks,
Jesse

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (3 preceding siblings ...)
  2005-01-06 18:36 ` Jesse Barnes
@ 2005-01-06 20:50 ` Russ Anderson
  2005-01-06 23:17 ` Russ Anderson
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Russ Anderson @ 2005-01-06 20:50 UTC (permalink / raw)
  To: linux-ia64

Tony Luck wrote:
> 
> I just applied and pushed Jack's patch to the test-2.6.11 tree, Russ, can
> you fix up your patch to apply cleanly on top of Jack's.

Yup.
 
> >> +#ifdef CONFIG_SMP
> >> +#include <asm/mca.h>
> >> +#endif
> >
> >Just a nit, header files should always be safe to include 
> >unconditionally.  If they're not, the headers need fixing (barring out of tree 
> >stuff like kdb of course).
> 
> Agreed ... this looks really ugly ... surely we want to support MCA
> on uni-processors as well as SMP!  What's the issue that provoked
> adding the #ifdef here?

The only reason was that the changes are all in per_cpu_init() and
that routine is inside #ifdef CONFIG_SMP.

I built the patch with all configs in arch/ia64/configs, but now 
see that they all have CONFIG_SMP=y.  (sigh)

Given all the infrastructure setup done under #ifdef CONFIG_SMP, would
it work with uni-processor?  I'll start digging through the code...

-- 
Russ Anderson, OS RAS/Partitioning Project Lead  
SGI - Silicon Graphics Inc          rja@sgi.com

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (4 preceding siblings ...)
  2005-01-06 20:50 ` Russ Anderson
@ 2005-01-06 23:17 ` Russ Anderson
  2005-01-07  6:51 ` Luck, Tony
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Russ Anderson @ 2005-01-06 23:17 UTC (permalink / raw)
  To: linux-ia64

Tony Luck wrote:
> >The above will probably conflict with Jack's recent change to 
> >stagger the per-node structures by one cacheline.  You may have to rediff 
> >against one of Tony's test trees.
> 
> I just applied and pushed Jack's patch to the test-2.6.11 tree, Russ, can
> you fix up your patch to apply cleanly on top of Jack's.
> 
> >> +#ifdef CONFIG_SMP
> >> +#include <asm/mca.h>
> >> +#endif
> >
> >Just a nit, header files should always be safe to include 
> >unconditionally.  If they're not, the headers need fixing (barring out of tree 
> >stuff like kdb of course).
> 
> Agreed ... this looks really ugly ... surely we want to support MCA
> on uni-processors as well as SMP!  What's the issue that provoked
> adding the #ifdef here?

An updated patch that applies on Jack's and builds with zx1 & tiger UP.
The SGI UP is previously broke.  Jesse is working on that.


--------------------------------------------------------------------------------------
Index: linux/include/asm-ia64/mca.h
===================================================================
--- linux.orig/include/asm-ia64/mca.h	2005-01-06 13:16:41.149172632 -0600
+++ linux/include/asm-ia64/mca.h	2005-01-06 16:02:51.004769134 -0600
@@ -61,6 +61,8 @@
 
 } ia64_mc_info_t;
 
+#define PERCPU_MCA_SIZE sizeof(struct ia64_mc_info_s)
+
 typedef struct ia64_mca_sal_to_os_state_s {
 	u64		imsto_os_gp;		/* GP of the os registered with the SAL */
 	u64		imsto_pal_proc;		/* PAL_PROC entry point - physical addr */
Index: linux/arch/ia64/mm/init.c
===================================================================
--- linux.orig/arch/ia64/mm/init.c	2005-01-06 13:16:41.150149091 -0600
+++ linux/arch/ia64/mm/init.c	2005-01-06 13:19:59.042306891 -0600
@@ -287,12 +287,32 @@
 	ia64_patch_gate();
 }
 
+void
+set_mca_pointer(struct cpuinfo_ia64 *cpuinfo, void *cpu_data)
+{
+	void *my_cpu_data = ia64_imva(cpu_data);
+
+        /*
+         * The MCA info structure was allocated earlier and a physical address pointer
+         * saved in __per_cpu_mca[cpu].  Move that pointer into the cpuinfo structure.
+         */
+
+        cpuinfo->ia64_pa_mca_data = (__u64 *)__per_cpu_mca[smp_processor_id()];
+
+        cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
+        ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(cpuinfo));
+
+        /*
+         * Set pal_base and pal_paddr in cpuinfo structure.
+         */
+        efi_get_pal_addr();
+}
+
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
 	unsigned long psr, pta, impl_va_bits;
 	extern void __devinit tlb_init (void);
-	struct cpuinfo_ia64 *cpuinfo;
 
 #ifdef CONFIG_DISABLE_VHPT
 #	define VHPT_ENABLE_BIT	0
@@ -357,22 +377,6 @@
 	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
 	ia64_srlz_d();
 #endif
-
-	/*
-	 * The MCA info structure was allocated earlier and a physical address pointer
-	 * saved in k3.  Move that pointer into the cpuinfo structure and save
-	 * the physical address of the cpuinfo structure in k3.
-	 */
-	cpuinfo = (struct cpuinfo_ia64 *)my_cpu_data;
-	cpuinfo->ia64_pa_mca_data = (__u64 *)ia64_get_kr(IA64_KR_PA_CPU_INFO);
-
-	cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
-	ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(my_cpu_data));
-
-	/*
-	 * Set pal_base and pal_paddr in cpuinfo structure.
-	 */
-	efi_get_pal_addr();
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
Index: linux/arch/ia64/mm/discontig.c
===================================================================
--- linux.orig/arch/ia64/mm/discontig.c	2005-01-06 13:16:56.624098123 -0600
+++ linux/arch/ia64/mm/discontig.c	2005-01-06 13:19:59.044259810 -0600
@@ -350,12 +350,12 @@
 		mem_data[node].node_data = __va(pernode);
 		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-		mca_data_phys = (void *)pernode;
-		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
-
 		mem_data[node].pgdat->bdata = bdp;
 		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
+		mca_data_phys = (void *)pernode;
+		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
+
 		/*
 		 * Copy the static per-cpu data into the region we
 		 * just set aside and then setup __per_cpu_offset
@@ -370,13 +370,11 @@
 					 * The memory for the cpuinfo structure is allocated
 					 * here, but the data in the structure is initialized
 					 * later.  Save the physical address of the MCA save
-					 * area in IA64_KR_PA_CPU_INFO.  When the cpuinfo struct 
-					 * is initialized, the value in IA64_KR_PA_CPU_INFO
-					 * will be put in the cpuinfo structure and 
-					 * IA64_KR_PA_CPU_INFO will be set to the physical
-					 * addresss of the cpuinfo structure.
+					 * area in __per_cpu_mca[cpu].  When the cpuinfo struct 
+					 * is initialized, the value in __per_cpu_mca[cpu]
+					 * will be put in the cpuinfo structure.
 					 */
-					ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(mca_data_phys));
+					__per_cpu_mca[cpu] = __pa(mca_data_phys);
 					mca_data_phys += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t));
 				}
 				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
Index: linux/arch/ia64/kernel/efi.c
===================================================================
--- linux.orig/arch/ia64/kernel/efi.c	2005-01-06 13:16:41.170654735 -0600
+++ linux/arch/ia64/kernel/efi.c	2005-01-06 13:19:59.056953780 -0600
@@ -414,15 +414,15 @@
  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
  * Abstraction Layer chapter 11 in ADAG
  */
-void
-efi_map_pal_code (void)
+
+static efi_memory_desc_t *
+pal_code_memdesc (void)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
 	u64 efi_desc_size;
 	int pal_code_count = 0;
-	u64 mask, psr;
-	u64 vaddr;
+	u64 vaddr, mask;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -466,91 +466,58 @@
 		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
 			panic("Woah!  PAL code size bigger than a granule!");
 
-		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 #if EFI_DEBUG
+		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
 		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
-		       smp_processor_id(), md->phys_addr,
-		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+			smp_processor_id(), md->phys_addr,
+			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
-
-		/*
-		 * Cannot write to CRx with PSR.ic=1
-		 */
-		psr = ia64_clear_ic();
-		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
-			 IA64_GRANULE_SHIFT);
-		ia64_set_psr(psr);		/* restore psr */
-		ia64_srlz_i();
-
+		return md;
 	}
+
+	return NULL;
 }
 
-/* 
- * Put pal_base and pal_paddr in the cpuinfo structure.
- */
 void
-efi_get_pal_addr(void)
+efi_get_pal_addr (void)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size;
-	int pal_code_count = 0;
-	u64 mask;
-	u64 vaddr;
+	efi_memory_desc_t *md = pal_code_memdesc();
+	u64 vaddr, mask;
 	struct cpuinfo_ia64 *cpuinfo;
 
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-		if (md->type != EFI_PAL_CODE)
-			continue;
+	if (md != NULL) {
 
-		if (++pal_code_count > 1) {
-			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
-			       md->phys_addr);
-			continue;
-		}
-		/*
-		 * The only ITLB entry in region 7 that is used is the one installed by
-		 * __start().  That entry covers a 64MB range.
-		 */
-		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
 		vaddr = PAGE_OFFSET + md->phys_addr;
-
-		/*
-		 * We must check that the PAL mapping won't overlap with the kernel
-		 * mapping.
-		 *
-		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
-		 * 256KB and that only one ITR is needed to map it. This implies that the
-		 * PAL code is always aligned on its size, i.e., the closest matching page
-		 * size supported by the TLB. Therefore PAL code is guaranteed never to
-		 * cross a 64MB unless it is bigger than 64MB (very unlikely!).  So for
-		 * now the following test is enough to determine whether or not we need a
-		 * dedicated ITR for the PAL code.
-		 */
-		if ((vaddr & mask) == (KERNEL_START & mask)) {
-			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
-			       __FUNCTION__);
-			continue;
-		}
-
-		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
-			panic("Woah!  PAL code size bigger than a granule!");
-
 		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 
-		/* insert this TR into our list for MCA recovery purposes */
 		cpuinfo = (struct cpuinfo_ia64 *)__va(ia64_get_kr(IA64_KR_PA_CPU_INFO));
 		cpuinfo->pal_base = vaddr & mask;
 		cpuinfo->pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
-		printk(KERN_INFO "CPU %d: late efi pal_base 0x%lx pal_paddr 0x%lx\n",
-			smp_processor_id(), cpuinfo->pal_base, cpuinfo->pal_paddr);
+	}
+}
+
+void
+efi_map_pal_code (void)
+{
+	efi_memory_desc_t *md = pal_code_memdesc();
+	u64 vaddr, mask, psr;
+
+	if (md != NULL) {
+
+		vaddr = PAGE_OFFSET + md->phys_addr;
+		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
+		/*
+		 * Cannot write to CRx with PSR.ic=1
+		 */
+		psr = ia64_clear_ic();
+		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
+			pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+			IA64_GRANULE_SHIFT);
+		ia64_set_psr(psr);		/* restore psr */
+		ia64_srlz_i();
 	}
 }
 
Index: linux/arch/ia64/kernel/setup.c
===================================================================
--- linux.orig/arch/ia64/kernel/setup.c	2005-01-06 13:16:41.171631194 -0600
+++ linux/arch/ia64/kernel/setup.c	2005-01-06 15:24:29.152848789 -0600
@@ -60,6 +60,7 @@
 unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
+unsigned long __per_cpu_mca[NR_CPUS];
 
 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
@@ -574,6 +575,7 @@
 cpu_init (void)
 {
 	extern void __devinit ia64_mmu_init (void *);
+	extern void set_mca_pointer (struct cpuinfo_ia64 *, void *);
 	unsigned long num_phys_stacked;
 	pal_vm_info_2_u_t vmi;
 	unsigned int max_ctx;
@@ -628,6 +630,7 @@
 		BUG();
 
 	ia64_mmu_init(ia64_imva(cpu_data));
+	set_mca_pointer(cpu_info, cpu_data);
 
 #ifdef CONFIG_IA32_SUPPORT
 	ia32_cpu_init();
Index: linux/arch/ia64/mm/contig.c
===================================================================
--- linux.orig/arch/ia64/mm/contig.c	2005-01-06 13:16:41.151125551 -0600
+++ linux/arch/ia64/mm/contig.c	2005-01-06 16:01:15.956171788 -0600
@@ -24,6 +24,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/mca.h>
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
@@ -177,7 +178,7 @@
 void *
 per_cpu_init (void)
 {
-	void *cpu_data;
+	void *cpu_data, *mca_data;
 	int cpu;
 
 	/*
@@ -188,11 +189,15 @@
 	if (smp_processor_id() == 0) {
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+		mca_data = __alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS,
+					   PERCPU_MCA_SIZE, __pa(MAX_DMA_ADDRESS));
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
 			cpu_data += PERCPU_PAGE_SIZE;
 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+			__per_cpu_mca[cpu] = (unsigned long)mca_data;
+			mca_data += PERCPU_MCA_SIZE;
 		}
 	}
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
Index: linux/include/asm-ia64/percpu.h
===================================================================
--- linux.orig/include/asm-ia64/percpu.h	2005-01-06 13:16:41.149172632 -0600
+++ linux/include/asm-ia64/percpu.h	2005-01-06 15:25:19.889677562 -0600
@@ -56,6 +56,8 @@
 
 #endif	/* SMP */
 
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
 #define EXPORT_PER_CPU_SYMBOL(var)		EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)		EXPORT_SYMBOL_GPL(per_cpu__##var)
 
Index: linux/arch/ia64/kernel/mca_asm.S
===================================================================
--- linux.orig/arch/ia64/kernel/mca_asm.S	2005-01-06 13:16:41.171631194 -0600
+++ linux/arch/ia64/kernel/mca_asm.S	2005-01-06 13:19:59.070624210 -0600
@@ -147,7 +147,6 @@
 	GET_PERCPU_PADDR(r2)	// paddr of percpu_paddr in cpuinfo struct
 	;;
 	mov	r17=r2
-	mov 	r23=r2		// save current ia64_mca_percpu_info addr pointer.
 	;;
 	adds r17=8,r17
 	;;
@@ -247,7 +246,9 @@
 	srlz.d
 	;;
 	// 2. Reload DTR register for PERCPU data.
-	mov r17=r23
+	GET_PERCPU_PADDR(r2)		// paddr of percpu_paddr in cpuinfo struct
+	;;
+	mov r17=r2
 	movl r16=PERCPU_ADDR		// vaddr
 	movl r18=PERCPU_PAGE_SHIFT<<2
 	;;
@@ -262,7 +263,9 @@
 	srlz.d
 	;;
 	// 3. Reload ITR for PAL code.
-	adds r17=40,r23
+	GET_CPUINFO_PAL_PADDR(r2)	// paddr of pal_paddr in cpuinfo struct
+	;;
+	mov r17=r2
 	;;
 	ld8 r18=[r17],8			// pte
 	;;
Index: linux/arch/ia64/kernel/asm-offsets.c
===================================================================
--- linux.orig/arch/ia64/kernel/asm-offsets.c	2005-01-06 13:16:41.171631194 -0600
+++ linux/arch/ia64/kernel/asm-offsets.c	2005-01-06 16:02:33.841535054 -0600
@@ -205,6 +205,7 @@
 	BLANK();
 	/* used by arch/ia64/kernel/mca_asm.S */
 	DEFINE(IA64_CPUINFO_PERCPU_PADDR, offsetof (struct cpuinfo_ia64, percpu_paddr));
+	DEFINE(IA64_CPUINFO_PAL_PADDR, offsetof (struct cpuinfo_ia64, pal_paddr));
 	DEFINE(IA64_CPUINFO_PA_MCA_INFO, offsetof (struct cpuinfo_ia64, ia64_pa_mca_data));
 	DEFINE(IA64_MCA_PROC_STATE_DUMP, offsetof (struct ia64_mca_cpu_s, ia64_mca_proc_state_dump));
 	DEFINE(IA64_MCA_STACK, offsetof (struct ia64_mca_cpu_s, ia64_mca_stack));
Index: linux/include/asm-ia64/mca_asm.h
===================================================================
--- linux.orig/include/asm-ia64/mca_asm.h	2005-01-06 13:16:41.150149091 -0600
+++ linux/include/asm-ia64/mca_asm.h	2005-01-06 13:19:59.072577129 -0600
@@ -53,6 +53,10 @@
 	mov	reg	= ar.k3;;						\
 	addl	reg	= IA64_CPUINFO_PERCPU_PADDR,reg
 
+#define GET_CPUINFO_PAL_PADDR(reg)						\
+	mov	reg	= ar.k3;;						\
+	addl	reg	= IA64_CPUINFO_PAL_PADDR,reg
+
 /*
  * This macro gets the physical address of this cpu's MCA save structure.
  */

-- 
Russ Anderson, OS RAS/Partitioning Project Lead  
SGI - Silicon Graphics Inc          rja@sgi.com

^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (5 preceding siblings ...)
  2005-01-06 23:17 ` Russ Anderson
@ 2005-01-07  6:51 ` Luck, Tony
  2005-01-07  7:08 ` Luck, Tony
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Luck, Tony @ 2005-01-07  6:51 UTC (permalink / raw)
  To: linux-ia64

>An updated patch that applies on Jack's and builds with zx1 & tiger UP.
>The SGI UP is previously broke.  Jesse is working on that.

Build for tiger_defconfig looked pretty clean, but it doesn't boot :-(

No messages to console or serial console.  Peering into memory, I see
this in __log_buf:

<6>SAL Platform features: BusLock IRQ_Redirection.
<6>SAL: AP wakeup using external interrupt vector 0xf0.
<4>kernel BUG at mm/bootmem.c:158!.
<4>swapper[0]: bugcheck! 0 [1].
<4>Modules linked in:.
<4>.<4 >Pid: 0, CPU 0, comm:              swapper.
<4>psr : 00001010084a2010 ifs : 8000000000000690 ip  : [<a000000100766580>]    Not tainted.
<4>ip is at __alloc_bootmem_core+0x540/0x6e0.


Building with a generic config boots just fine.  So this is
possibly a discontig vs. contig problem?  Probably this allocation:

+               mca_data = __alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS,
+                                          PERCPU_MCA_SIZE, __pa(MAX_DMA_ADDRESS));

PERCPU_MCA_SIZE doesn't look likely to be a power of two, so using it
as the 'align' argument to __alloc_bootmem trips the BUG at mm/bootmem.c:158


-Tony

^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (6 preceding siblings ...)
  2005-01-07  6:51 ` Luck, Tony
@ 2005-01-07  7:08 ` Luck, Tony
  2005-01-07 17:47 ` Bjorn Helgaas
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Luck, Tony @ 2005-01-07  7:08 UTC (permalink / raw)
  To: linux-ia64

>PERCPU_MCA_SIZE doesn't look likely to be a power of two, so using it
>as the 'align' argument to __alloc_bootmem trips the BUG at 
>mm/bootmem.c:158

When I "s/PERCPU_MCA_SIZE/PAGE_SIZE/", the tiger_defconfig kernel
boots quite nicely.

I'm too tired to look at what the alignment requirements for this
allocation are right now ... probably just "sizeof (long)"?

-Tony

^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (7 preceding siblings ...)
  2005-01-07  7:08 ` Luck, Tony
@ 2005-01-07 17:47 ` Bjorn Helgaas
  2005-01-07 18:56 ` Luck, Tony
  2005-01-07 19:28 ` Russ Anderson
  10 siblings, 0 replies; 12+ messages in thread
From: Bjorn Helgaas @ 2005-01-07 17:47 UTC (permalink / raw)
  To: linux-ia64

On Thu, 2005-01-06 at 22:51 -0800, Luck, Tony wrote:
> >An updated patch that applies on Jack's and builds with zx1 & tiger UP.
> >The SGI UP is previously broke.  Jesse is working on that.
> 
> Build for tiger_defconfig looked pretty clean, but it doesn't boot :-(
> 
> No messages to console or serial console.  Peering into memory, I see
> this in __log_buf:

Most people don't have this mystical ability to peer into
memory :-).  You *should* be able to get this information
easily from a serial console with "console=uart,io,0x2f8".

(For some reason tiger hides ttyS0 (0x3f8) somewhere, and
the port on the back panel is actually ttyS1 (0x2f8)).


^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (8 preceding siblings ...)
  2005-01-07 17:47 ` Bjorn Helgaas
@ 2005-01-07 18:56 ` Luck, Tony
  2005-01-07 19:28 ` Russ Anderson
  10 siblings, 0 replies; 12+ messages in thread
From: Luck, Tony @ 2005-01-07 18:56 UTC (permalink / raw)
  To: linux-ia64

[-- Attachment #1: Type: text/plain, Size: 457 bytes --]

>+               mca_data = __alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS,
>+                                          PERCPU_MCA_SIZE, __pa(MAX_DMA_ADDRESS));
>
>PERCPU_MCA_SIZE doesn't look likely to be a power of two, so using it
>as the 'align' argument to __alloc_bootmem trips the BUG at mm/bootmem.c:158

I just switched the allocation to use alloc_bootmem() which uses the default
cache line alignment, which appears to be plenty here.

-Tony

[-- Attachment #2: contig.patch --]
[-- Type: application/octet-stream, Size: 675 bytes --]

===== arch/ia64/mm/contig.c 1.10 vs edited =====
--- 1.10/arch/ia64/mm/contig.c	2005-01-06 16:21:22 -08:00
+++ edited/arch/ia64/mm/contig.c	2005-01-07 10:39:38 -08:00
@@ -189,8 +189,7 @@
 	if (smp_processor_id() == 0) {
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		mca_data = __alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS,
-					   PERCPU_MCA_SIZE, __pa(MAX_DMA_ADDRESS));
+		mca_data = alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS);
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [patch] per cpu MCA/INIT fixes.
  2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
                   ` (9 preceding siblings ...)
  2005-01-07 18:56 ` Luck, Tony
@ 2005-01-07 19:28 ` Russ Anderson
  10 siblings, 0 replies; 12+ messages in thread
From: Russ Anderson @ 2005-01-07 19:28 UTC (permalink / raw)
  To: linux-ia64

Tony Luck wrote:
> 
> >+               mca_data = __alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS,
> >+                                          PERCPU_MCA_SIZE, > __pa(MAX_DMA_ADDRESS));
> >
> >PERCPU_MCA_SIZE doesn't look likely to be a power of two, so using it
> >as the 'align' argument to __alloc_bootmem trips the BUG at 
> mm/bootmem.c:158
> 
> I just switched the allocation to use alloc_bootmem() which uses the default
> cache line alignment, which appears to be plenty here.

Thanks Tony.

Here is an updated patch with the change

------------------------------------------------------------------------------------
Index: linux/include/asm-ia64/mca.h
===================================================================
--- linux.orig/include/asm-ia64/mca.h	2005-01-06 13:16:41.149172632 -0600
+++ linux/include/asm-ia64/mca.h	2005-01-07 13:21:53.237844196 -0600
@@ -108,12 +108,14 @@
 
 typedef struct ia64_mca_cpu_s {
 	u64	ia64_mca_stack[IA64_MCA_STACK_SIZE] 		__attribute__((aligned(16)));
-	u64	ia64_mca_proc_state_dump[512] 			__attribute__((aligned(16)));
+	u64	ia64_mca_proc_state_dump[512]			__attribute__((aligned(16)));
 	u64	ia64_mca_stackframe[32]				__attribute__((aligned(16)));
-	u64	ia64_mca_bspstore[IA64_MCA_BSPSTORE_SIZE] 	__attribute__((aligned(16)));
-	u64	ia64_init_stack[KERNEL_STACK_SIZE] 		__attribute__((aligned(16)));
+	u64	ia64_mca_bspstore[IA64_MCA_BSPSTORE_SIZE]	__attribute__((aligned(16)));
+	u64	ia64_init_stack[KERNEL_STACK_SIZE/8] 		__attribute__((aligned(16)));
 } ia64_mca_cpu_t;
 
+#define PERCPU_MCA_SIZE sizeof(ia64_mca_cpu_t)
+
 extern void ia64_mca_init(void);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
Index: linux/arch/ia64/mm/init.c
===================================================================
--- linux.orig/arch/ia64/mm/init.c	2005-01-06 13:16:41.150149091 -0600
+++ linux/arch/ia64/mm/init.c	2005-01-06 13:19:59.042306891 -0600
@@ -287,12 +287,32 @@
 	ia64_patch_gate();
 }
 
+void
+set_mca_pointer(struct cpuinfo_ia64 *cpuinfo, void *cpu_data)
+{
+	void *my_cpu_data = ia64_imva(cpu_data);
+
+        /*
+         * The MCA info structure was allocated earlier and a physical address pointer
+         * saved in __per_cpu_mca[cpu].  Move that pointer into the cpuinfo structure.
+         */
+
+        cpuinfo->ia64_pa_mca_data = (__u64 *)__per_cpu_mca[smp_processor_id()];
+
+        cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
+        ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(cpuinfo));
+
+        /*
+         * Set pal_base and pal_paddr in cpuinfo structure.
+         */
+        efi_get_pal_addr();
+}
+
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
 	unsigned long psr, pta, impl_va_bits;
 	extern void __devinit tlb_init (void);
-	struct cpuinfo_ia64 *cpuinfo;
 
 #ifdef CONFIG_DISABLE_VHPT
 #	define VHPT_ENABLE_BIT	0
@@ -357,22 +377,6 @@
 	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
 	ia64_srlz_d();
 #endif
-
-	/*
-	 * The MCA info structure was allocated earlier and a physical address pointer
-	 * saved in k3.  Move that pointer into the cpuinfo structure and save
-	 * the physical address of the cpuinfo structure in k3.
-	 */
-	cpuinfo = (struct cpuinfo_ia64 *)my_cpu_data;
-	cpuinfo->ia64_pa_mca_data = (__u64 *)ia64_get_kr(IA64_KR_PA_CPU_INFO);
-
-	cpuinfo->percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
-	ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(my_cpu_data));
-
-	/*
-	 * Set pal_base and pal_paddr in cpuinfo structure.
-	 */
-	efi_get_pal_addr();
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
Index: linux/arch/ia64/mm/discontig.c
===================================================================
--- linux.orig/arch/ia64/mm/discontig.c	2005-01-06 13:16:56.624098123 -0600
+++ linux/arch/ia64/mm/discontig.c	2005-01-07 13:18:23.878154485 -0600
@@ -350,12 +350,12 @@
 		mem_data[node].node_data = __va(pernode);
 		pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-		mca_data_phys = (void *)pernode;
-		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
-
 		mem_data[node].pgdat->bdata = bdp;
 		pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
+		mca_data_phys = (void *)pernode;
+		pernode += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t)) * phys_cpus;
+
 		/*
 		 * Copy the static per-cpu data into the region we
 		 * just set aside and then setup __per_cpu_offset
@@ -370,13 +370,11 @@
 					 * The memory for the cpuinfo structure is allocated
 					 * here, but the data in the structure is initialized
 					 * later.  Save the physical address of the MCA save
-					 * area in IA64_KR_PA_CPU_INFO.  When the cpuinfo struct 
-					 * is initialized, the value in IA64_KR_PA_CPU_INFO
-					 * will be put in the cpuinfo structure and 
-					 * IA64_KR_PA_CPU_INFO will be set to the physical
-					 * addresss of the cpuinfo structure.
+					 * area in __per_cpu_mca[cpu].  When the cpuinfo struct 
+					 * is initialized, the value in __per_cpu_mca[cpu]
+					 * will be put in the cpuinfo structure.
 					 */
-					ia64_set_kr(IA64_KR_PA_CPU_INFO, __pa(mca_data_phys));
+					__per_cpu_mca[cpu] = __pa(mca_data_phys);
 					mca_data_phys += L1_CACHE_ALIGN(sizeof(ia64_mca_cpu_t));
 				}
 				__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
Index: linux/arch/ia64/kernel/efi.c
===================================================================
--- linux.orig/arch/ia64/kernel/efi.c	2005-01-06 13:16:41.170654735 -0600
+++ linux/arch/ia64/kernel/efi.c	2005-01-06 13:19:59.056953780 -0600
@@ -414,15 +414,15 @@
  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
  * Abstraction Layer chapter 11 in ADAG
  */
-void
-efi_map_pal_code (void)
+
+static efi_memory_desc_t *
+pal_code_memdesc (void)
 {
 	void *efi_map_start, *efi_map_end, *p;
 	efi_memory_desc_t *md;
 	u64 efi_desc_size;
 	int pal_code_count = 0;
-	u64 mask, psr;
-	u64 vaddr;
+	u64 vaddr, mask;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -466,91 +466,58 @@
 		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
 			panic("Woah!  PAL code size bigger than a granule!");
 
-		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 #if EFI_DEBUG
+		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
 		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
-		       smp_processor_id(), md->phys_addr,
-		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
-		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+			smp_processor_id(), md->phys_addr,
+			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
-
-		/*
-		 * Cannot write to CRx with PSR.ic=1
-		 */
-		psr = ia64_clear_ic();
-		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
-			 pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
-			 IA64_GRANULE_SHIFT);
-		ia64_set_psr(psr);		/* restore psr */
-		ia64_srlz_i();
-
+		return md;
 	}
+
+	return NULL;
 }
 
-/* 
- * Put pal_base and pal_paddr in the cpuinfo structure.
- */
 void
-efi_get_pal_addr(void)
+efi_get_pal_addr (void)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size;
-	int pal_code_count = 0;
-	u64 mask;
-	u64 vaddr;
+	efi_memory_desc_t *md = pal_code_memdesc();
+	u64 vaddr, mask;
 	struct cpuinfo_ia64 *cpuinfo;
 
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-		if (md->type != EFI_PAL_CODE)
-			continue;
+	if (md != NULL) {
 
-		if (++pal_code_count > 1) {
-			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
-			       md->phys_addr);
-			continue;
-		}
-		/*
-		 * The only ITLB entry in region 7 that is used is the one installed by
-		 * __start().  That entry covers a 64MB range.
-		 */
-		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
 		vaddr = PAGE_OFFSET + md->phys_addr;
-
-		/*
-		 * We must check that the PAL mapping won't overlap with the kernel
-		 * mapping.
-		 *
-		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
-		 * 256KB and that only one ITR is needed to map it. This implies that the
-		 * PAL code is always aligned on its size, i.e., the closest matching page
-		 * size supported by the TLB. Therefore PAL code is guaranteed never to
-		 * cross a 64MB unless it is bigger than 64MB (very unlikely!).  So for
-		 * now the following test is enough to determine whether or not we need a
-		 * dedicated ITR for the PAL code.
-		 */
-		if ((vaddr & mask) == (KERNEL_START & mask)) {
-			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
-			       __FUNCTION__);
-			continue;
-		}
-
-		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
-			panic("Woah!  PAL code size bigger than a granule!");
-
 		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
 
-		/* insert this TR into our list for MCA recovery purposes */
 		cpuinfo = (struct cpuinfo_ia64 *)__va(ia64_get_kr(IA64_KR_PA_CPU_INFO));
 		cpuinfo->pal_base = vaddr & mask;
 		cpuinfo->pal_paddr = pte_val(mk_pte_phys(md->phys_addr, PAGE_KERNEL));
-		printk(KERN_INFO "CPU %d: late efi pal_base 0x%lx pal_paddr 0x%lx\n",
-			smp_processor_id(), cpuinfo->pal_base, cpuinfo->pal_paddr);
+	}
+}
+
+void
+efi_map_pal_code (void)
+{
+	efi_memory_desc_t *md = pal_code_memdesc();
+	u64 vaddr, mask, psr;
+
+	if (md != NULL) {
+
+		vaddr = PAGE_OFFSET + md->phys_addr;
+		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
+
+		/*
+		 * Cannot write to CRx with PSR.ic=1
+		 */
+		psr = ia64_clear_ic();
+		ia64_itr(0x1, IA64_TR_PALCODE, vaddr & mask,
+			pte_val(pfn_pte(md->phys_addr >> PAGE_SHIFT, PAGE_KERNEL)),
+			IA64_GRANULE_SHIFT);
+		ia64_set_psr(psr);		/* restore psr */
+		ia64_srlz_i();
 	}
 }
 
Index: linux/arch/ia64/kernel/setup.c
=================================--- linux.orig/arch/ia64/kernel/setup.c	2005-01-06 13:16:41.171631194 -0600
+++ linux/arch/ia64/kernel/setup.c	2005-01-06 15:24:29.152848789 -0600
@@ -60,6 +60,7 @@
 unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
+unsigned long __per_cpu_mca[NR_CPUS];
 
 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
@@ -574,6 +575,7 @@
 cpu_init (void)
 {
 	extern void __devinit ia64_mmu_init (void *);
+	extern void set_mca_pointer (struct cpuinfo_ia64 *, void *);
 	unsigned long num_phys_stacked;
 	pal_vm_info_2_u_t vmi;
 	unsigned int max_ctx;
@@ -628,6 +630,7 @@
 		BUG();
 
 	ia64_mmu_init(ia64_imva(cpu_data));
+	set_mca_pointer(cpu_info, cpu_data);
 
 #ifdef CONFIG_IA32_SUPPORT
 	ia32_cpu_init();
Index: linux/arch/ia64/mm/contig.c
=================================--- linux.orig/arch/ia64/mm/contig.c	2005-01-06 13:16:41.151125551 -0600
+++ linux/arch/ia64/mm/contig.c	2005-01-07 13:18:02.943846575 -0600
@@ -24,6 +24,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/mca.h>
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
@@ -177,7 +178,7 @@
 void *
 per_cpu_init (void)
 {
-	void *cpu_data;
+	void *cpu_data, *mca_data;
 	int cpu;
 
 	/*
@@ -188,13 +189,17 @@
 	if (smp_processor_id() == 0) {
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	mca_data = alloc_bootmem(PERCPU_MCA_SIZE * NR_CPUS);
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
 			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
 			cpu_data += PERCPU_PAGE_SIZE;
 			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+			__per_cpu_mca[cpu] = (unsigned long)mca_data;
+			mca_data += PERCPU_MCA_SIZE;
 		}
 	}
+
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 #endif /* CONFIG_SMP */
Index: linux/include/asm-ia64/percpu.h
=================================--- linux.orig/include/asm-ia64/percpu.h	2005-01-06 13:16:41.149172632 -0600
+++ linux/include/asm-ia64/percpu.h	2005-01-06 15:25:19.889677562 -0600
@@ -56,6 +56,8 @@
 
 #endif	/* SMP */
 
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
 #define EXPORT_PER_CPU_SYMBOL(var)		EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)		EXPORT_SYMBOL_GPL(per_cpu__##var)
 
Index: linux/arch/ia64/kernel/mca_asm.S
=================================--- linux.orig/arch/ia64/kernel/mca_asm.S	2005-01-06 13:16:41.171631194 -0600
+++ linux/arch/ia64/kernel/mca_asm.S	2005-01-06 13:19:59.070624210 -0600
@@ -147,7 +147,6 @@
 	GET_PERCPU_PADDR(r2)	// paddr of percpu_paddr in cpuinfo struct
 	;;
 	mov	r17=r2
-	mov 	r23=r2		// save current ia64_mca_percpu_info addr pointer.
 	;;
 	adds r17=8,r17
 	;;
@@ -247,7 +246,9 @@
 	srlz.d
 	;;
 	// 2. Reload DTR register for PERCPU data.
-	mov r17=r23
+	GET_PERCPU_PADDR(r2)		// paddr of percpu_paddr in cpuinfo struct
+	;;
+	mov r17=r2
 	movl r16=PERCPU_ADDR		// vaddr
 	movl r18=PERCPU_PAGE_SHIFT<<2
 	;;
@@ -262,7 +263,9 @@
 	srlz.d
 	;;
 	// 3. Reload ITR for PAL code.
-	adds r17=40,r23
+	GET_CPUINFO_PAL_PADDR(r2)	// paddr of pal_paddr in cpuinfo struct
+	;;
+	mov r17=r2
 	;;
 	ld8 r18=[r17],8			// pte
 	;;
Index: linux/arch/ia64/kernel/asm-offsets.c
=================================--- linux.orig/arch/ia64/kernel/asm-offsets.c	2005-01-06 13:16:41.171631194 -0600
+++ linux/arch/ia64/kernel/asm-offsets.c	2005-01-06 16:02:33.841535054 -0600
@@ -205,6 +205,7 @@
 	BLANK();
 	/* used by arch/ia64/kernel/mca_asm.S */
 	DEFINE(IA64_CPUINFO_PERCPU_PADDR, offsetof (struct cpuinfo_ia64, percpu_paddr));
+	DEFINE(IA64_CPUINFO_PAL_PADDR, offsetof (struct cpuinfo_ia64, pal_paddr));
 	DEFINE(IA64_CPUINFO_PA_MCA_INFO, offsetof (struct cpuinfo_ia64, ia64_pa_mca_data));
 	DEFINE(IA64_MCA_PROC_STATE_DUMP, offsetof (struct ia64_mca_cpu_s, ia64_mca_proc_state_dump));
 	DEFINE(IA64_MCA_STACK, offsetof (struct ia64_mca_cpu_s, ia64_mca_stack));
Index: linux/include/asm-ia64/mca_asm.h
===================================================================
--- linux.orig/include/asm-ia64/mca_asm.h	2005-01-06 13:16:41.150149091 -0600
+++ linux/include/asm-ia64/mca_asm.h	2005-01-06 13:19:59.072577129 -0600
@@ -53,6 +53,10 @@
 	mov	reg	= ar.k3;;						\
 	addl	reg	= IA64_CPUINFO_PERCPU_PADDR,reg
 
+#define GET_CPUINFO_PAL_PADDR(reg)						\
+	mov	reg	= ar.k3;;						\
+	addl	reg	= IA64_CPUINFO_PAL_PADDR,reg
+
 /*
  * This macro gets the physical address of this cpu's MCA save structure.
  */

-- 
Russ Anderson, OS RAS/Partitioning Project Lead  
SGI - Silicon Graphics Inc          rja@sgi.com

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2005-01-07 19:28 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2005-01-06  3:08 [patch] per cpu MCA/INIT fixes Russ Anderson
2005-01-06 17:14 ` Jesse Barnes
2005-01-06 18:21 ` Russ Anderson
2005-01-06 18:28 ` Luck, Tony
2005-01-06 18:36 ` Jesse Barnes
2005-01-06 20:50 ` Russ Anderson
2005-01-06 23:17 ` Russ Anderson
2005-01-07  6:51 ` Luck, Tony
2005-01-07  7:08 ` Luck, Tony
2005-01-07 17:47 ` Bjorn Helgaas
2005-01-07 18:56 ` Luck, Tony
2005-01-07 19:28 ` Russ Anderson

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox