* [Patch 3/3] Allocate mca/init stacks as part of per_node reservation.
@ 2008-02-13 17:55 holt
2008-02-21 21:08 ` holt
` (4 more replies)
0 siblings, 5 replies; 6+ messages in thread
From: holt @ 2008-02-13 17:55 UTC (permalink / raw)
To: linux-ia64
This patch will reserve space on the node at the same time as per_cpu
space is reserved. By the time the for_each_possible_cpu() cpumask is
initialized, we will have already reserved and initialized the mca and
init stack areas for every possible cpu.
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Russ Anderson <rja@sgi.com>
---
Index: per_cpu_mca_v1/arch/ia64/mm/discontig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/discontig.c 2008-02-13 11:53:55.695414286 -0600
+++ per_cpu_mca_v1/arch/ia64/mm/discontig.c 2008-02-13 11:53:55.875436635 -0600
@@ -27,6 +27,7 @@
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
+#include <asm/mca.h>
/*
* Track per-node information needed to setup the boot memory allocator, the
@@ -125,11 +126,38 @@ static unsigned long __meminit compute_p
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
+ pernodesize = ALIGN(pernodesize, KERNEL_STACK_SIZE);
+ pernodesize += sizeof(struct ia64_mca_cpu) * cpus;
pernodesize = PAGE_ALIGN(pernodesize);
return pernodesize;
}
/**
+ * per_node_mca_setup - setup mca areas on each node for its cpus
+ * @mca_area: mca area on this node
+ * @node: node to setup
+ */
+static unsigned long per_node_mca_setup(unsigned long mca_area, int node)
+{
+#ifdef CONFIG_SMP
+ int cpu;
+
+ for_each_possible_early_cpu(cpu) {
+ if (node == node_cpuid[cpu].nid) {
+ __per_cpu_mca[cpu] = mca_area;
+ mca_area += sizeof(struct ia64_mca_cpu);
+ }
+ }
+#else
+ if (node == 0) {
+ __per_cpu_mca[0] = mca_area;
+ mca_area += sizeof(struct ia64_mca_cpu);
+ }
+#endif
+ return mca_area;
+}
+
+/**
* per_cpu_node_setup - setup per-cpu areas on each node
* @cpu_data: per-cpu area on this node
* @node: node to setup
@@ -186,6 +214,10 @@ static void __init fill_pernode(int node
pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+ pernode = ALIGN(pernode, KERNEL_STACK_SIZE);
+
+ pernode = per_node_mca_setup(pernode, node);
+
cpu_data = per_cpu_node_setup(cpu_data, node);
return;
Index: per_cpu_mca_v1/arch/ia64/mm/contig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/contig.c 2008-02-13 11:53:32.424525355 -0600
+++ per_cpu_mca_v1/arch/ia64/mm/contig.c 2008-02-13 11:53:55.887438125 -0600
@@ -182,8 +182,26 @@ alloc_per_cpu_data(void)
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+static inline void
+alloc_mca_data(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ __per_cpu_mca[cpu] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+ }
+}
#else
#define alloc_per_cpu_data() do { } while (0)
+
+static inline void
+alloc_mca_data(void)
+{
+ __per_cpu_mca[0] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+}
#endif /* CONFIG_SMP */
/**
@@ -223,6 +241,8 @@ find_memory (void)
find_initrd();
alloc_per_cpu_data();
+
+ alloc_mca_data();
}
static int
Index: per_cpu_mca_v1/arch/ia64/kernel/mca.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/kernel/mca.c 2008-02-13 11:53:32.424525355 -0600
+++ per_cpu_mca_v1/arch/ia64/kernel/mca.c 2008-02-13 11:53:55.907440608 -0600
@@ -1759,45 +1759,26 @@ format_mca_init_stack(void *mca_data, un
strncpy(p->comm, type, sizeof(p->comm)-1);
}
-/* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
-{
- return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
-}
-
/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
void *data;
- long sz = sizeof(struct ia64_mca_cpu);
int cpu = smp_processor_id();
- static int first_time = 1;
/*
* Structure will already be allocated if cpu has been online,
* then offlined.
*/
- if (__per_cpu_mca[cpu]) {
- data = __va(__per_cpu_mca[cpu]);
- } else {
- if (first_time) {
- data = mca_bootmem();
- first_time = 0;
- } else
- data = page_address(alloc_pages_node(numa_node_id(),
- GFP_KERNEL, get_order(sz)));
- if (!data)
- panic("Could not allocate MCA memory for cpu %d\n",
- cpu);
- }
+ BUG_ON(__per_cpu_mca[cpu] == 0);
+ data = __va(__per_cpu_mca[cpu]);
+
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
"MCA", cpu);
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
"INIT", cpu);
- __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
+ __get_cpu_var(ia64_mca_data) = __pa(data);
/*
* Stash away a copy of the PTE needed to map the per-CPU page.
--
^ permalink raw reply [flat|nested] 6+ messages in thread
* [Patch 3/3] Allocate mca/init stacks as part of per_node reservation.
2008-02-13 17:55 [Patch 3/3] Allocate mca/init stacks as part of per_node reservation holt
@ 2008-02-21 21:08 ` holt
2008-03-14 21:18 ` Luck, Tony
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: holt @ 2008-02-21 21:08 UTC (permalink / raw)
To: linux-ia64
This patch will reserve space on the node at the same time as per_cpu
space is reserved. By the time the for_each_possible_cpu() cpumask is
initialized, we will have already reserved and initialized the mca and
init stack areas for every possible cpu.
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Russ Anderson <rja@sgi.com>
---
Index: per_cpu_mca_v1/arch/ia64/mm/discontig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/discontig.c 2008-02-13 11:53:55.695414286 -0600
+++ per_cpu_mca_v1/arch/ia64/mm/discontig.c 2008-02-13 11:53:55.875436635 -0600
@@ -27,6 +27,7 @@
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
+#include <asm/mca.h>
/*
* Track per-node information needed to setup the boot memory allocator, the
@@ -125,11 +126,38 @@ static unsigned long __meminit compute_p
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
+ pernodesize = ALIGN(pernodesize, KERNEL_STACK_SIZE);
+ pernodesize += sizeof(struct ia64_mca_cpu) * cpus;
pernodesize = PAGE_ALIGN(pernodesize);
return pernodesize;
}
/**
+ * per_node_mca_setup - setup mca areas on each node for its cpus
+ * @mca_area: mca area on this node
+ * @node: node to setup
+ */
+static unsigned long per_node_mca_setup(unsigned long mca_area, int node)
+{
+#ifdef CONFIG_SMP
+ int cpu;
+
+ for_each_possible_early_cpu(cpu) {
+ if (node == node_cpuid[cpu].nid) {
+ __per_cpu_mca[cpu] = mca_area;
+ mca_area += sizeof(struct ia64_mca_cpu);
+ }
+ }
+#else
+ if (node == 0) {
+ __per_cpu_mca[0] = mca_area;
+ mca_area += sizeof(struct ia64_mca_cpu);
+ }
+#endif
+ return mca_area;
+}
+
+/**
* per_cpu_node_setup - setup per-cpu areas on each node
* @cpu_data: per-cpu area on this node
* @node: node to setup
@@ -186,6 +214,10 @@ static void __init fill_pernode(int node
pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+ pernode = ALIGN(pernode, KERNEL_STACK_SIZE);
+
+ pernode = per_node_mca_setup(pernode, node);
+
cpu_data = per_cpu_node_setup(cpu_data, node);
return;
Index: per_cpu_mca_v1/arch/ia64/mm/contig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/contig.c 2008-02-13 11:53:32.424525355 -0600
+++ per_cpu_mca_v1/arch/ia64/mm/contig.c 2008-02-13 11:53:55.887438125 -0600
@@ -182,8 +182,26 @@ alloc_per_cpu_data(void)
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+static inline void
+alloc_mca_data(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ __per_cpu_mca[cpu] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+ }
+}
#else
#define alloc_per_cpu_data() do { } while (0)
+
+static inline void
+alloc_mca_data(void)
+{
+ __per_cpu_mca[0] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+}
#endif /* CONFIG_SMP */
/**
@@ -223,6 +241,8 @@ find_memory (void)
find_initrd();
alloc_per_cpu_data();
+
+ alloc_mca_data();
}
static int
Index: per_cpu_mca_v1/arch/ia64/kernel/mca.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/kernel/mca.c 2008-02-13 11:53:32.424525355 -0600
+++ per_cpu_mca_v1/arch/ia64/kernel/mca.c 2008-02-13 11:53:55.907440608 -0600
@@ -1759,45 +1759,26 @@ format_mca_init_stack(void *mca_data, un
strncpy(p->comm, type, sizeof(p->comm)-1);
}
-/* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
-{
- return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
-}
-
/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
void *data;
- long sz = sizeof(struct ia64_mca_cpu);
int cpu = smp_processor_id();
- static int first_time = 1;
/*
* Structure will already be allocated if cpu has been online,
* then offlined.
*/
- if (__per_cpu_mca[cpu]) {
- data = __va(__per_cpu_mca[cpu]);
- } else {
- if (first_time) {
- data = mca_bootmem();
- first_time = 0;
- } else
- data = page_address(alloc_pages_node(numa_node_id(),
- GFP_KERNEL, get_order(sz)));
- if (!data)
- panic("Could not allocate MCA memory for cpu %d\n",
- cpu);
- }
+ BUG_ON(__per_cpu_mca[cpu] == 0);
+ data = __va(__per_cpu_mca[cpu]);
+
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
"MCA", cpu);
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
"INIT", cpu);
- __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
+ __get_cpu_var(ia64_mca_data) = __pa(data);
/*
* Stash away a copy of the PTE needed to map the per-CPU page.
--
^ permalink raw reply [flat|nested] 6+ messages in thread
* RE: [Patch 3/3] Allocate mca/init stacks as part of per_node reservation.
2008-02-13 17:55 [Patch 3/3] Allocate mca/init stacks as part of per_node reservation holt
2008-02-21 21:08 ` holt
@ 2008-03-14 21:18 ` Luck, Tony
2008-03-14 23:56 ` Luck, Tony
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Luck, Tony @ 2008-03-14 21:18 UTC (permalink / raw)
To: linux-ia64
Index: per_cpu_mca_v1/arch/ia64/mm/contig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/contig.c 2008-02-13 11:53:32.424525355 -0600
+++ per_cpu_mca_v1/arch/ia64/mm/contig.c 2008-02-13 11:53:55.887438125 -0600
@@ -182,8 +182,26 @@ alloc_per_cpu_data(void)
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+static inline void
+alloc_mca_data(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ __per_cpu_mca[cpu] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+ }
+}
I applied this series ... and everything built cleanly. But when I
try to boot my Tiger (built from tiger_defconfig) it hangs quite
early in boot (last message is "Boot processor id 0x0/0xc128").
I threw some printk() into the above function, and it seems
that in the contig.c case the possible cpu map isn't initialized
early enough. The loop here just allocates mca_data for cpu 0
and no others.
-Tony
^ permalink raw reply [flat|nested] 6+ messages in thread
* RE: [Patch 3/3] Allocate mca/init stacks as part of per_node reservation.
2008-02-13 17:55 [Patch 3/3] Allocate mca/init stacks as part of per_node reservation holt
2008-02-21 21:08 ` holt
2008-03-14 21:18 ` Luck, Tony
@ 2008-03-14 23:56 ` Luck, Tony
2008-03-17 12:12 ` holt
2008-03-17 12:24 ` [Patch 3/3] Allocate mca/init stacks as part of per_node Robin Holt
4 siblings, 0 replies; 6+ messages in thread
From: Luck, Tony @ 2008-03-14 23:56 UTC (permalink / raw)
To: linux-ia64
Perhaps we should leave mca.c using alloc_pages_node()
for cpu!=0 and just have the mm/{contig.c,discontig.c}
do a pre-allocation for cpu0 only. In contig.c you'd
just use the !SMP version of alloc_mca_data() for all
cases. Changes to discontig.c are left as an exercise
to the reader. Patch for mca.c looks like this:
--- a/arch/ia64/kernel/mca.c 2008-03-14 16:51:10.000000000 -0700
+++ b/arch/ia64/kernel/mca.c 2008-03-14 16:50:35.000000000 -0700
@@ -1756,13 +1756,6 @@
strncpy(p->comm, type, sizeof(p->comm)-1);
}
-/* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
-{
- return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
-}
-
/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
@@ -1771,20 +1764,15 @@
void *data;
long sz = sizeof(struct ia64_mca_cpu);
int cpu = smp_processor_id();
- static int first_time = 1;
/*
* Structure will already be allocated if cpu has been online,
- * then offlined.
+ * then offlined. Cpu 0 is pre-allocated using bootmem.
*/
if (__per_cpu_mca[cpu]) {
data = __va(__per_cpu_mca[cpu]);
} else {
- if (first_time) {
- data = mca_bootmem();
- first_time = 0;
- } else
- data = page_address(alloc_pages_node(numa_node_id(),
+ data = page_address(alloc_pages_node(numa_node_id(),
GFP_KERNEL, get_order(sz)));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
^ permalink raw reply [flat|nested] 6+ messages in thread
* [Patch 3/3] Allocate mca/init stacks as part of per_node reservation.
2008-02-13 17:55 [Patch 3/3] Allocate mca/init stacks as part of per_node reservation holt
` (2 preceding siblings ...)
2008-03-14 23:56 ` Luck, Tony
@ 2008-03-17 12:12 ` holt
2008-03-17 12:24 ` [Patch 3/3] Allocate mca/init stacks as part of per_node Robin Holt
4 siblings, 0 replies; 6+ messages in thread
From: holt @ 2008-03-17 12:12 UTC (permalink / raw)
To: linux-ia64
This patch will reserve space on the node at the same time as per_cpu
space is reserved. By the time the for_each_possible_cpu() cpumask is
initialized, we will have already reserved and initialized the mca and
init stack areas for every possible cpu.
Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Russ Anderson <rja@sgi.com>
---
Index: per_cpu_mca_v1/arch/ia64/mm/discontig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/discontig.c 2008-03-17 07:02:33.085979563 -0500
+++ per_cpu_mca_v1/arch/ia64/mm/discontig.c 2008-03-17 07:03:13.386995549 -0500
@@ -27,6 +27,7 @@
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
+#include <asm/mca.h>
/*
* Track per-node information needed to setup the boot memory allocator, the
@@ -127,11 +128,38 @@ static unsigned long __meminit compute_p
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
+ pernodesize = ALIGN(pernodesize, KERNEL_STACK_SIZE);
+ pernodesize += sizeof(struct ia64_mca_cpu) * cpus;
pernodesize = PAGE_ALIGN(pernodesize);
return pernodesize;
}
/**
+ * per_node_mca_setup - setup mca areas on each node for its cpus
+ * @mca_area: mca area on this node
+ * @node: node to setup
+ */
+static unsigned long per_node_mca_setup(unsigned long mca_area, int node)
+{
+#ifdef CONFIG_SMP
+ int cpu;
+
+ for_each_possible_early_cpu(cpu) {
+ if (node == node_cpuid[cpu].nid) {
+ __per_cpu_mca[cpu] = mca_area;
+ mca_area += sizeof(struct ia64_mca_cpu);
+ }
+ }
+#else
+ if (node == 0) {
+ __per_cpu_mca[0] = mca_area;
+ mca_area += sizeof(struct ia64_mca_cpu);
+ }
+#endif
+ return mca_area;
+}
+
+/**
* per_cpu_node_setup - setup per-cpu areas on each node
* @cpu_data: per-cpu area on this node
* @node: node to setup
@@ -188,6 +216,10 @@ static void __init fill_pernode(int node
pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
+ pernode = ALIGN(pernode, KERNEL_STACK_SIZE);
+
+ pernode = per_node_mca_setup(pernode, node);
+
cpu_data = per_cpu_node_setup(cpu_data, node);
return;
Index: per_cpu_mca_v1/arch/ia64/mm/contig.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/mm/contig.c 2008-03-17 07:02:33.085979563 -0500
+++ per_cpu_mca_v1/arch/ia64/mm/contig.c 2008-03-17 07:03:28.716903531 -0500
@@ -184,8 +184,26 @@ alloc_per_cpu_data(void)
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+static inline void
+alloc_mca_data(void)
+{
+ int cpu;
+
+ for_each_possible_early_cpu(cpu) {
+ __per_cpu_mca[cpu] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+ }
+}
#else
#define alloc_per_cpu_data() do { } while (0)
+
+static inline void
+alloc_mca_data(void)
+{
+ __per_cpu_mca[0] = __pa(__alloc_bootmem(
+ sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0));
+}
#endif /* CONFIG_SMP */
/**
@@ -225,6 +243,8 @@ find_memory (void)
find_initrd();
alloc_per_cpu_data();
+
+ alloc_mca_data();
}
static int
Index: per_cpu_mca_v1/arch/ia64/kernel/mca.c
===================================================================
--- per_cpu_mca_v1.orig/arch/ia64/kernel/mca.c 2008-03-17 07:02:33.085979563 -0500
+++ per_cpu_mca_v1/arch/ia64/kernel/mca.c 2008-03-17 07:03:13.427000528 -0500
@@ -1759,45 +1759,26 @@ format_mca_init_stack(void *mca_data, un
strncpy(p->comm, type, sizeof(p->comm)-1);
}
-/* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
-{
- return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
-}
-
/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
void *data;
- long sz = sizeof(struct ia64_mca_cpu);
int cpu = smp_processor_id();
- static int first_time = 1;
/*
* Structure will already be allocated if cpu has been online,
* then offlined.
*/
- if (__per_cpu_mca[cpu]) {
- data = __va(__per_cpu_mca[cpu]);
- } else {
- if (first_time) {
- data = mca_bootmem();
- first_time = 0;
- } else
- data = page_address(alloc_pages_node(numa_node_id(),
- GFP_KERNEL, get_order(sz)));
- if (!data)
- panic("Could not allocate MCA memory for cpu %d\n",
- cpu);
- }
+ BUG_ON(__per_cpu_mca[cpu] == 0);
+ data = __va(__per_cpu_mca[cpu]);
+
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
"MCA", cpu);
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
"INIT", cpu);
- __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
+ __get_cpu_var(ia64_mca_data) = __pa(data);
/*
* Stash away a copy of the PTE needed to map the per-CPU page.
--
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Patch 3/3] Allocate mca/init stacks as part of per_node
2008-02-13 17:55 [Patch 3/3] Allocate mca/init stacks as part of per_node reservation holt
` (3 preceding siblings ...)
2008-03-17 12:12 ` holt
@ 2008-03-17 12:24 ` Robin Holt
4 siblings, 0 replies; 6+ messages in thread
From: Robin Holt @ 2008-03-17 12:24 UTC (permalink / raw)
To: linux-ia64
On Fri, Mar 14, 2008 at 04:56:26PM -0700, Luck, Tony wrote:
> Perhaps we should leave mca.c using alloc_pages_node()
> for cpu!=0 and just have the mm/{contig.c,discontig.c}
> do a pre-allocation for cpu0 only. In contig.c you'd
> just use the !SMP version of alloc_mca_data() for all
> cases. Changes to discontig.c are left as an exercise
> to the reader. Patch for mca.c looks like this:
I fixed up the -V7 patch as, what I intended to be, -V8 RFC, but I
rushed the send. Could you test that patch instead? It does keep the
mca stacks being allocated by the early arch init instead of mca.c.
Thanks,
Robin
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2008-03-17 12:24 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-02-13 17:55 [Patch 3/3] Allocate mca/init stacks as part of per_node reservation holt
2008-02-21 21:08 ` holt
2008-03-14 21:18 ` Luck, Tony
2008-03-14 23:56 ` Luck, Tony
2008-03-17 12:12 ` holt
2008-03-17 12:24 ` [Patch 3/3] Allocate mca/init stacks as part of per_node Robin Holt
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox