* [PATCH] Use SLB size from the device tree
@ 2007-11-08 23:40 Michael Neuling
2007-11-09 0:16 ` Olof Johansson
2007-12-06 3:32 ` Paul Mackerras
0 siblings, 2 replies; 5+ messages in thread
From: Michael Neuling @ 2007-11-08 23:40 UTC (permalink / raw)
To: paulus; +Cc: Olof Johansson, linuxppc-dev, Will Schmidt
Currently we hardwire the number of SLBs but the PAPR says we export an
ibm,slb-size property to specify the number of SLB entries. This patch
uses this property instead of assuming 64 always. If no property is
found, we assume 64 entries as before.
This soft patches the SLB handler, so it won't change performance at
all.
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
Paulus: for your 2.6.25 tree.
Olof: this touches the pasemi code, but I've not tested with one.
Will: this will interact with your SLB xmon patch, but is easy to
resolve.
arch/powerpc/kernel/prom.c | 11 +++++++++++
arch/powerpc/mm/hash_utils_64.c | 1 +
arch/powerpc/mm/slb.c | 3 +++
arch/powerpc/mm/slb_low.S | 5 +++--
arch/powerpc/platforms/pasemi/setup.c | 3 ++-
arch/powerpc/xmon/xmon.c | 2 +-
include/asm-powerpc/mmu-hash64.h | 1 +
include/asm-powerpc/reg.h | 6 ------
8 files changed, 22 insertions(+), 10 deletions(-)
Index: linux-2.6-ozlabs/arch/powerpc/kernel/prom.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/prom.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/prom.c
@@ -583,6 +583,16 @@ static void __init check_cpu_pa_features
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
+static void __init check_cpu_slb_size(unsigned long node)
+{
+ u32 *slb_size_ptr;
+
+ slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+ if (slb_size_ptr != NULL) {
+ mmu_slb_size = *slb_size_ptr;
+ }
+}
+
static struct feature_property {
const char *name;
u32 min_value;
@@ -701,6 +711,7 @@ static int __init early_init_dt_scan_cpu
check_cpu_feature_properties(node);
check_cpu_pa_features(node);
+ check_cpu_slb_size(node);
#ifdef CONFIG_PPC_PSERIES
if (nthreads > 1)
Index: linux-2.6-ozlabs/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/mm/hash_utils_64.c
+++ linux-2.6-ozlabs/arch/powerpc/mm/hash_utils_64.c
@@ -95,6 +95,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
Index: linux-2.6-ozlabs/arch/powerpc/mm/slb.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/mm/slb.c
+++ linux-2.6-ozlabs/arch/powerpc/mm/slb.c
@@ -255,6 +255,7 @@ void slb_initialize(void)
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
+ extern unsigned int *slb_compare_rr_to_size;
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -268,6 +269,8 @@ void slb_initialize(void)
SLB_VSID_KERNEL | linear_llp);
patch_slb_encoding(slb_miss_kernel_load_io,
SLB_VSID_KERNEL | io_llp);
+ patch_slb_encoding(slb_compare_rr_to_size,
+ mmu_slb_size);
DBG("SLB: linear LLP = %04x\n", linear_llp);
DBG("SLB: io LLP = %04x\n", io_llp);
Index: linux-2.6-ozlabs/arch/powerpc/mm/slb_low.S
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/mm/slb_low.S
+++ linux-2.6-ozlabs/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISER
7: ld r10,PACASTABRR(r13)
addi r10,r10,1
- /* use a cpu feature mask if we ever change our slb size */
- cmpldi r10,SLB_NUM_ENTRIES
+ /* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+ cmpldi r10,0
blt+ 4f
li r10,SLB_NUM_BOLTED
Index: linux-2.6-ozlabs/arch/powerpc/platforms/pasemi/setup.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/platforms/pasemi/setup.c
+++ linux-2.6-ozlabs/arch/powerpc/platforms/pasemi/setup.c
@@ -36,6 +36,7 @@
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/of_platform.h>
+#include <asm/mmu.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
@@ -295,7 +296,7 @@ static int pas_machine_check_handler(str
int i;
printk(KERN_ERR "slb contents:\n");
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
Index: linux-2.6-ozlabs/arch/powerpc/xmon/xmon.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/xmon/xmon.c
+++ linux-2.6-ozlabs/arch/powerpc/xmon/xmon.c
@@ -2531,7 +2531,7 @@ static void dump_slb(void)
printf("SLB contents of cpu %x\n", smp_processor_id());
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
printf("%02d %016lx ", i, tmp);
Index: linux-2.6-ozlabs/include/asm-powerpc/mmu-hash64.h
===================================================================
--- linux-2.6-ozlabs.orig/include/asm-powerpc/mmu-hash64.h
+++ linux-2.6-ozlabs/include/asm-powerpc/mmu-hash64.h
@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
/*
* If the processor supports 64k normal pages but not 64k cache
Index: linux-2.6-ozlabs/include/asm-powerpc/reg.h
===================================================================
--- linux-2.6-ozlabs.orig/include/asm-powerpc/reg.h
+++ linux-2.6-ozlabs/include/asm-powerpc/reg.h
@@ -695,12 +695,6 @@
#define PV_BE 0x0070
#define PV_PA6T 0x0090
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] Use SLB size from the device tree
2007-11-08 23:40 [PATCH] Use SLB size from the device tree Michael Neuling
@ 2007-11-09 0:16 ` Olof Johansson
2007-12-06 3:32 ` Paul Mackerras
1 sibling, 0 replies; 5+ messages in thread
From: Olof Johansson @ 2007-11-09 0:16 UTC (permalink / raw)
To: Michael Neuling; +Cc: linuxppc-dev, paulus, Will Schmidt
On Fri, Nov 09, 2007 at 10:40:18AM +1100, Michael Neuling wrote:
> Currently we hardwire the number of SLBs but the PAPR says we export an
> ibm,slb-size property to specify the number of SLB entries. This patch
> uses this property instead of assuming 64 always. If no property is
> found, we assume 64 entries as before.
>
> This soft patches the SLB handler, so it won't change performance at
> all.
>
> Signed-off-by: Michael Neuling <mikey@neuling.org>
Acked-by: Olof Johansson <olof@lixom.net>
I wonder if it isn't time soon to move all the mmu_.* globals to a
struct. That's a separate issue though.
-Olof
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH] Use SLB size from the device tree
2007-11-08 23:40 [PATCH] Use SLB size from the device tree Michael Neuling
2007-11-09 0:16 ` Olof Johansson
@ 2007-12-06 3:32 ` Paul Mackerras
2007-12-06 6:24 ` Michael Neuling
1 sibling, 1 reply; 5+ messages in thread
From: Paul Mackerras @ 2007-12-06 3:32 UTC (permalink / raw)
To: Michael Neuling; +Cc: Olof Johansson, linuxppc-dev, Will Schmidt
Michael Neuling writes:
> Currently we hardwire the number of SLBs but the PAPR says we export an
> ibm,slb-size property to specify the number of SLB entries. This patch
> uses this property instead of assuming 64 always. If no property is
> found, we assume 64 entries as before.
On 32-bit platforms (e.g. powermac) I get:
/home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c: In function 'check_cpu_slb_size':
/home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c:592: error: 'mmu_slb_size' undeclared (first use in this function)
/home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c:592: error: (Each undeclared identifier is reported only once
/home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c:592: error: for each function it appears in.)
make[2]: *** [arch/powerpc/kernel/prom.o] Error 1
Paul.
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH] Use SLB size from the device tree
2007-12-06 3:32 ` Paul Mackerras
@ 2007-12-06 6:24 ` Michael Neuling
2007-12-06 21:22 ` [PATCH v3] update xmon slb code Will Schmidt
0 siblings, 1 reply; 5+ messages in thread
From: Michael Neuling @ 2007-12-06 6:24 UTC (permalink / raw)
To: Paul Mackerras; +Cc: Olof Johansson, linuxppc-dev, Will Schmidt
Currently we hardwire the number of SLBs but PAPR says we export an
ibm,slb-size property to specify the number of SLB entries. This
patch uses this property instead of assuming 64. If no property is
found, we assume 64 entries as before.
This soft patches the SLB handler, so it won't change performance at
all.
Signed-off-by: Michael Neuling <mikey@neuling.org>
---
> On 32-bit platforms (e.g. powermac) I get:
>
> /home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c: In function 'check_cp
u_slb_size':
> /home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c:592: error: 'mmu_slb_s
ize' undeclared (first use in this function)
> /home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c:592: error: (Each unde
clared identifier is reported only once
> /home/paulus/kernel/powerpc/arch/powerpc/kernel/prom.c:592: error: for each f
unction it appears in.)
> make[2]: *** [arch/powerpc/kernel/prom.o] Error 1
Here's a nickel kid, buy yourself another 32bits! :-)
Or in other words.... Sorry about that! Below should fix it.
arch/powerpc/kernel/prom.c | 15 +++++++++++++++
arch/powerpc/mm/hash_utils_64.c | 1 +
arch/powerpc/mm/slb.c | 3 +++
arch/powerpc/mm/slb_low.S | 5 +++--
arch/powerpc/platforms/pasemi/setup.c | 3 ++-
arch/powerpc/xmon/xmon.c | 2 +-
include/asm-powerpc/mmu-hash64.h | 1 +
include/asm-powerpc/reg.h | 6 ------
8 files changed, 26 insertions(+), 10 deletions(-)
Index: linux-2.6-ozlabs/arch/powerpc/kernel/prom.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/kernel/prom.c
+++ linux-2.6-ozlabs/arch/powerpc/kernel/prom.c
@@ -583,6 +583,20 @@ static void __init check_cpu_pa_features
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
+#ifdef CONFIG_PPC64
+static void __init check_cpu_slb_size(unsigned long node)
+{
+ u32 *slb_size_ptr;
+
+ slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+ if (slb_size_ptr != NULL) {
+ mmu_slb_size = *slb_size_ptr;
+ }
+}
+#else
+#define check_cpu_slb_size(node) do { } while(0)
+#endif
+
static struct feature_property {
const char *name;
u32 min_value;
@@ -713,6 +727,7 @@ static int __init early_init_dt_scan_cpu
check_cpu_feature_properties(node);
check_cpu_pa_features(node);
+ check_cpu_slb_size(node);
#ifdef CONFIG_PPC_PSERIES
if (nthreads > 1)
Index: linux-2.6-ozlabs/arch/powerpc/mm/hash_utils_64.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/mm/hash_utils_64.c
+++ linux-2.6-ozlabs/arch/powerpc/mm/hash_utils_64.c
@@ -96,6 +96,7 @@ int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
Index: linux-2.6-ozlabs/arch/powerpc/mm/slb.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/mm/slb.c
+++ linux-2.6-ozlabs/arch/powerpc/mm/slb.c
@@ -256,6 +256,7 @@ void slb_initialize(void)
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
+ extern unsigned int *slb_compare_rr_to_size;
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -269,6 +270,8 @@ void slb_initialize(void)
SLB_VSID_KERNEL | linear_llp);
patch_slb_encoding(slb_miss_kernel_load_io,
SLB_VSID_KERNEL | io_llp);
+ patch_slb_encoding(slb_compare_rr_to_size,
+ mmu_slb_size);
DBG("SLB: linear LLP = %04x\n", linear_llp);
DBG("SLB: io LLP = %04x\n", io_llp);
Index: linux-2.6-ozlabs/arch/powerpc/mm/slb_low.S
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/mm/slb_low.S
+++ linux-2.6-ozlabs/arch/powerpc/mm/slb_low.S
@@ -227,8 +227,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISER
7: ld r10,PACASTABRR(r13)
addi r10,r10,1
- /* use a cpu feature mask if we ever change our slb size */
- cmpldi r10,SLB_NUM_ENTRIES
+ /* This gets soft patched on boot. */
+_GLOBAL(slb_compare_rr_to_size)
+ cmpldi r10,0
blt+ 4f
li r10,SLB_NUM_BOLTED
Index: linux-2.6-ozlabs/arch/powerpc/platforms/pasemi/setup.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/platforms/pasemi/setup.c
+++ linux-2.6-ozlabs/arch/powerpc/platforms/pasemi/setup.c
@@ -36,6 +36,7 @@
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/of_platform.h>
+#include <asm/mmu.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
@@ -295,7 +296,7 @@ static int pas_machine_check_handler(str
int i;
printk(KERN_ERR "slb contents:\n");
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
printk(KERN_ERR "%02d %016lx %016lx\n", i, e, v);
Index: linux-2.6-ozlabs/arch/powerpc/xmon/xmon.c
===================================================================
--- linux-2.6-ozlabs.orig/arch/powerpc/xmon/xmon.c
+++ linux-2.6-ozlabs/arch/powerpc/xmon/xmon.c
@@ -2543,7 +2543,7 @@ static void dump_slb(void)
printf("SLB contents of cpu %x\n", smp_processor_id());
- for (i = 0; i < SLB_NUM_ENTRIES; i++) {
+ for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
printf("%02d %016lx ", i, tmp);
Index: linux-2.6-ozlabs/include/asm-powerpc/mmu-hash64.h
===================================================================
--- linux-2.6-ozlabs.orig/include/asm-powerpc/mmu-hash64.h
+++ linux-2.6-ozlabs/include/asm-powerpc/mmu-hash64.h
@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
/*
* If the processor supports 64k normal pages but not 64k cache
Index: linux-2.6-ozlabs/include/asm-powerpc/reg.h
===================================================================
--- linux-2.6-ozlabs.orig/include/asm-powerpc/reg.h
+++ linux-2.6-ozlabs/include/asm-powerpc/reg.h
@@ -691,12 +691,6 @@
#define PV_BE 0x0070
#define PV_PA6T 0x0090
-/*
- * Number of entries in the SLB. If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES 64
-
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v3] update xmon slb code.
2007-12-06 6:24 ` Michael Neuling
@ 2007-12-06 21:22 ` Will Schmidt
0 siblings, 0 replies; 5+ messages in thread
From: Will Schmidt @ 2007-12-06 21:22 UTC (permalink / raw)
To: Michael Neuling; +Cc: Olof Johansson, linuxppc-dev, Paul Mackerras
[powerpc] update xmon slb code
This adds a bit more detail to the xmon SLB output. When the valid bit is
set, this displays the ESID and VSID values, as well as decoding the
segment size (1T or 256M) and displaying the LLP bits. This suppresses the
output for any SLB entries that contain only zeros.
sample output from power6 (1T segment support):
00 c000000008000000 40004f7ca3000500 1T ESID= c00000 VSID= 4f7ca3 LLP:100
01 d000000008000000 4000eb71b0000400 1T ESID= d00000 VSID= eb71b0 LLP: 0
08 0000000018000000 0000c8499f8ccc80 256M ESID= 1 VSID= c8499f8cc LLP: 0
09 00000000f8000000 0000d2c1a8e46c80 256M ESID= f VSID= d2c1a8e46 LLP: 0
10 0000000048000000 0000ca87eab1dc80 256M ESID= 4 VSID= ca87eab1d LLP: 0
43 cf00000008000000 400011b260000500 1T ESID= cf0000 VSID= 11b260 LLP:100
sample output from power5 (notice the non-valid but non-zero entries)
10 0000000008000000 00004fd0e077ac80 256M ESID= 0 VSID= 4fd0e077a LLP: 0
11 00000000f8000000 00005b085830fc80 256M ESID= f VSID= 5b085830f LLP: 0
12 0000000048000000 000052ce99fe6c80 256M ESID= 4 VSID= 52ce99fe6 LLP: 0
13 0000000018000000 000050904ed95c80 256M ESID= 1 VSID= 50904ed95 LLP: 0
14 cf00000008000000 0000d59aca40f500 256M ESID=cf0000000 VSID= d59aca40f LLP:100
15 c000000078000000 000045cb97751500 256M ESID=c00000007 VSID= 45cb97751 LLP:100
Tested on power5 and power6.
Signed-Off-By: Will Schmidt <will_schmidt@vnet.ibm.com>
---
This is a resend; this latest respin is updated to apply on top of Mikey's slb_mmu_size change.
(Earlier updates made per comments from Olof, Ben, and Paul.)
This version adds padding around the ESID and VSID fields, and the LLP bits
are displayed too.
Counting bits, the VSID output looks to be as large as 51 bits, which requires
up to 13 spaces. This doesnt count the B field bits which are now masked off
the top end of the VSID output.
I'll try to follow up sometime later with code that will handle decoding page
sizes. I don't have a testcase handy to properly exercise that yet. :-)
---
arch/powerpc/xmon/xmon.c | 29 +++++++++++++++++++++++------
1 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 121b04d..5314db7 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2539,16 +2539,33 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
static void dump_slb(void)
{
int i;
- unsigned long tmp;
+ unsigned long esid,vsid,valid;
+ unsigned long llp;
printf("SLB contents of cpu %x\n", smp_processor_id());
for (i = 0; i < mmu_slb_size; i++) {
- asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
- printf("%02d %016lx ", i, tmp);
-
- asm volatile("slbmfev %0,%1" : "=r" (tmp) : "r" (i));
- printf("%016lx\n", tmp);
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+ valid = (esid & SLB_ESID_V);
+ if (valid | esid | vsid) {
+ printf("%02d %016lx %016lx", i, esid, vsid);
+ if (valid) {
+ llp = vsid & SLB_VSID_LLP;
+ if (vsid & SLB_VSID_B_1T) {
+ printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n",
+ GET_ESID_1T(esid),
+ (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
+ llp);
+ } else {
+ printf(" 256M ESID=%9lx VSID=%13lx LLP:%3lx \n",
+ GET_ESID(esid),
+ (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
+ llp);
+ }
+ } else
+ printf("\n");
+ }
}
}
^ permalink raw reply related [flat|nested] 5+ messages in thread
end of thread, other threads:[~2007-12-06 21:22 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-11-08 23:40 [PATCH] Use SLB size from the device tree Michael Neuling
2007-11-09 0:16 ` Olof Johansson
2007-12-06 3:32 ` Paul Mackerras
2007-12-06 6:24 ` Michael Neuling
2007-12-06 21:22 ` [PATCH v3] update xmon slb code Will Schmidt
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).