public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86-64: fix build with older binutils
@ 2009-05-05 13:05 Jan Beulich
  2009-05-05 16:39 ` Sam Ravnborg
  0 siblings, 1 reply; 5+ messages in thread
From: Jan Beulich @ 2009-05-05 13:05 UTC (permalink / raw)
  To: mingo, tglx, hpa; +Cc: linux-kernel

Impact: build fix

binutils prior to 2.17 can't deal with the currently possible situation
of a new segment following the per-CPU segment, but that new segment
being empty - objcopy misplaces the .bss (and perhaps also the .brk)
sections outside of any segment. However, the current ordering of
sections really just appears to be the effect of cumulative unrelated
changes; re-ordering things allows to easily guarantee that the segment
following the per-CPU one is non-empty, and at once eliminates the need
for the bogus data.init2 segment.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

---
 arch/x86/kernel/vmlinux_64.lds.S |   91 +++++++++++++++++----------------------
 1 file changed, 41 insertions(+), 50 deletions(-)

--- linux-2.6.30-rc4/arch/x86/kernel/vmlinux_64.lds.S	2009-04-30 09:42:42.000000000 +0200
+++ 2.6.30-rc4-x86_64-link-order/arch/x86/kernel/vmlinux_64.lds.S	2009-04-29 15:21:20.000000000 +0200
@@ -18,11 +18,10 @@ PHDRS {
 	text PT_LOAD FLAGS(5);	/* R_E */
 	data PT_LOAD FLAGS(7);	/* RWE */
 	user PT_LOAD FLAGS(7);	/* RWE */
-	data.init PT_LOAD FLAGS(7);	/* RWE */
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);	/* RWE */
 #endif
-	data.init2 PT_LOAD FLAGS(7);	/* RWE */
+	init PT_LOAD FLAGS(7);	/* RWE */
 	note PT_NOTE FLAGS(0);	/* ___ */
 }
 SECTIONS
@@ -54,30 +53,31 @@ SECTIONS
   	__stop___ex_table = .;
   } :text = 0x9090
 
-  RODATA
+  RO_DATA(PAGE_SIZE)
 
-  . = ALIGN(PAGE_SIZE);		/* Align data segment to page size boundary */
 				/* Data */
   .data : AT(ADDR(.data) - LOAD_OFFSET) {
-	DATA_DATA
-	CONSTRUCTORS
-	_edata = .;			/* End of data section */
-	} :data
-
+	. = ALIGN(THREAD_SIZE);	/* init_task */
+	*(.data.init_task)
 
-  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
 	. = ALIGN(PAGE_SIZE);
+	*(.data.page_aligned)
+
 	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
 	*(.data.cacheline_aligned)
-  }
-  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
-  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+
+	DATA_DATA
+	CONSTRUCTORS
+
+	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
   	*(.data.read_mostly)
-  }
+
+	_edata = .;			/* End of data section */
+  } :data
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
 #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
@@ -125,34 +125,24 @@ SECTIONS
 #undef VVIRT_OFFSET
 #undef VVIRT
 
-  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-	. = ALIGN(THREAD_SIZE);	/* init_task */
-	*(.data.init_task)
-  }:data.init
-
-  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-	. = ALIGN(PAGE_SIZE);
-	*(.data.page_aligned)
-  }
-
-  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
-	/* might get freed after init */
-	. = ALIGN(PAGE_SIZE);
-	__smp_alt_begin = .;
-	__smp_locks = .;
-	*(.smp_locks)
-	__smp_locks_end = .;
-	. = ALIGN(PAGE_SIZE);
-	__smp_alt_end = .;
-  }
-
   . = ALIGN(PAGE_SIZE);		/* Init code and data */
   __init_begin = .;	/* paired with __init_end */
+
+#ifdef CONFIG_SMP
+  /*
+   * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+   * output PHDR, so the next output section - .init.text - should
+   * start another segment - init.
+   */
+  . = ALIGN(PAGE_SIZE);
+  PERCPU_VADDR(0, :percpu)
+#endif
+
   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
 	_sinittext = .;
 	INIT_TEXT
 	_einittext = .;
-  }
+  } :init
   .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 	__initdata_begin = .;
 	INIT_DATA
@@ -216,30 +206,31 @@ SECTIONS
   }
 #endif
 
-#ifdef CONFIG_SMP
-  /*
-   * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
-   * output PHDR, so the next output section - __data_nosave - should
-   * start another section data.init2.  Also, pda should be at the head of
-   * percpu area.  Preallocate it and define the percpu offset symbol
-   * so that it can be accessed as a percpu variable.
-   */
-  . = ALIGN(PAGE_SIZE);
-  PERCPU_VADDR(0, :percpu)
-#else
+#ifndef CONFIG_SMP
   PERCPU(PAGE_SIZE)
 #endif
 
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
+  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+	/* might get freed after init */
+	. = ALIGN(PAGE_SIZE);
+	__smp_alt_begin = .;
+	__smp_locks = .;
+	*(.smp_locks)
+	__smp_locks_end = .;
+	. = ALIGN(PAGE_SIZE);
+	__smp_alt_end = .;
+  }
+
   .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
 	. = ALIGN(PAGE_SIZE);
 	__nosave_begin = .;
 	*(.data.nosave)
 	. = ALIGN(PAGE_SIZE);
 	__nosave_end = .;
-  } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
+  }
 
   .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 	. = ALIGN(PAGE_SIZE);



^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86-64: fix build with older binutils
  2009-05-05 13:05 [PATCH] x86-64: fix build with older binutils Jan Beulich
@ 2009-05-05 16:39 ` Sam Ravnborg
  2009-05-06  6:59   ` Jan Beulich
  0 siblings, 1 reply; 5+ messages in thread
From: Sam Ravnborg @ 2009-05-05 16:39 UTC (permalink / raw)
  To: Jan Beulich; +Cc: mingo, tglx, hpa, linux-kernel

Hi Jan.

> Impact: build fix
> 
> binutils prior to 2.17 can't deal with the currently possible situation
> of a new segment following the per-CPU segment, but that new segment
> being empty - objcopy misplaces the .bss (and perhaps also the .brk)
> sections outside of any segment. However, the current ordering of
> sections really just appears to be the effect of cumulative unrelated
> changes; re-ordering things allows to easily guarantee that the segment
> following the per-CPU one is non-empty, and at once eliminates the need
> for the bogus data.init2 segment.
> 
> Signed-off-by: Jan Beulich <jbeulich@novell.com>

I like the way your patch simplifies the linker script for x86.
We have recently merged the linker scripts for 32 and 64 bit,
and I tried to apply your patch on top of the unified version.
(Applied manually obviously).

With a 64 bit defconfig build I got:
/home/sam/x-tools/x86_64-unknown-linux-gnu/bin/x86_64-unknown-linux-gnu-ld: section .vsyscall_0 [00000000016c6000 -> 00000000016c60e7] overlaps section .init.rodata [00000000016c5a00 -> 00000000016c6348]
make[1]: *** [.tmp_vmlinux1] Error 1

I did not try to build a kernel with your original patch.

Can you spot anything obvious wrong in my patch.
It is on top of x86/kbuild in -tip.

	Sam

diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4c85b2e..477fa48 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -46,11 +46,10 @@ PHDRS {
 	data PT_LOAD FLAGS(7);          /* RWE */
 #ifdef CONFIG_X86_64
 	user PT_LOAD FLAGS(7);          /* RWE */
-	data.init PT_LOAD FLAGS(7);     /* RWE */
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);        /* RWE */
 #endif
-	data.init2 PT_LOAD FLAGS(7);    /* RWE */
+	init PT_LOAD FLAGS(7);          /* RWE */
 #endif
 	note PT_NOTE FLAGS(0);          /* ___ */
 }
@@ -103,70 +102,58 @@ SECTIONS
 		__stop___ex_table = .;
 	} :text = 0x9090
 
-	RODATA
+	RO_DATA(PAGE_SIZE)
 
 	/* Data */
 	. = ALIGN(PAGE_SIZE);
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
-		DATA_DATA
-		CONSTRUCTORS
-
-#ifdef CONFIG_X86_64
-		/* End of data section */
-		_edata = .;
-#endif
-	} :data
+		/* init_task */
+		. = ALIGN(THREAD_SIZE);
+		*(.data.init_task)
 
-#ifdef CONFIG_X86_32
-	/* 32 bit has nosave before _edata */
-	. = ALIGN(PAGE_SIZE);
-	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		__nosave_begin = .;
-		*(.data.nosave)
 		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	}
-#endif
-
-	. = ALIGN(PAGE_SIZE);
-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
 		*(.data.page_aligned)
-		*(.data.idt)
-	}
 
 #ifdef CONFIG_X86_32
-	. = ALIGN(32);
+		. = ALIGN(32);
 #else
-	. = ALIGN(PAGE_SIZE);
-	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+		. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
 #endif
-	.data.cacheline_aligned :
-		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
 		*(.data.cacheline_aligned)
-	}
 
-	/* rarely changed data like cpu maps */
+		DATA_DATA
+		CONSTRUCTORS
+
+		/* rarely changed data like cpu maps */
 #ifdef CONFIG_X86_32
-	. = ALIGN(32);
+		. = ALIGN(32);
 #else
-	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
+		. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
 #endif
-	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
 		*(.data.read_mostly)
 
-#ifdef CONFIG_X86_32
 		/* End of data section */
 		_edata = .;
-#endif
+	} :data
+
+#ifdef CONFIG_X86_32
+	/* 32 bit has nosave before _edata */
+	. = ALIGN(PAGE_SIZE);
+	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+		__nosave_begin = .;
+		*(.data.nosave)
+		. = ALIGN(PAGE_SIZE);
+		__nosave_end = .;
 	}
+#endif
 
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
-                            SIZEOF(.data.read_mostly) + 4095) & ~(4095))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
-                            SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + \
+                            SIZEOF(.data) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + \
+                            SIZEOF(.data) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
 #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
@@ -232,35 +219,31 @@ SECTIONS
 
 #endif /* CONFIG_X86_64 */
 
-	/* init_task */
-	. = ALIGN(THREAD_SIZE);
-	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		*(.data.init_task)
+	/* will be freed after init - paired with __init_end */
+	.init.start : AT(ADDR(.init.start) - LOAD_OFFSET) {
+		. = ALIGN(PAGE_SIZE);
+		__init_begin = .;
 	}
-#ifdef CONFIG_X86_64
-	 :data.init
-#endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
 	/*
-	 * smp_locks might be freed after init
-	 * start/end must be page aligned
+	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+	 * output PHDR, so the next output section - .init.text - should
+	 * start another segment - init.
 	 */
 	. = ALIGN(PAGE_SIZE);
-	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
-		__smp_locks = .;
-		*(.smp_locks)
-		__smp_locks_end = .;
-		. = ALIGN(PAGE_SIZE);
-	}
+	PERCPU_VADDR(0, :percpu)
+#endif
 
-	/* Init code and data - will be freed after init */
-	. = ALIGN(PAGE_SIZE);
 	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		__init_begin = .; /* paired with __init_end */
 		_sinittext = .;
 		INIT_TEXT
 		_einittext = .;
 	}
+#ifdef CONFIG_X86_64
+	 :init
+#endif
+
 
 	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 		INIT_DATA
@@ -331,17 +314,7 @@ SECTIONS
 	}
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
-	/*
-	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
-	 * output PHDR, so the next output section - __data_nosave - should
-	 * start another section data.init2.  Also, pda should be at the head of
-	 * percpu area.  Preallocate it and define the percpu offset symbol
-	 * so that it can be accessed as a percpu variable.
-	 */
-	. = ALIGN(PAGE_SIZE);
-	PERCPU_VADDR(0, :percpu)
-#else
+#if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
 	PERCPU(PAGE_SIZE)
 #endif
 
@@ -352,6 +325,18 @@ SECTIONS
 		__init_end = .;
 	}
 
+	/*
+	 * smp_locks might be freed after init
+	 * start/end must be page aligned
+	 */
+	. = ALIGN(PAGE_SIZE);
+	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+		__smp_locks = .;
+		*(.smp_locks)
+		__smp_locks_end = .;
+		. = ALIGN(PAGE_SIZE);
+	}
+
 #ifdef CONFIG_X86_64
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
 		. = ALIGN(PAGE_SIZE);
@@ -359,8 +344,7 @@ SECTIONS
 		*(.data.nosave)
 		. = ALIGN(PAGE_SIZE);
 		__nosave_end = .;
-	} :data.init2
-	/* use another section data.init2, see PERCPU_VADDR() above */
+	}
 #endif
 
 	/* BSS */

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86-64: fix build with older binutils
  2009-05-05 16:39 ` Sam Ravnborg
@ 2009-05-06  6:59   ` Jan Beulich
  2009-05-06 12:42     ` Sam Ravnborg
  2009-05-08  4:40     ` H. Peter Anvin
  0 siblings, 2 replies; 5+ messages in thread
From: Jan Beulich @ 2009-05-06  6:59 UTC (permalink / raw)
  To: Sam Ravnborg; +Cc: mingo, tglx, linux-kernel, hpa

>>> Sam Ravnborg <sam@ravnborg.org> 05.05.09 18:39 >>>
>I like the way your patch simplifies the linker script for x86.
>We have recently merged the linker scripts for 32 and 64 bit,
>and I tried to apply your patch on top of the unified version.
>(Applied manually obviously).
>
>With a 64 bit defconfig build I got:
>/home/sam/x-tools/x86_64-unknown-linux-gnu/bin/x86_64-unknown-linux-gnu-ld: section .vsyscall_0 [00000000016c6000 -> >00000000016c60e7] overlaps section .init.rodata [00000000016c5a00 -> 00000000016c6348]
>make[1]: *** [.tmp_vmlinux1] Error 1
>
>I did not try to build a kernel with your original patch.
>
>Can you spot anything obvious wrong in my patch.
>It is on top of x86/kbuild in -tip.

Yeah, the placement of .init.start appears to be wrong - it should follow the
SMP/x86-64 case of the per-CPU segment, and it should also be the
one getting the :init attached. Hopefully that won't get us back to the
binutils problem I was originally encountering - what is the extra .init.start
section good for anyway? And why does __init_end continue to live
outside of any section (this sort of thing causes problems with
CONFIG_RELOCATABLE and older binutils afair, as such symbols get
marked absolute by the latter)? While this was the case (and a mistake)
for x86-64 prior to the merge, it was properly placed in a section for
ix86, and hence I'd view it as a regression there.

Btw., why is .data.nosave being placed differently for 32- and 64-bit?

Jan


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86-64: fix build with older binutils
  2009-05-06  6:59   ` Jan Beulich
@ 2009-05-06 12:42     ` Sam Ravnborg
  2009-05-08  4:40     ` H. Peter Anvin
  1 sibling, 0 replies; 5+ messages in thread
From: Sam Ravnborg @ 2009-05-06 12:42 UTC (permalink / raw)
  To: Jan Beulich; +Cc: mingo, tglx, linux-kernel, hpa

Hi Jan.

On Wed, May 06, 2009 at 07:59:04AM +0100, Jan Beulich wrote:
> >>> Sam Ravnborg <sam@ravnborg.org> 05.05.09 18:39 >>>
> >I like the way your patch simplifies the linker script for x86.
> >We have recently merged the linker scripts for 32 and 64 bit,
> >and I tried to apply your patch on top of the unified version.
> >(Applied manually obviously).
> >
> >With a 64 bit defconfig build I got:
> >/home/sam/x-tools/x86_64-unknown-linux-gnu/bin/x86_64-unknown-linux-gnu-ld: section .vsyscall_0 [00000000016c6000 -> >00000000016c60e7] overlaps section .init.rodata [00000000016c5a00 -> 00000000016c6348]
> >make[1]: *** [.tmp_vmlinux1] Error 1
> >
> >I did not try to build a kernel with your original patch.
> >
> >Can you spot anything obvious wrong in my patch.
> >It is on top of x86/kbuild in -tip.
> 
> Yeah, the placement of .init.start appears to be wrong - it should follow the
> SMP/x86-64 case of the per-CPU segment, and it should also be the
> one getting the :init attached. Hopefully that won't get us back to the
> binutils problem I was originally encountering - what is the extra .init.start
> section good for anyway?

.init.start is there so __init_begin is not an absolute symbol to support
a relocatable kernel.
I named the section .init.start because we have the

    PERCPU_VADDR(0, :percpu)

stuff inbetween this and .init.text



> And why does __init_end continue to live
> outside of any section (this sort of thing causes problems with
> CONFIG_RELOCATABLE and older binutils afair, as such symbols get
> marked absolute by the latter)?

Talking about this snippet where __init_end is inside
a .init.end output section.

        /* freed after init ends here */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;
        }

This was copied from 32 bit verbatim.

I would love to stick it inside one of the already defined sections.
But to minimize chrunch when we merged I kept the 32 bit layout.

> Btw., why is .data.nosave being placed differently for 32- and 64-bit?

A merge artifact - again to try to minimize changes - plus the fact that
on 64 bit we added it to a specific segment whereas we did not do so in 32 bit.

And I'm yet to fully understand the use of segments so I am reluctant
to touch it for now.

If you feel inclined it would be great if you could give the merged
linker script an overhaul. There is obviously stuff to be simplified and
I have limited time to do so atm.

	Sam

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86-64: fix build with older binutils
  2009-05-06  6:59   ` Jan Beulich
  2009-05-06 12:42     ` Sam Ravnborg
@ 2009-05-08  4:40     ` H. Peter Anvin
  1 sibling, 0 replies; 5+ messages in thread
From: H. Peter Anvin @ 2009-05-08  4:40 UTC (permalink / raw)
  To: Jan Beulich; +Cc: Sam Ravnborg, mingo, tglx, linux-kernel

Jan Beulich wrote:
> 
> Yeah, the placement of .init.start appears to be wrong - it should follow the
> SMP/x86-64 case of the per-CPU segment, and it should also be the
> one getting the :init attached. Hopefully that won't get us back to the
> binutils problem I was originally encountering - what is the extra .init.start
> section good for anyway? And why does __init_end continue to live
> outside of any section (this sort of thing causes problems with
> CONFIG_RELOCATABLE and older binutils afair, as such symbols get
> marked absolute by the latter)? While this was the case (and a mistake)
> for x86-64 prior to the merge, it was properly placed in a section for
> ix86, and hence I'd view it as a regression there.
> 
> Btw., why is .data.nosave being placed differently for 32- and 64-bit?
> 

Probably for no good reason, but it might need additional fixes.

	-hpa

-- 
H. Peter Anvin, Intel Open Source Technology Center
I work for Intel.  I don't speak on their behalf.


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2009-05-08  4:41 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-05-05 13:05 [PATCH] x86-64: fix build with older binutils Jan Beulich
2009-05-05 16:39 ` Sam Ravnborg
2009-05-06  6:59   ` Jan Beulich
2009-05-06 12:42     ` Sam Ravnborg
2009-05-08  4:40     ` H. Peter Anvin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox