* [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem [not found] <20130320163131.GE2273@redhat.com> @ 2013-03-20 19:24 ` Yinghai Lu 2013-03-22 17:59 ` Vivek Goyal 0 siblings, 1 reply; 7+ messages in thread From: Yinghai Lu @ 2013-03-20 19:24 UTC (permalink / raw) To: Simon Horman, H. Peter Anvin, Vivek Goyal, Eric W. Biederman Cc: Yinghai Lu, kexec Vivek found special handling of crashkernel low is not good. We should extend kexec-tools to handle multiple Crash kernel instead. Extend crash_reserved_mem to array instead and use kexec_iomem_for_each_line directly. After that we can drop crashkernel low. Suggested-by: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Yinghai Lu <yinghai@kernel.org> --- kexec/arch/i386/crashdump-x86.c | 91 ++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 48 deletions(-) Index: kexec-tools/kexec/arch/i386/crashdump-x86.c =================================================================== --- kexec-tools.orig/kexec/arch/i386/crashdump-x86.c +++ kexec-tools/kexec/arch/i386/crashdump-x86.c @@ -188,9 +188,9 @@ static int exclude_region(int *nr_ranges static struct memory_range crash_memory_range[CRASH_MAX_MEMORY_RANGES]; /* Memory region reserved for storing panic kernel and other data. */ -static struct memory_range crash_reserved_mem; -/* under 4G parts */ -static struct memory_range crash_reserved_low_mem; +#define CRASH_RESERVED_MEM_NR 8 +static struct memory_range crash_reserved_mem[CRASH_RESERVED_MEM_NR]; +static int crash_reserved_mem_nr; /* Reads the appropriate file and retrieves the SYSTEM RAM regions for whom to * create Elf headers. 
Keeping it separate from get_memory_ranges() as @@ -207,7 +207,7 @@ static int get_crash_memory_ranges(struc int kexec_flags, unsigned long lowmem_limit) { const char *iomem = proc_iomem(); - int memory_ranges = 0, gart = 0; + int memory_ranges = 0, gart = 0, i; char line[MAX_LINE]; FILE *fp; unsigned long long start, end; @@ -268,29 +268,28 @@ static int get_crash_memory_ranges(struc } fclose(fp); if (kexec_flags & KEXEC_PRESERVE_CONTEXT) { - int i; for (i = 0; i < memory_ranges; i++) { if (crash_memory_range[i].end > 0x0009ffff) { - crash_reserved_mem.start = \ + crash_reserved_mem[0].start = \ crash_memory_range[i].start; break; } } - if (crash_reserved_mem.start >= mem_max) { + if (crash_reserved_mem[0].start >= mem_max) { fprintf(stderr, "Too small mem_max: 0x%llx.\n", mem_max); return -1; } - crash_reserved_mem.end = mem_max; - crash_reserved_mem.type = RANGE_RAM; + crash_reserved_mem[0].end = mem_max; + crash_reserved_mem[0].type = RANGE_RAM; + crash_reserved_mem_nr = 1; } - if (exclude_region(&memory_ranges, crash_reserved_mem.start, - crash_reserved_mem.end) < 0) - return -1; - if (crash_reserved_low_mem.start && - exclude_region(&memory_ranges, crash_reserved_low_mem.start, - crash_reserved_low_mem.end) < 0) - return -1; + + for (i = 0; i < crash_reserved_mem_nr; i++) + if (exclude_region(&memory_ranges, crash_reserved_mem[i].start, + crash_reserved_mem[i].end) < 0) + return -1; + if (gart) { /* exclude GART region if the system has one */ if (exclude_region(&memory_ranges, gart_start, gart_end) < 0) @@ -351,9 +350,10 @@ static int get_crash_memory_ranges_xen(s qsort(*range, *ranges, sizeof(struct memory_range), compare_ranges); - if (exclude_region(ranges, crash_reserved_mem.start, - crash_reserved_mem.end) < 0) - goto err; + for (i = 0; i < crash_reserved_mem_nr; i++) + if (exclude_region(ranges, crash_reserved_mem[i].start, + crash_reserved_mem[i].end) < 0) + goto err; ret = 0; @@ -434,9 +434,10 @@ static int get_crash_memory_ranges_xen(s qsort(*range, 
*ranges, sizeof(struct memory_range), compare_ranges); - if (exclude_region(ranges, crash_reserved_mem.start, - crash_reserved_mem.end) < 0) - goto err; + for (i = 0; i < crash_reserved_mem_nr; i++) + if (exclude_region(ranges, crash_reserved_mem[i].start, + crash_reserved_mem[i].end) < 0) + goto err; ret = 0; @@ -1022,15 +1023,10 @@ int load_crashdump_segments(struct kexec memmap_p = xmalloc(sz); memset(memmap_p, 0, sz); add_memmap(memmap_p, info->backup_src_start, info->backup_src_size); - sz = crash_reserved_mem.end - crash_reserved_mem.start +1; - if (add_memmap(memmap_p, crash_reserved_mem.start, sz) < 0) { - return ENOCRASHKERNEL; - } - - if (crash_reserved_low_mem.start) { - sz = crash_reserved_low_mem.end - crash_reserved_low_mem.start - +1; - add_memmap(memmap_p, crash_reserved_low_mem.start, sz); + for (i = 0; i < crash_reserved_mem_nr; i++) { + sz = crash_reserved_mem[i].end - crash_reserved_mem[i].start +1; + if (add_memmap(memmap_p, crash_reserved_mem[i].start, sz) < 0) + return ENOCRASHKERNEL; } /* Create a backup region segment to store backup data*/ @@ -1101,25 +1097,24 @@ int load_crashdump_segments(struct kexec return 0; } -int is_crashkernel_mem_reserved(void) +static int crashkernel_mem_callback(void *UNUSED(data), int nr, + char *UNUSED(str), + unsigned long base, + unsigned long length) { - uint64_t start, end; - - if (parse_iomem_single("Crash kernel\n", &start, &end) || start == end) - return 0; - - crash_reserved_mem.start = start; - crash_reserved_mem.end = end; - crash_reserved_mem.type = RANGE_RAM; - - /* If there is no Crash low kernel, still can go on */ - if (parse_iomem_single("Crash kernel low\n", &start, &end) || - start == end) + if (nr >= CRASH_RESERVED_MEM_NR) return 1; - crash_reserved_low_mem.start = start; - crash_reserved_low_mem.end = end; - crash_reserved_low_mem.type = RANGE_RAM; + crash_reserved_mem[nr].start = base; + crash_reserved_mem[nr].end = base + length - 1; + crash_reserved_mem[nr].type = RANGE_RAM; + return 0; 
+} + +int is_crashkernel_mem_reserved(void) +{ + crash_reserved_mem_nr = kexec_iomem_for_each_line("Crash kernel\n", + crashkernel_mem_callback, NULL); - return 1; + return !!crash_reserved_mem_nr; } _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem 2013-03-20 19:24 ` [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem Yinghai Lu @ 2013-03-22 17:59 ` Vivek Goyal 2013-03-22 20:52 ` Yinghai Lu 0 siblings, 1 reply; 7+ messages in thread From: Vivek Goyal @ 2013-03-22 17:59 UTC (permalink / raw) To: Yinghai Lu; +Cc: Simon Horman, kexec, Eric W. Biederman, H. Peter Anvin On Wed, Mar 20, 2013 at 12:24:54PM -0700, Yinghai Lu wrote: > Vivek found specical handling crashkernel low in not good. > We should extend kexec-tools to handle multiple Crash kernel instead. > > Extend crash_reserved_mem to array instead and use > kexec_iomem_for_each_line directly. After that we can drop > crashkernel low. Hi Yinghai, I get following error while loading kernel. parse_iomem_single failed. Could not get memory layout I think you need to handle parse_iomem_single("Crash Kernel") in kexec-x86-common.c. It assumes that there is a single contiguous reserved region of memory and set mem_min and mem_max based on that. But that will not work when there are multiple "Crash Kernel" entries. In case of kexec_on_panic, we seem to have all the memory ranges in info->memory_ranges[]. I guess we don't need that. We just need ranges which are reserved for crash kernel and marked by "Crash Kernel". In that case we will be able to handle multiple "Crash Kernel" ranges. 
Thanks Vivek > > Suggested-by: Vivek Goyal <vgoyal@redhat.com> > Signed-off-by: Yinghai Lu <yinghai@kernel.org> > > --- > kexec/arch/i386/crashdump-x86.c | 91 ++++++++++++++++++---------------------- > 1 file changed, 43 insertions(+), 48 deletions(-) > > Index: kexec-tools/kexec/arch/i386/crashdump-x86.c > =================================================================== > --- kexec-tools.orig/kexec/arch/i386/crashdump-x86.c > +++ kexec-tools/kexec/arch/i386/crashdump-x86.c > @@ -188,9 +188,9 @@ static int exclude_region(int *nr_ranges > static struct memory_range crash_memory_range[CRASH_MAX_MEMORY_RANGES]; > > /* Memory region reserved for storing panic kernel and other data. */ > -static struct memory_range crash_reserved_mem; > -/* under 4G parts */ > -static struct memory_range crash_reserved_low_mem; > +#define CRASH_RESERVED_MEM_NR 8 > +static struct memory_range crash_reserved_mem[CRASH_RESERVED_MEM_NR]; > +static int crash_reserved_mem_nr; > > /* Reads the appropriate file and retrieves the SYSTEM RAM regions for whom to > * create Elf headers. 
Keeping it separate from get_memory_ranges() as > @@ -207,7 +207,7 @@ static int get_crash_memory_ranges(struc > int kexec_flags, unsigned long lowmem_limit) > { > const char *iomem = proc_iomem(); > - int memory_ranges = 0, gart = 0; > + int memory_ranges = 0, gart = 0, i; > char line[MAX_LINE]; > FILE *fp; > unsigned long long start, end; > @@ -268,29 +268,28 @@ static int get_crash_memory_ranges(struc > } > fclose(fp); > if (kexec_flags & KEXEC_PRESERVE_CONTEXT) { > - int i; > for (i = 0; i < memory_ranges; i++) { > if (crash_memory_range[i].end > 0x0009ffff) { > - crash_reserved_mem.start = \ > + crash_reserved_mem[0].start = \ > crash_memory_range[i].start; > break; > } > } > - if (crash_reserved_mem.start >= mem_max) { > + if (crash_reserved_mem[0].start >= mem_max) { > fprintf(stderr, "Too small mem_max: 0x%llx.\n", > mem_max); > return -1; > } > - crash_reserved_mem.end = mem_max; > - crash_reserved_mem.type = RANGE_RAM; > + crash_reserved_mem[0].end = mem_max; > + crash_reserved_mem[0].type = RANGE_RAM; > + crash_reserved_mem_nr = 1; > } > - if (exclude_region(&memory_ranges, crash_reserved_mem.start, > - crash_reserved_mem.end) < 0) > - return -1; > - if (crash_reserved_low_mem.start && > - exclude_region(&memory_ranges, crash_reserved_low_mem.start, > - crash_reserved_low_mem.end) < 0) > - return -1; > + > + for (i = 0; i < crash_reserved_mem_nr; i++) > + if (exclude_region(&memory_ranges, crash_reserved_mem[i].start, > + crash_reserved_mem[i].end) < 0) > + return -1; > + > if (gart) { > /* exclude GART region if the system has one */ > if (exclude_region(&memory_ranges, gart_start, gart_end) < 0) > @@ -351,9 +350,10 @@ static int get_crash_memory_ranges_xen(s > > qsort(*range, *ranges, sizeof(struct memory_range), compare_ranges); > > - if (exclude_region(ranges, crash_reserved_mem.start, > - crash_reserved_mem.end) < 0) > - goto err; > + for (i = 0; i < crash_reserved_mem_nr; i++) > + if (exclude_region(ranges, crash_reserved_mem[i].start, > + 
crash_reserved_mem[i].end) < 0) > + goto err; > > ret = 0; > > @@ -434,9 +434,10 @@ static int get_crash_memory_ranges_xen(s > > qsort(*range, *ranges, sizeof(struct memory_range), compare_ranges); > > - if (exclude_region(ranges, crash_reserved_mem.start, > - crash_reserved_mem.end) < 0) > - goto err; > + for (i = 0; i < crash_reserved_mem_nr; i++) > + if (exclude_region(ranges, crash_reserved_mem[i].start, > + crash_reserved_mem[i].end) < 0) > + goto err; > > ret = 0; > > @@ -1022,15 +1023,10 @@ int load_crashdump_segments(struct kexec > memmap_p = xmalloc(sz); > memset(memmap_p, 0, sz); > add_memmap(memmap_p, info->backup_src_start, info->backup_src_size); > - sz = crash_reserved_mem.end - crash_reserved_mem.start +1; > - if (add_memmap(memmap_p, crash_reserved_mem.start, sz) < 0) { > - return ENOCRASHKERNEL; > - } > - > - if (crash_reserved_low_mem.start) { > - sz = crash_reserved_low_mem.end - crash_reserved_low_mem.start > - +1; > - add_memmap(memmap_p, crash_reserved_low_mem.start, sz); > + for (i = 0; i < crash_reserved_mem_nr; i++) { > + sz = crash_reserved_mem[i].end - crash_reserved_mem[i].start +1; > + if (add_memmap(memmap_p, crash_reserved_mem[i].start, sz) < 0) > + return ENOCRASHKERNEL; > } > > /* Create a backup region segment to store backup data*/ > @@ -1101,25 +1097,24 @@ int load_crashdump_segments(struct kexec > return 0; > } > > -int is_crashkernel_mem_reserved(void) > +static int crashkernel_mem_callback(void *UNUSED(data), int nr, > + char *UNUSED(str), > + unsigned long base, > + unsigned long length) > { > - uint64_t start, end; > - > - if (parse_iomem_single("Crash kernel\n", &start, &end) || start == end) > - return 0; > - > - crash_reserved_mem.start = start; > - crash_reserved_mem.end = end; > - crash_reserved_mem.type = RANGE_RAM; > - > - /* If there is no Crash low kernel, still can go on */ > - if (parse_iomem_single("Crash kernel low\n", &start, &end) || > - start == end) > + if (nr >= CRASH_RESERVED_MEM_NR) > return 1; > > - 
crash_reserved_low_mem.start = start; > - crash_reserved_low_mem.end = end; > - crash_reserved_low_mem.type = RANGE_RAM; > + crash_reserved_mem[nr].start = base; > + crash_reserved_mem[nr].end = base + length - 1; > + crash_reserved_mem[nr].type = RANGE_RAM; > + return 0; > +} > + > +int is_crashkernel_mem_reserved(void) > +{ > + crash_reserved_mem_nr = kexec_iomem_for_each_line("Crash kernel\n", > + crashkernel_mem_callback, NULL); > > - return 1; > + return !!crash_reserved_mem_nr; > } _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem 2013-03-22 17:59 ` Vivek Goyal @ 2013-03-22 20:52 ` Yinghai Lu 2013-03-22 21:21 ` Vivek Goyal 0 siblings, 1 reply; 7+ messages in thread From: Yinghai Lu @ 2013-03-22 20:52 UTC (permalink / raw) To: Vivek Goyal; +Cc: Simon Horman, kexec, Eric W. Biederman, H. Peter Anvin On Fri, Mar 22, 2013 at 10:59 AM, Vivek Goyal <vgoyal@redhat.com> wrote: > I get following error while loading kernel. > > parse_iomem_single failed. > Could not get memory layout > > I think you need to handle parse_iomem_single("Crash Kernel") in > kexec-x86-common.c. It assumes that there is a single contiguous > reserved region of memory and set mem_min and mem_max based on > that. But that will not work when there are multiple "Crash Kernel" > entries. > > In case of kexec_on_panic, we seem to have all the memory ranges in > info->memory_ranges[]. I guess we don't need that. We just need ranges > which are reserved for crash kernel and marked by "Crash Kernel". In > that case we will be able to handle multiple "Crash Kernel" ranges. but we still like to put kernel and initrd high, and leave low range for swiotlb. could just find mem_min and mem_max for largest and last one. Thanks Yinghai _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem 2013-03-22 20:52 ` Yinghai Lu @ 2013-03-22 21:21 ` Vivek Goyal 2013-03-22 21:27 ` H. Peter Anvin 2013-03-22 21:32 ` Yinghai Lu 0 siblings, 2 replies; 7+ messages in thread From: Vivek Goyal @ 2013-03-22 21:21 UTC (permalink / raw) To: Yinghai Lu; +Cc: Simon Horman, kexec, Eric W. Biederman, H. Peter Anvin On Fri, Mar 22, 2013 at 01:52:26PM -0700, Yinghai Lu wrote: > On Fri, Mar 22, 2013 at 10:59 AM, Vivek Goyal <vgoyal@redhat.com> wrote: > > I get following error while loading kernel. > > > > parse_iomem_single failed. > > Could not get memory layout > > > > I think you need to handle parse_iomem_single("Crash Kernel") in > > kexec-x86-common.c. It assumes that there is a single contiguous > > reserved region of memory and set mem_min and mem_max based on > > that. But that will not work when there are multiple "Crash Kernel" > > entries. > > > > In case of kexec_on_panic, we seem to have all the memory ranges in > > info->memory_ranges[]. I guess we don't need that. We just need ranges > > which are reserved for crash kernel and marked by "Crash Kernel". In > > that case we will be able to handle multiple "Crash Kernel" ranges. > > but we still like to put kernel and initrd high, and leave low range > for swiotlb. > could just find mem_min and mem_max for largest and last one. Yes, but I guess that should not be hard coded here. It is during load phase we should enforce where we want to load all the segments. It will work though for our current usage. May be later we can improve it further. Where loader sees all the memory ranges (low and high) and we choose appropriate segment. BTW, I had a query about loading 64bit entry point bzImage. In 32bit bzImage entry point logic, we used to load bzImage at the beginning of memory hole and initrd at the end of memory hole. 
So that bzImage and initramfs are as far as possible and initramfs decompression does not overwrite anything or for that matter setting bss area does not spill over into initramfs. In new code, it looks like we seem to be loading kernel towards the end of the hole. addr = add_buffer(info, kernel + kern16_size, k_size, size, align, 0x100000, -1, -1); IIUC, this has potential that new kernel can overwrite some of the old kernel's data structure while setting up bss. Shouldn't we do it 32bit entry code way where bzImage is loaded towards the beginning of hole and initramfs is loaded towards the end of the hole. Thanks Vivek _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem 2013-03-22 21:21 ` Vivek Goyal @ 2013-03-22 21:27 ` H. Peter Anvin 2013-03-22 21:35 ` Yinghai Lu 2013-03-22 21:32 ` Yinghai Lu 1 sibling, 1 reply; 7+ messages in thread From: H. Peter Anvin @ 2013-03-22 21:27 UTC (permalink / raw) To: Vivek Goyal; +Cc: kexec, Simon Horman, Yinghai Lu, Eric W. Biederman On 03/22/2013 02:21 PM, Vivek Goyal wrote: > > BTW, I had a query about loading 64bit entry point bzImage. In 32bit > bzImage entry point logic, we used to load bzImage at the beginning > of memory hole and initrd at the end of memory hole. So that bzImage > and initramfs are as far as possible and initramfs decompression does > not overwrite anything or for that matter setting bss are does not > spill over into initramfs. > > In new code, It looks like we seem to be loading kernel towards the end > of the hole. > > addr = add_buffer(info, kernel + kern16_size, k_size, > size, align, 0x100000, -1, -1); > > IIUC, this has potential that new kernel can overwrite some of the old > kernel's data structure while setting up bss. Shouldn't we do it 32bit > entry code way where bzImage is loaded towards the beginning of hole and > initramfs is loaded towards the end of the hole. > Since boot protocol 2.10+ the kernel actively exports how much memory it needs during its setup phase. -hpa _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem 2013-03-22 21:27 ` H. Peter Anvin @ 2013-03-22 21:35 ` Yinghai Lu 0 siblings, 0 replies; 7+ messages in thread From: Yinghai Lu @ 2013-03-22 21:35 UTC (permalink / raw) To: H. Peter Anvin; +Cc: Simon Horman, kexec, Eric W. Biederman, Vivek Goyal On Fri, Mar 22, 2013 at 2:27 PM, H. Peter Anvin <hpa@zytor.com> wrote: > On 03/22/2013 02:21 PM, Vivek Goyal wrote: >> >> BTW, I had a query about loading 64bit entry point bzImage. In 32bit >> bzImage entry point logic, we used to load bzImage at the beginning >> of memory hole and initrd at the end of memory hole. So that bzImage >> and initramfs are as far as possible and initramfs decompression does >> not overwrite anything or for that matter setting bss are does not >> spill over into initramfs. >> >> In new code, It looks like we seem to be loading kernel towards the end >> of the hole. >> >> addr = add_buffer(info, kernel + kern16_size, k_size, >> size, align, 0x100000, -1, -1); >> >> IIUC, this has potential that new kernel can overwrite some of the old >> kernel's data structure while setting up bss. Shouldn't we do it 32bit >> entry code way where bzImage is loaded towards the beginning of hole and >> initramfs is loaded towards the end of the hole. >> > > Since boot protocol 2.10+ the kernel actively exports how much memory it > needs during its setup phase. > yes, we got that size that kernel needed from hdr. more lines from the new code. /* need to use run-time size for buffer searching */ dbgprintf("kernel init_size 0x%x\n", real_mode->init_size); size = _ALIGN(real_mode->init_size, 4096); align = real_mode->kernel_alignment; _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem 2013-03-22 21:21 ` Vivek Goyal 2013-03-22 21:27 ` H. Peter Anvin @ 2013-03-22 21:32 ` Yinghai Lu 1 sibling, 0 replies; 7+ messages in thread From: Yinghai Lu @ 2013-03-22 21:32 UTC (permalink / raw) To: Vivek Goyal; +Cc: Simon Horman, kexec, Eric W. Biederman, H. Peter Anvin On Fri, Mar 22, 2013 at 2:21 PM, Vivek Goyal <vgoyal@redhat.com> wrote: >> > In case of kexec_on_panic, we seem to have all the memory ranges in >> > info->memory_ranges[]. I guess we don't need that. We just need ranges >> > which are reserved for crash kernel and marked by "Crash Kernel". In >> > that case we will be able to handle multiple "Crash Kernel" ranges. >> >> but we still like to put kernel and initrd high, and leave low range >> for swiotlb. >> could just find mem_min and mem_max for largest and last one. > > Yes, but I guess that should not be hard coded here. It is during load > phase we should enforce where we want to load all the segments. > > It will work though for our current usage. May be later we can improve > it further. Where loader sees all the memory ranges (low and high) and > we choose appropriate segment. ok. _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec ^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2013-03-22 21:35 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <20130320163131.GE2273@redhat.com>
2013-03-20 19:24 ` [PATCH] kdump, x86: Process multiple Crash kernel in /proc/iomem Yinghai Lu
2013-03-22 17:59 ` Vivek Goyal
2013-03-22 20:52 ` Yinghai Lu
2013-03-22 21:21 ` Vivek Goyal
2013-03-22 21:27 ` H. Peter Anvin
2013-03-22 21:35 ` Yinghai Lu
2013-03-22 21:32 ` Yinghai Lu
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox