From: Andrew Morton <akpm@linux-foundation.org>
To: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Cc: vgoyal@redhat.com, ebiederm@xmission.com, cpw@sgi.com,
kumagai-atsushi@mxc.nes.nec.co.jp, lisa.mitchell@hp.com,
kexec@lists.infradead.org, linux-kernel@vger.kernel.org,
zhangyanfei@cn.fujitsu.com, jingbai.ma@hp.com,
linux-mm@kvack.org, riel@redhat.com, walken@google.com,
hughd@google.com, kosaki.motohiro@jp.fujitsu.com
Subject: Re: [PATCH v8 2/9] vmcore: allocate buffer for ELF headers on page-size alignment
Date: Thu, 23 May 2013 14:46:55 -0700 [thread overview]
Message-ID: <20130523144655.80cf1fd9622aae3fc7ec4161@linux-foundation.org> (raw)
In-Reply-To: <20130523052507.13864.61820.stgit@localhost6.localdomain6>

On Thu, 23 May 2013 14:25:07 +0900 HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com> wrote:
> Allocate the buffer for the ELF headers on a page-size boundary using
> __get_free_pages() instead of kmalloc().
>
> A later patch will merge the PT_NOTE entries into a single unique one
> and decrease the buffer size actually used. Keep the original buffer
> size in elfcorebuf_sz_orig so the buffer can be freed later, and keep
> the actually used size, rounded up to a page-size boundary, in
> elfcorebuf_sz.
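
(Not part of the patch, just for reference: a minimal, self-contained
sketch of the alloc/free pairing described above, with illustrative
names.  __get_free_pages() hands back a page-aligned, zeroed buffer,
and free_pages() must be called with the order computed from the
*original* size, which is why elfcorebuf_sz_orig has to stick around
even after elfcorebuf_sz shrinks.)

#include <linux/errno.h>
#include <linux/gfp.h>          /* __get_free_pages(), free_pages() */
#include <linux/mm.h>           /* get_order() */

static size_t hdr_sz_orig;      /* size used for allocation and free */
static size_t hdr_sz;           /* size actually exported, may shrink later */
static void *hdr_buf;

static int alloc_hdr_buf(size_t nbytes)
{
        hdr_sz_orig = nbytes;
        hdr_sz = nbytes;
        hdr_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                           get_order(hdr_sz_orig));
        return hdr_buf ? 0 : -ENOMEM;
}

static void free_hdr_buf(void)
{
        /* must use the original size, not the trimmed one */
        free_pages((unsigned long)hdr_buf, get_order(hdr_sz_orig));
        hdr_buf = NULL;
}
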
>
> The part of the ELF buffer exported from /proc/vmcore has size
> elfcorebuf_sz.
>
> The range left behind by the merged and removed PT_NOTE entries,
> i.e. [elfcorebuf_sz, elfcorebuf_sz_orig], is filled with 0.
>
> Use the size of the ELF headers as the initial offset value in
> set_vmcore_list_offsets_elf{64,32} and
> process_ptload_program_headers_elf{64,32} so that the offset accounts
> for the hole up to the page boundary.
>
> As a result, set_vmcore_list_offsets_elf{64,32} end up with identical
> definitions; merge them into a single set_vmcore_list_offsets.
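
(Again just for reference, not copied from the patch: a reconstructed
sketch of what the merged helper boils down to, assuming the usual
vmcore list entry layout with ->offset and ->size fields.  Starting
the running offset at the page-rounded header size is what accounts
for the padding hole mentioned above.)

#include <linux/list.h>
#include <linux/types.h>

struct vmcore_entry {                   /* stand-in for the real struct vmcore */
        struct list_head list;
        unsigned long long size;
        loff_t offset;
};

static void set_offsets(size_t elfsz, struct list_head *vc_list)
{
        loff_t off = elfsz;             /* skip ELF header + program headers */
        struct vmcore_entry *m;

        list_for_each_entry(m, vc_list, list) {
                m->offset = off;
                off += m->size;
        }
}
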
>
> ...
>
> @@ -526,30 +505,35 @@ static int __init parse_crash_elf64_headers(void)
> }
>
> /* Read in all elf headers. */
> - elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
> - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
> + elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
> + elfcorebuf_sz = elfcorebuf_sz_orig;
> + elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
> + get_order(elfcorebuf_sz_orig));
> if (!elfcorebuf)
> return -ENOMEM;
> addr = elfcorehdr_addr;
> - rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
> + rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
> if (rc < 0) {
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> return rc;
> }
>
> /* Merge all PT_NOTE headers into one. */
> rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
> if (rc) {
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> return rc;
> }
> rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
> &vmcore_list);
> if (rc) {
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> return rc;
> }
> - set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
> + set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
> return 0;
> }
>
> @@ -581,30 +565,35 @@ static int __init parse_crash_elf32_headers(void)
> }
>
> /* Read in all elf headers. */
> - elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
> - elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
> + elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
> + elfcorebuf_sz = elfcorebuf_sz_orig;
> + elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
> + get_order(elfcorebuf_sz_orig));
> if (!elfcorebuf)
> return -ENOMEM;
> addr = elfcorehdr_addr;
> - rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
> + rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
> if (rc < 0) {
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> return rc;
> }
>
> /* Merge all PT_NOTE headers into one. */
> rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
> if (rc) {
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> return rc;
> }
> rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
> &vmcore_list);
> if (rc) {
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> return rc;
> }
> - set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
> + set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
> return 0;
> }
>
> @@ -629,14 +618,14 @@ static int __init parse_crash_elf_headers(void)
> return rc;
>
> /* Determine vmcore size. */
> - vmcore_size = get_vmcore_size_elf64(elfcorebuf);
> + vmcore_size = get_vmcore_size_elf64(elfcorebuf, elfcorebuf_sz);
> } else if (e_ident[EI_CLASS] == ELFCLASS32) {
> rc = parse_crash_elf32_headers();
> if (rc)
> return rc;
>
> /* Determine vmcore size. */
> - vmcore_size = get_vmcore_size_elf32(elfcorebuf);
> + vmcore_size = get_vmcore_size_elf32(elfcorebuf, elfcorebuf_sz);
> } else {
> pr_warn("Warning: Core image elf header is not sane\n");
> return -EINVAL;
> @@ -683,7 +672,8 @@ void vmcore_cleanup(void)
> list_del(&m->list);
> kfree(m);
> }
> - kfree(elfcorebuf);
> + free_pages((unsigned long)elfcorebuf,
> + get_order(elfcorebuf_sz_orig));
> elfcorebuf = NULL;
> }

- the amount of code duplication is excessive

- the code sometimes leaves elfcorebuf==NULL and sometimes doesn't.

Please review and test this cleanup:

--- a/fs/proc/vmcore.c~vmcore-allocate-buffer-for-elf-headers-on-page-size-alignment-fix
+++ a/fs/proc/vmcore.c
@@ -477,6 +477,12 @@ static void __init set_vmcore_list_offse
}
}
+static void free_elfcorebuf(void)
+{
+ free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
+ elfcorebuf = NULL;
+}
+
static int __init parse_crash_elf64_headers(void)
{
int rc=0;
@@ -505,36 +511,31 @@ static int __init parse_crash_elf64_head
}
/* Read in all elf headers. */
- elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
+ elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
+ ehdr.e_phnum * sizeof(Elf64_Phdr);
elfcorebuf_sz = elfcorebuf_sz_orig;
- elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(elfcorebuf_sz_orig));
+ elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(elfcorebuf_sz_orig));
if (!elfcorebuf)
return -ENOMEM;
addr = elfcorehdr_addr;
rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
- if (rc < 0) {
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- return rc;
- }
+ if (rc < 0)
+ goto fail;
/* Merge all PT_NOTE headers into one. */
rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
- if (rc) {
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- return rc;
- }
+ if (rc)
+ goto fail;
rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
&vmcore_list);
- if (rc) {
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- return rc;
- }
+ if (rc)
+ goto fail;
set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
return 0;
+fail:
+ free_elfcorebuf();
+ return rc;
}
static int __init parse_crash_elf32_headers(void)
@@ -567,34 +568,28 @@ static int __init parse_crash_elf32_head
/* Read in all elf headers. */
elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
elfcorebuf_sz = elfcorebuf_sz_orig;
- elfcorebuf = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(elfcorebuf_sz_orig));
+ elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(elfcorebuf_sz_orig));
if (!elfcorebuf)
return -ENOMEM;
addr = elfcorehdr_addr;
rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
- if (rc < 0) {
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- return rc;
- }
+ if (rc < 0)
+ goto fail;
/* Merge all PT_NOTE headers into one. */
rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
- if (rc) {
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- return rc;
- }
+ if (rc)
+ goto fail;
rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
&vmcore_list);
- if (rc) {
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- return rc;
- }
+ if (rc)
+ goto fail;
set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
return 0;
+fail:
+ free_elfcorebuf();
+ return rc;
}
static int __init parse_crash_elf_headers(void)
@@ -672,8 +667,6 @@ void vmcore_cleanup(void)
list_del(&m->list);
kfree(m);
}
- free_pages((unsigned long)elfcorebuf,
- get_order(elfcorebuf_sz_orig));
- elfcorebuf = NULL;
+ free_elfcorebuf();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);
_