From: Laszlo Ersek <lersek@redhat.com>
To: Markus Armbruster <armbru@redhat.com>,
Eric Blake <eblake@redhat.com>,
Luiz Capitulino <lcapitulino@redhat.com>,
Wen Congyang <wency@cn.fujitsu.com>,
Laszlo Ersek <lersek@redhat.com>,
Jan Kiszka <jan.kiszka@siemens.com>,
Anthony Liguori <aliguori@us.ibm.com>,
qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH 1/4] dump: clamp guest-provided mapping lengths to ramblock sizes
Date: Mon, 29 Jul 2013 16:37:13 +0200 [thread overview]
Message-ID: <1375108636-17014-2-git-send-email-lersek@redhat.com> (raw)
In-Reply-To: <1375108636-17014-1-git-send-email-lersek@redhat.com>
Even a trusted & clean-state guest can map more memory than it was
given. Since the vmcore contains RAMBlocks, mapping sizes should be
clamped to RAMBlock sizes. Otherwise such oversized mappings can exceed
the entire file size, and ELF parsers might refuse even the valid portion
of the PT_LOAD entry.
Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=981582
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
---
dump.c | 65 +++++++++++++++++++++++++++++++++++++++------------------------
1 file changed, 40 insertions(+), 25 deletions(-)
diff --git a/dump.c b/dump.c
index 6a3a72a..9a2f939 100644
--- a/dump.c
+++ b/dump.c
@@ -187,7 +187,8 @@ static int write_elf32_header(DumpState *s)
}
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
- int phdr_index, hwaddr offset)
+ int phdr_index, hwaddr offset,
+ hwaddr filesz)
{
Elf64_Phdr phdr;
int ret;
@@ -197,15 +198,12 @@ static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
phdr.p_offset = cpu_convert_to_target64(offset, endian);
phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
- if (offset == -1) {
- /* When the memory is not stored into vmcore, offset will be -1 */
- phdr.p_filesz = 0;
- } else {
- phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
- }
+ phdr.p_filesz = cpu_convert_to_target64(filesz, endian);
phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
+ assert(memory_mapping->length >= filesz);
+
ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
if (ret < 0) {
dump_error(s, "dump: failed to write program header table.\n");
@@ -216,7 +214,8 @@ static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
}
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
- int phdr_index, hwaddr offset)
+ int phdr_index, hwaddr offset,
+ hwaddr filesz)
{
Elf32_Phdr phdr;
int ret;
@@ -226,15 +225,12 @@ static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
phdr.p_offset = cpu_convert_to_target32(offset, endian);
phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
- if (offset == -1) {
- /* When the memory is not stored into vmcore, offset will be -1 */
- phdr.p_filesz = 0;
- } else {
- phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
- }
+ phdr.p_filesz = cpu_convert_to_target32(filesz, endian);
phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
+ assert(memory_mapping->length >= filesz);
+
ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
if (ret < 0) {
dump_error(s, "dump: failed to write program header table.\n");
@@ -418,17 +414,24 @@ static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
return 0;
}
-/* get the memory's offset in the vmcore */
-static hwaddr get_offset(hwaddr phys_addr,
- DumpState *s)
+/* get the memory's offset and size in the vmcore */
+static void get_offset_range(hwaddr phys_addr,
+ ram_addr_t mapping_length,
+ DumpState *s,
+ hwaddr *p_offset,
+ hwaddr *p_filesz)
{
RAMBlock *block;
hwaddr offset = s->memory_offset;
int64_t size_in_block, start;
+ /* When the memory is not stored into vmcore, offset will be -1 */
+ *p_offset = -1;
+ *p_filesz = 0;
+
if (s->has_filter) {
if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
- return -1;
+ return;
}
}
@@ -457,18 +460,26 @@ static hwaddr get_offset(hwaddr phys_addr,
}
if (phys_addr >= start && phys_addr < start + size_in_block) {
- return phys_addr - start + offset;
+ *p_offset = phys_addr - start + offset;
+
+ /* The offset range mapped from the vmcore file must not spill over
+ * the RAMBlock, clamp it. The rest of the mapping will be
+ * zero-filled in memory at load time; see
+ * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
+ */
+ *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
+ mapping_length :
+ size_in_block - (phys_addr - start);
+ return;
}
offset += size_in_block;
}
-
- return -1;
}
static int write_elf_loads(DumpState *s)
{
- hwaddr offset;
+ hwaddr offset, filesz;
MemoryMapping *memory_mapping;
uint32_t phdr_index = 1;
int ret;
@@ -481,11 +492,15 @@ static int write_elf_loads(DumpState *s)
}
QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
- offset = get_offset(memory_mapping->phys_addr, s);
+ get_offset_range(memory_mapping->phys_addr,
+ memory_mapping->length,
+ s, &offset, &filesz);
if (s->dump_info.d_class == ELFCLASS64) {
- ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
+ ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
+ filesz);
} else {
- ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
+ ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
+ filesz);
}
if (ret < 0) {
--
1.7.1
next prev parent reply other threads:[~2013-07-29 14:35 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-07-29 14:37 [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores Laszlo Ersek
2013-07-29 14:37 ` Laszlo Ersek [this message]
2013-07-29 14:37 ` [Qemu-devel] [PATCH 2/4] dump: introduce GuestPhysBlockList Laszlo Ersek
2013-07-29 14:37 ` [Qemu-devel] [PATCH 3/4] dump: populate guest_phys_blocks Laszlo Ersek
2013-07-29 14:37 ` [Qemu-devel] [PATCH 4/4] dump: rebase from host-private RAMBlock offsets to guest-physical addresses Laszlo Ersek
2013-07-29 21:08 ` [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores Luiz Capitulino
2013-07-29 21:53 ` Laszlo Ersek
2013-07-29 21:59 ` Laszlo Ersek
2013-07-30 18:51 ` Luiz Capitulino
2013-08-01 13:41 ` Luiz Capitulino
2013-08-01 14:31 ` Luiz Capitulino
2013-08-05 7:44 ` Laszlo Ersek
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1375108636-17014-2-git-send-email-lersek@redhat.com \
--to=lersek@redhat.com \
--cc=aliguori@us.ibm.com \
--cc=armbru@redhat.com \
--cc=eblake@redhat.com \
--cc=jan.kiszka@siemens.com \
--cc=lcapitulino@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=wency@cn.fujitsu.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).