* [Qemu-devel] [PATCH 1/4] dump: clamp guest-provided mapping lengths to ramblock sizes
From: Laszlo Ersek @ 2013-07-29 14:37 UTC (permalink / raw)
To: Markus Armbruster, Eric Blake, Luiz Capitulino, Wen Congyang,
Laszlo Ersek, Jan Kiszka, Anthony Liguori, qemu-devel
Even a trusted, clean-state guest can map more memory than it was given.
Since the vmcore contains the RAMBlock contents, mapping sizes should be
clamped to RAMBlock sizes. Otherwise the PT_LOAD entries derived from such
oversized mappings can exceed the vmcore file size, and ELF parsers might
refuse even the valid portion of the entry.
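As an illustration only (not part of the patch), a minimal standalone sketch
of the clamping rule, with plain uint64_t values standing in for QEMU's
hwaddr/ram_addr_t:

#include <stdint.h>
#include <stdio.h>

/* Clamp a guest mapping's file-backed size to the RAMBlock that contains
 * phys_addr: anything past the block's end is not present in the vmcore,
 * so the ELF loader must zero-fill it (p_memsz > p_filesz).
 */
static uint64_t clamp_filesz(uint64_t phys_addr, uint64_t mapping_length,
                             uint64_t block_start, uint64_t block_size)
{
    uint64_t block_end = block_start + block_size;

    if (phys_addr + mapping_length <= block_end) {
        return mapping_length;            /* mapping fits entirely in block */
    }
    return block_end - phys_addr;         /* clamp to the end of the block  */
}

int main(void)
{
    /* 1 GB RAMBlock at 0; the guest maps 2 GB starting at the 512 MB mark */
    printf("filesz=0x%llx\n",
           (unsigned long long)clamp_filesz(0x20000000, 0x80000000,
                                            0, 0x40000000));
    return 0;
}

The same computation is what get_offset_range() below performs per RAMBlock.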
Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=981582
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
---
dump.c | 65 +++++++++++++++++++++++++++++++++++++++------------------------
1 files changed, 40 insertions(+), 25 deletions(-)
diff --git a/dump.c b/dump.c
index 6a3a72a..9a2f939 100644
--- a/dump.c
+++ b/dump.c
@@ -187,7 +187,8 @@ static int write_elf32_header(DumpState *s)
}
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
- int phdr_index, hwaddr offset)
+ int phdr_index, hwaddr offset,
+ hwaddr filesz)
{
Elf64_Phdr phdr;
int ret;
@@ -197,15 +198,12 @@ static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
phdr.p_offset = cpu_convert_to_target64(offset, endian);
phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
- if (offset == -1) {
- /* When the memory is not stored into vmcore, offset will be -1 */
- phdr.p_filesz = 0;
- } else {
- phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
- }
+ phdr.p_filesz = cpu_convert_to_target64(filesz, endian);
phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
+ assert(memory_mapping->length >= filesz);
+
ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
if (ret < 0) {
dump_error(s, "dump: failed to write program header table.\n");
@@ -216,7 +214,8 @@ static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
}
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
- int phdr_index, hwaddr offset)
+ int phdr_index, hwaddr offset,
+ hwaddr filesz)
{
Elf32_Phdr phdr;
int ret;
@@ -226,15 +225,12 @@ static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
phdr.p_offset = cpu_convert_to_target32(offset, endian);
phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
- if (offset == -1) {
- /* When the memory is not stored into vmcore, offset will be -1 */
- phdr.p_filesz = 0;
- } else {
- phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
- }
+ phdr.p_filesz = cpu_convert_to_target32(filesz, endian);
phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
+ assert(memory_mapping->length >= filesz);
+
ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
if (ret < 0) {
dump_error(s, "dump: failed to write program header table.\n");
@@ -418,17 +414,24 @@ static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
return 0;
}
-/* get the memory's offset in the vmcore */
-static hwaddr get_offset(hwaddr phys_addr,
- DumpState *s)
+/* get the memory's offset and size in the vmcore */
+static void get_offset_range(hwaddr phys_addr,
+ ram_addr_t mapping_length,
+ DumpState *s,
+ hwaddr *p_offset,
+ hwaddr *p_filesz)
{
RAMBlock *block;
hwaddr offset = s->memory_offset;
int64_t size_in_block, start;
+ /* When the memory is not stored into vmcore, offset will be -1 */
+ *p_offset = -1;
+ *p_filesz = 0;
+
if (s->has_filter) {
if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
- return -1;
+ return;
}
}
@@ -457,18 +460,26 @@ static hwaddr get_offset(hwaddr phys_addr,
}
if (phys_addr >= start && phys_addr < start + size_in_block) {
- return phys_addr - start + offset;
+ *p_offset = phys_addr - start + offset;
+
+ /* The offset range mapped from the vmcore file must not spill over
+ * the RAMBlock, clamp it. The rest of the mapping will be
+ * zero-filled in memory at load time; see
+ * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
+ */
+ *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
+ mapping_length :
+ size_in_block - (phys_addr - start);
+ return;
}
offset += size_in_block;
}
-
- return -1;
}
static int write_elf_loads(DumpState *s)
{
- hwaddr offset;
+ hwaddr offset, filesz;
MemoryMapping *memory_mapping;
uint32_t phdr_index = 1;
int ret;
@@ -481,11 +492,15 @@ static int write_elf_loads(DumpState *s)
}
QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
- offset = get_offset(memory_mapping->phys_addr, s);
+ get_offset_range(memory_mapping->phys_addr,
+ memory_mapping->length,
+ s, &offset, &filesz);
if (s->dump_info.d_class == ELFCLASS64) {
- ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
+ ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
+ filesz);
} else {
- ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
+ ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
+ filesz);
}
if (ret < 0) {
--
1.7.1
* [Qemu-devel] [PATCH 2/4] dump: introduce GuestPhysBlockList
From: Laszlo Ersek @ 2013-07-29 14:37 UTC (permalink / raw)
To: Markus Armbruster, Eric Blake, Luiz Capitulino, Wen Congyang,
Laszlo Ersek, Jan Kiszka, Anthony Liguori, qemu-devel
The vmcore must use physical addresses that are visible to the guest, not
addresses that point into linear RAMBlocks. As a first step, introduce the
list type into which we'll collect the physical mappings in effect at the
time of the dump.
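As an illustration only (not part of the patch), the intended life cycle of
the new list type, modeled with the BSD-style <sys/queue.h> TAILQ macros as
a stand-in for QEMU's QTAILQ so the sketch builds on its own; the block that
is inserted by hand here is what the next patch's listener will add
automatically:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

typedef struct GuestPhysBlock {
    uint64_t target_start;               /* guest-visible start paddr       */
    uint64_t target_end;                 /* guest-visible end paddr         */
    uint8_t *host_addr;                  /* backing bytes in host memory    */
    TAILQ_ENTRY(GuestPhysBlock) next;
} GuestPhysBlock;

typedef struct GuestPhysBlockList {
    unsigned num;
    TAILQ_HEAD(, GuestPhysBlock) head;
} GuestPhysBlockList;

static void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    TAILQ_INIT(&list->head);
}

static void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    while (!TAILQ_EMPTY(&list->head)) {
        GuestPhysBlock *p = TAILQ_FIRST(&list->head);

        TAILQ_REMOVE(&list->head, p, next);
        free(p);
    }
    list->num = 0;
}

int main(void)
{
    GuestPhysBlockList list;
    GuestPhysBlock *b = calloc(1, sizeof *b);

    guest_phys_blocks_init(&list);

    b->target_start = 0;
    b->target_end = 0xa0000;             /* 640 KB of low RAM */
    TAILQ_INSERT_TAIL(&list.head, b, next);
    list.num++;

    TAILQ_FOREACH(b, &list.head, next) {
        printf("[0x%" PRIx64 ", 0x%" PRIx64 ")\n",
               b->target_start, b->target_end);
    }
    guest_phys_blocks_free(&list);
    return 0;
}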
Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=981582
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
---
include/sysemu/memory_mapping.h | 22 ++++++++++++++++++++++
dump.c | 31 +++++++++++++++++++------------
memory_mapping.c | 17 +++++++++++++++++
3 files changed, 58 insertions(+), 12 deletions(-)
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 6dfb68d..53c2cd5 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -17,6 +17,25 @@
#include "qemu/queue.h"
#include "qemu/typedefs.h"
+typedef struct GuestPhysBlock {
+ /* visible to guest, reflects PCI hole, etc */
+ hwaddr target_start;
+
+ /* implies size */
+ hwaddr target_end;
+
+ /* points into host memory */
+ uint8_t *host_addr;
+
+ QTAILQ_ENTRY(GuestPhysBlock) next;
+} GuestPhysBlock;
+
+/* point-in-time snapshot of guest-visible physical mappings */
+typedef struct GuestPhysBlockList {
+ unsigned num;
+ QTAILQ_HEAD(, GuestPhysBlock) head;
+} GuestPhysBlockList;
+
/* The physical and virtual address in the memory mapping are contiguous. */
typedef struct MemoryMapping {
hwaddr phys_addr;
@@ -45,6 +64,9 @@ void memory_mapping_list_free(MemoryMappingList *list);
void memory_mapping_list_init(MemoryMappingList *list);
+void guest_phys_blocks_free(GuestPhysBlockList *list);
+void guest_phys_blocks_init(GuestPhysBlockList *list);
+
void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp);
/* get guest's memory mapping without do paging(virtual address is 0). */
diff --git a/dump.c b/dump.c
index 9a2f939..716fb1d 100644
--- a/dump.c
+++ b/dump.c
@@ -59,6 +59,7 @@ static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
}
typedef struct DumpState {
+ GuestPhysBlockList guest_phys_blocks;
ArchDumpInfo dump_info;
MemoryMappingList list;
uint16_t phdr_num;
@@ -81,6 +82,7 @@ static int dump_cleanup(DumpState *s)
{
int ret = 0;
+ guest_phys_blocks_free(&s->guest_phys_blocks);
memory_mapping_list_free(&s->list);
if (s->fd != -1) {
close(s->fd);
@@ -728,31 +730,34 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
s->resume = false;
}
+ /* If we use KVM, we should synchronize the registers before we get dump
+ * info or physmap info.
+ */
+ cpu_synchronize_all_states();
+ nr_cpus = 0;
+ for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
+ nr_cpus++;
+ }
+
s->errp = errp;
s->fd = fd;
s->has_filter = has_filter;
s->begin = begin;
s->length = length;
+
+ guest_phys_blocks_init(&s->guest_phys_blocks);
+ /* FILL LIST */
+
s->start = get_start_block(s);
if (s->start == -1) {
error_set(errp, QERR_INVALID_PARAMETER, "begin");
goto cleanup;
}
- /*
- * get dump info: endian, class and architecture.
+ /* get dump info: endian, class and architecture.
* If the target architecture is not supported, cpu_get_dump_info() will
* return -1.
- *
- * If we use KVM, we should synchronize the registers before we get dump
- * info.
*/
- cpu_synchronize_all_states();
- nr_cpus = 0;
- for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
- nr_cpus++;
- }
-
ret = cpu_get_dump_info(&s->dump_info);
if (ret < 0) {
error_set(errp, QERR_UNSUPPORTED);
@@ -827,6 +832,8 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
return 0;
cleanup:
+ guest_phys_blocks_free(&s->guest_phys_blocks);
+
if (s->resume) {
vm_start();
}
@@ -874,7 +881,7 @@ void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
return;
}
- s = g_malloc(sizeof(DumpState));
+ s = g_malloc0(sizeof(DumpState));
ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
if (ret < 0) {
diff --git a/memory_mapping.c b/memory_mapping.c
index 515a984..c70505b 100644
--- a/memory_mapping.c
+++ b/memory_mapping.c
@@ -165,6 +165,23 @@ void memory_mapping_list_init(MemoryMappingList *list)
QTAILQ_INIT(&list->head);
}
+void guest_phys_blocks_free(GuestPhysBlockList *list)
+{
+ GuestPhysBlock *p, *q;
+
+ QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
+ QTAILQ_REMOVE(&list->head, p, next);
+ g_free(p);
+ }
+ list->num = 0;
+}
+
+void guest_phys_blocks_init(GuestPhysBlockList *list)
+{
+ list->num = 0;
+ QTAILQ_INIT(&list->head);
+}
+
static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
CPUState *cpu;
--
1.7.1
* [Qemu-devel] [PATCH 3/4] dump: populate guest_phys_blocks
From: Laszlo Ersek @ 2013-07-29 14:37 UTC (permalink / raw)
To: Markus Armbruster, Eric Blake, Luiz Capitulino, Wen Congyang,
Laszlo Ersek, Jan Kiszka, Anthony Liguori, qemu-devel
While the machine is paused, in guest_phys_blocks_append() we register a
one-shot MemoryListener, solely for the initial collection of the valid
guest-physical memory ranges that happens at client registration time.
For each range that is reported to guest_phys_blocks_set_memory(), we
attempt to merge the range with adjacent (preceding, subsequent, or both)
ranges. We use two hash tables for this purpose, both indexing the same
ranges, just by different keys (guest-phys-start vs. guest-phys-end).
Ranges can only be joined if they are contiguous in both guest-physical
address space and host virtual address space.
The "maximal" ranges that remain in the end constitute the guest-physical
memory map that the dump will be based on.
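As an illustration only (not part of the patch), the join rule in isolation,
without the hash-table bookkeeping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Range {
    uint64_t target_start;   /* guest-physical start (inclusive)      */
    uint64_t target_end;     /* guest-physical end (exclusive)        */
    uint8_t *host_addr;      /* host-virtual address of target_start  */
} Range;

/* Can "next" be folded into "prev" as a single maximal range? */
static bool can_merge(const Range *prev, const Range *next)
{
    uint64_t prev_size = prev->target_end - prev->target_start;

    return prev->target_end == next->target_start &&       /* guest-phys contiguous */
           prev->host_addr + prev_size == next->host_addr; /* host-virt contiguous  */
}

int main(void)
{
    static uint8_t backing[0x3000];
    Range a = { 0x0000, 0x1000, backing };
    Range b = { 0x1000, 0x2000, backing + 0x1000 };  /* contiguous in both views */
    Range c = { 0x2000, 0x3000, backing };           /* host address jumps back  */

    printf("a+b: %d, b+c: %d\n", can_merge(&a, &b), can_merge(&b, &c));
    return 0;
}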
Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=981582
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
---
include/sysemu/memory_mapping.h | 1 +
dump.c | 2 +-
memory_mapping.c | 135 +++++++++++++++++++++++++++++++++++++++
3 files changed, 137 insertions(+), 1 deletions(-)
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 53c2cd5..6723dc5 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -66,6 +66,7 @@ void memory_mapping_list_init(MemoryMappingList *list);
void guest_phys_blocks_free(GuestPhysBlockList *list);
void guest_phys_blocks_init(GuestPhysBlockList *list);
+void guest_phys_blocks_append(GuestPhysBlockList *list);
void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp);
diff --git a/dump.c b/dump.c
index 716fb1d..3fa33fc 100644
--- a/dump.c
+++ b/dump.c
@@ -746,7 +746,7 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
s->length = length;
guest_phys_blocks_init(&s->guest_phys_blocks);
- /* FILL LIST */
+ guest_phys_blocks_append(&s->guest_phys_blocks);
s->start = get_start_block(s);
if (s->start == -1) {
diff --git a/memory_mapping.c b/memory_mapping.c
index c70505b..efaabf8 100644
--- a/memory_mapping.c
+++ b/memory_mapping.c
@@ -11,9 +11,13 @@
*
*/
+#include <glib.h>
+
#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
MemoryMapping *mapping)
@@ -182,6 +186,137 @@ void guest_phys_blocks_init(GuestPhysBlockList *list)
QTAILQ_INIT(&list->head);
}
+typedef struct GuestPhysListener {
+ GHashTable *by_target_start;
+ GHashTable *by_target_end;
+ MemoryListener listener;
+} GuestPhysListener;
+
+static void guest_phys_blocks_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ GuestPhysListener *g;
+ uint64_t section_size;
+ hwaddr target_start, target_end;
+ uint8_t *host_addr;
+ GuestPhysBlock *predecessor, *successor, *block;
+ bool found;
+
+ /* we only care about RAM */
+ if (!memory_region_is_ram(section->mr)) {
+ return;
+ }
+
+ g = container_of(listener, GuestPhysListener, listener);
+ section_size = int128_get64(section->size);
+ target_start = section->offset_within_address_space;
+ target_end = target_start + section_size;
+ host_addr = memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region;
+
+ /* find continuity in guest physical address space */
+ predecessor = g_hash_table_lookup(g->by_target_end, &target_start);
+ successor = g_hash_table_lookup(g->by_target_start, &target_end);
+
+ /* we require continuity in host memory too */
+ if (predecessor != NULL) {
+ hwaddr predecessor_size = predecessor->target_end -
+ predecessor->target_start;
+ if (predecessor->host_addr + predecessor_size != host_addr) {
+ predecessor = NULL;
+ }
+ }
+ if (successor != NULL
+ && host_addr + section_size != successor->host_addr) {
+ successor = NULL;
+ }
+
+ if (predecessor == NULL) {
+ if (successor == NULL) {
+ /* Isolated mapping, allocate it and add it to both tables. */
+ block = g_malloc0(sizeof *block);
+
+ block->target_end = target_end;
+ g_hash_table_insert(g->by_target_end, &block->target_end, block);
+ } else {
+ /* Mapping has successor only. Merge current into successor by
+ * modifying successor's start. Successor's end doesn't change.
+ */
+ block = successor;
+ found = g_hash_table_steal(g->by_target_start,
+ &block->target_start);
+ g_assert(found);
+ }
+ block->target_start = target_start;
+ block->host_addr = host_addr;
+ g_hash_table_insert(g->by_target_start, &block->target_start, block);
+ return;
+ }
+
+ if (successor != NULL) {
+ /* Mapping has both predecessor and successor. Delete the successor
+ * and expand the predecessor to cover all three.
+ */
+ target_end = successor->target_end;
+
+ found = g_hash_table_steal(g->by_target_end, &successor->target_end);
+ g_assert(found);
+ found = g_hash_table_steal(g->by_target_start,
+ &successor->target_start);
+ g_assert(found);
+
+ g_free(successor);
+ }
+ /* otherwise, mapping has predecessor only */
+
+ /* Expand predecessor until @target_end. Predecessor's start doesn't
+ * change.
+ */
+ block = predecessor;
+ found = g_hash_table_steal(g->by_target_end, &block->target_end);
+ g_assert(found);
+
+ block->target_end = target_end;
+ g_hash_table_insert(g->by_target_end, &block->target_end, block);
+}
+
+static void guest_phys_block_link(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ GuestPhysBlock *block = value;
+ GuestPhysBlockList *list = user_data;
+
+ QTAILQ_INSERT_TAIL(&list->head, block, next);
+ ++list->num;
+}
+
+void guest_phys_blocks_append(GuestPhysBlockList *list)
+{
+ GHashFunc hash_func;
+ GEqualFunc equal_func;
+ GuestPhysListener g = { 0 };
+
+ if (sizeof(hwaddr) == sizeof(uint64_t)) {
+ hash_func = &g_int64_hash;
+ equal_func = &g_int64_equal;
+ } else {
+ hash_func = &g_int_hash;
+ equal_func = &g_int_equal;
+ }
+
+ g.by_target_start = g_hash_table_new(hash_func, equal_func);
+ g.by_target_end = g_hash_table_new(hash_func, equal_func);
+
+ g.listener.region_add = &guest_phys_blocks_region_add;
+ memory_listener_register(&g.listener, &address_space_memory);
+ memory_listener_unregister(&g.listener);
+
+ g_hash_table_foreach(g.by_target_start, &guest_phys_block_link, list);
+
+ g_hash_table_destroy(g.by_target_end);
+ g_hash_table_destroy(g.by_target_start);
+}
+
static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
CPUState *cpu;
--
1.7.1
* [Qemu-devel] [PATCH 4/4] dump: rebase from host-private RAMBlock offsets to guest-physical addresses
From: Laszlo Ersek @ 2013-07-29 14:37 UTC (permalink / raw)
To: Markus Armbruster, Eric Blake, Luiz Capitulino, Wen Congyang,
Laszlo Ersek, Jan Kiszka, Anthony Liguori, qemu-devel
RAMBlock.offset                   --> GuestPhysBlock.target_start
RAMBlock.offset + RAMBlock.length --> GuestPhysBlock.target_end
RAMBlock.length                   --> GuestPhysBlock.target_end -
                                      GuestPhysBlock.target_start
"GuestPhysBlock.host_addr" is only used when writing the dump contents.
This patch enables "crash" to work with the vmcore by rebasing the vmcore
from the left side of the following diagram to the right side:
host-private
offset
relative
to ram_addr       RAMBlock            guest-visible paddrs
          0 +-------------------+.....+-------------------+ 0
            |         ^         |     |         ^         |
            |       640 KB      |     |       640 KB      |
            |         v         |     |         v         |
0x0000a0000 +-------------------+.....+-------------------+ 0x0000a0000
            |         ^         |     |XXXXXXXXXXXXXXXXXXX|
            |       384 KB      |     |XXXXXXXXXXXXXXXXXXX|
            |         v         |     |XXXXXXXXXXXXXXXXXXX|
0x000100000 +-------------------+.....+-------------------+ 0x000100000
            |         ^         |     |         ^         |
            |      3583 MB      |     |      3583 MB      |
            |         v         |     |         v         |
0x0e0000000 +-------------------+.....+-------------------+ 0x0e0000000
            |         ^         |.    |XXXXXXXXXXXXXXXXXXX|
            | above_4g_mem_size | .   |XXXX PCI hole XXXXX|
            |         v         |  .  |XXXX          XXXXX|
   ram_size +-------------------+   . |XXXX  512 MB  XXXXX|
                                 .   .|XXXXXXXXXXXXXXXXXXX|
                                  .   +-------------------+ 0x100000000
                                   .  |         ^         |
                                    . | above_4g_mem_size |
                                     .|         v         |
                                      +-------------------+ ram_size
                                                             + 512 MB
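As an illustration only (not part of the patch), a standalone sketch with
simplified types of what the right-hand side buys: ELF program header fields
are derived from guest-physical addresses, while the payload is still
reached through host_addr:

#include <inttypes.h>
#include <stdio.h>

typedef struct GuestPhysBlock {
    uint64_t target_start;   /* guest-visible paddr of the first byte       */
    uint64_t target_end;     /* guest-visible paddr one past the last byte  */
    uint8_t *host_addr;      /* where those bytes actually live in the host */
} GuestPhysBlock;

/* Header fields come from the guest-physical view; copying the payload from
 * host memory is only hinted at here.
 */
static void dump_blocks(const GuestPhysBlock *blocks, unsigned num, FILE *hdr)
{
    unsigned i;

    for (i = 0; i < num; i++) {
        uint64_t size = blocks[i].target_end - blocks[i].target_start;

        fprintf(hdr, "PT_LOAD p_paddr=0x%" PRIx64 " p_filesz=0x%" PRIx64 "\n",
                blocks[i].target_start, size);
        /* fwrite(blocks[i].host_addr, 1, size, contents); */
    }
}

int main(void)
{
    static uint8_t low[0xa0000], high[0x1000];
    GuestPhysBlock map[] = {
        { 0x00000000,     0x000a0000,     low  },  /* 640 KB of low RAM        */
        { 0x100000000ULL, 0x100001000ULL, high },  /* RAM relocated above 4 GB */
    };

    dump_blocks(map, 2, stdout);
    return 0;
}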
Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=981582
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
---
include/sysemu/dump.h | 4 ++-
include/sysemu/memory_mapping.h | 7 +++-
dump.c | 77 +++++++++++++++++++--------------------
memory_mapping.c | 22 +++++++-----
stubs/dump.c | 3 +-
target-i386/arch_dump.c | 10 +++--
6 files changed, 67 insertions(+), 56 deletions(-)
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
index b8c770f..19fafb2 100644
--- a/include/sysemu/dump.h
+++ b/include/sysemu/dump.h
@@ -20,7 +20,9 @@ typedef struct ArchDumpInfo {
int d_class; /* ELFCLASS32 or ELFCLASS64 */
} ArchDumpInfo;
-int cpu_get_dump_info(ArchDumpInfo *info);
+struct GuestPhysBlockList; /* memory_mapping.h */
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const struct GuestPhysBlockList *guest_phys_blocks);
ssize_t cpu_get_note_size(int class, int machine, int nr_cpus);
#endif
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
index 6723dc5..2d98a89 100644
--- a/include/sysemu/memory_mapping.h
+++ b/include/sysemu/memory_mapping.h
@@ -68,10 +68,13 @@ void guest_phys_blocks_free(GuestPhysBlockList *list);
void guest_phys_blocks_init(GuestPhysBlockList *list);
void guest_phys_blocks_append(GuestPhysBlockList *list);
-void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp);
+void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+ const GuestPhysBlockList *guest_phys_blocks,
+ Error **errp);
/* get guest's memory mapping without do paging(virtual address is 0). */
-void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list);
+void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
+ const GuestPhysBlockList *guest_phys_blocks);
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
int64_t length);
diff --git a/dump.c b/dump.c
index 3fa33fc..c0dae2c 100644
--- a/dump.c
+++ b/dump.c
@@ -70,7 +70,7 @@ typedef struct DumpState {
hwaddr memory_offset;
int fd;
- RAMBlock *block;
+ GuestPhysBlock *next_block;
ram_addr_t start;
bool has_filter;
int64_t begin;
@@ -391,14 +391,14 @@ static int write_data(DumpState *s, void *buf, int length)
}
/* write the memroy to vmcore. 1 page per I/O. */
-static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
+static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
int64_t size)
{
int64_t i;
int ret;
for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
- ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
+ ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
if (ret < 0) {
return ret;
@@ -406,7 +406,7 @@ static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
}
if ((size % TARGET_PAGE_SIZE) != 0) {
- ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
+ ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
size % TARGET_PAGE_SIZE);
if (ret < 0) {
return ret;
@@ -423,7 +423,7 @@ static void get_offset_range(hwaddr phys_addr,
hwaddr *p_offset,
hwaddr *p_filesz)
{
- RAMBlock *block;
+ GuestPhysBlock *block;
hwaddr offset = s->memory_offset;
int64_t size_in_block, start;
@@ -437,35 +437,34 @@ static void get_offset_range(hwaddr phys_addr,
}
}
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
if (s->has_filter) {
- if (block->offset >= s->begin + s->length ||
- block->offset + block->length <= s->begin) {
+ if (block->target_start >= s->begin + s->length ||
+ block->target_end <= s->begin) {
/* This block is out of the range */
continue;
}
- if (s->begin <= block->offset) {
- start = block->offset;
+ if (s->begin <= block->target_start) {
+ start = block->target_start;
} else {
start = s->begin;
}
- size_in_block = block->length - (start - block->offset);
- if (s->begin + s->length < block->offset + block->length) {
- size_in_block -= block->offset + block->length -
- (s->begin + s->length);
+ size_in_block = block->target_end - start;
+ if (s->begin + s->length < block->target_end) {
+ size_in_block -= block->target_end - (s->begin + s->length);
}
} else {
- start = block->offset;
- size_in_block = block->length;
+ start = block->target_start;
+ size_in_block = block->target_end - block->target_start;
}
if (phys_addr >= start && phys_addr < start + size_in_block) {
*p_offset = phys_addr - start + offset;
/* The offset range mapped from the vmcore file must not spill over
- * the RAMBlock, clamp it. The rest of the mapping will be
+ * the GuestPhysBlock, clamp it. The rest of the mapping will be
* zero-filled in memory at load time; see
* <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
*/
@@ -613,7 +612,7 @@ static int dump_completed(DumpState *s)
return 0;
}
-static int get_next_block(DumpState *s, RAMBlock *block)
+static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
while (1) {
block = QTAILQ_NEXT(block, next);
@@ -623,16 +622,16 @@ static int get_next_block(DumpState *s, RAMBlock *block)
}
s->start = 0;
- s->block = block;
+ s->next_block = block;
if (s->has_filter) {
- if (block->offset >= s->begin + s->length ||
- block->offset + block->length <= s->begin) {
+ if (block->target_start >= s->begin + s->length ||
+ block->target_end <= s->begin) {
/* This block is out of the range */
continue;
}
- if (s->begin > block->offset) {
- s->start = s->begin - block->offset;
+ if (s->begin > block->target_start) {
+ s->start = s->begin - block->target_start;
}
}
@@ -643,18 +642,18 @@ static int get_next_block(DumpState *s, RAMBlock *block)
/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
- RAMBlock *block;
+ GuestPhysBlock *block;
int64_t size;
int ret;
while (1) {
- block = s->block;
+ block = s->next_block;
- size = block->length;
+ size = block->target_end - block->target_start;
if (s->has_filter) {
size -= s->start;
- if (s->begin + s->length < block->offset + block->length) {
- size -= block->offset + block->length - (s->begin + s->length);
+ if (s->begin + s->length < block->target_end) {
+ size -= block->target_end - (s->begin + s->length);
}
}
ret = write_memory(s, block, s->start, size);
@@ -689,23 +688,23 @@ static int create_vmcore(DumpState *s)
static ram_addr_t get_start_block(DumpState *s)
{
- RAMBlock *block;
+ GuestPhysBlock *block;
if (!s->has_filter) {
- s->block = QTAILQ_FIRST(&ram_list.blocks);
+ s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
return 0;
}
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- if (block->offset >= s->begin + s->length ||
- block->offset + block->length <= s->begin) {
+ QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
+ if (block->target_start >= s->begin + s->length ||
+ block->target_end <= s->begin) {
/* This block is out of the range */
continue;
}
- s->block = block;
- if (s->begin > block->offset) {
- s->start = s->begin - block->offset;
+ s->next_block = block;
+ if (s->begin > block->target_start) {
+ s->start = s->begin - block->target_start;
} else {
s->start = 0;
}
@@ -758,7 +757,7 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
* If the target architecture is not supported, cpu_get_dump_info() will
* return -1.
*/
- ret = cpu_get_dump_info(&s->dump_info);
+ ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
if (ret < 0) {
error_set(errp, QERR_UNSUPPORTED);
goto cleanup;
@@ -774,13 +773,13 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
/* get memory mapping */
memory_mapping_list_init(&s->list);
if (paging) {
- qemu_get_guest_memory_mapping(&s->list, &err);
+ qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
if (err != NULL) {
error_propagate(errp, err);
goto cleanup;
}
} else {
- qemu_get_guest_simple_memory_mapping(&s->list);
+ qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
}
if (s->has_filter) {
diff --git a/memory_mapping.c b/memory_mapping.c
index efaabf8..cf246e0 100644
--- a/memory_mapping.c
+++ b/memory_mapping.c
@@ -330,10 +330,12 @@ static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
return NULL;
}
-void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp)
+void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+ const GuestPhysBlockList *guest_phys_blocks,
+ Error **errp)
{
CPUState *cpu, *first_paging_enabled_cpu;
- RAMBlock *block;
+ GuestPhysBlock *block;
ram_addr_t offset, length;
first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
@@ -353,19 +355,21 @@ void qemu_get_guest_memory_mapping(MemoryMappingList *list, Error **errp)
* If the guest doesn't use paging, the virtual address is equal to physical
* address.
*/
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- offset = block->offset;
- length = block->length;
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ offset = block->target_start;
+ length = block->target_end - block->target_start;
create_new_memory_mapping(list, offset, offset, length);
}
}
-void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list)
+void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
+ const GuestPhysBlockList *guest_phys_blocks)
{
- RAMBlock *block;
+ GuestPhysBlock *block;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- create_new_memory_mapping(list, block->offset, 0, block->length);
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ create_new_memory_mapping(list, block->target_start, 0,
+ block->target_end - block->target_start);
}
}
diff --git a/stubs/dump.c b/stubs/dump.c
index 43c9a3f..370cd96 100644
--- a/stubs/dump.c
+++ b/stubs/dump.c
@@ -16,7 +16,8 @@
#include "qapi/qmp/qerror.h"
#include "qmp-commands.h"
-int cpu_get_dump_info(ArchDumpInfo *info)
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const struct GuestPhysBlockList *guest_phys_blocks)
{
return -1;
}
diff --git a/target-i386/arch_dump.c b/target-i386/arch_dump.c
index 10dc228..0bbed23 100644
--- a/target-i386/arch_dump.c
+++ b/target-i386/arch_dump.c
@@ -15,6 +15,7 @@
#include "exec/cpu-all.h"
#include "sysemu/dump.h"
#include "elf.h"
+#include "sysemu/memory_mapping.h"
#ifdef TARGET_X86_64
typedef struct {
@@ -389,10 +390,11 @@ int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cs,
return cpu_write_qemu_note(f, &cpu->env, opaque, 0);
}
-int cpu_get_dump_info(ArchDumpInfo *info)
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
{
bool lma = false;
- RAMBlock *block;
+ GuestPhysBlock *block;
#ifdef TARGET_X86_64
X86CPU *first_x86_cpu = X86_CPU(first_cpu);
@@ -412,8 +414,8 @@ int cpu_get_dump_info(ArchDumpInfo *info)
} else {
info->d_class = ELFCLASS32;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- if (block->offset + block->length > UINT_MAX) {
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_end > UINT_MAX) {
/* The memory size is greater than 4G */
info->d_class = ELFCLASS64;
break;
--
1.7.1
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Luiz Capitulino @ 2013-07-29 21:08 UTC (permalink / raw)
To: Laszlo Ersek; +Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel
On Mon, 29 Jul 2013 16:37:12 +0200
Laszlo Ersek <lersek@redhat.com> wrote:
> (Apologies for the long To: list, I'm including everyone who
> participated in
> <https://lists.gnu.org/archive/html/qemu-devel/2012-09/msg02607.html>).
>
> Conceptually, the dump-guest-memory command works as follows:
> (a) pause the guest,
> (b) get a snapshot of the guest's physical memory map, as provided by
> qemu,
> (c) retrieve the guest's virtual mappings, as seen by the guest (this is
> where paging=true vs. paging=false makes a difference),
> (d) filter (c) as requested by the QMP caller,
> (e) write ELF headers, keying off (b) -- the guest's physmap -- and (d)
> -- the filtered guest mappings.
> (f) dump RAM contents, keying off the same (b) and (d),
> (g) unpause the guest (if necessary).
>
> Patch #1 affects step (e); specifically, how (d) is matched against (b),
> when "paging" is "true", and the guest kernel maps more guest-physical
> RAM than it actually has.
>
> This can be done by non-malicious, clean-state guests (eg. a pristine
> RHEL-6.4 guest), and may cause libbfd errors due to PT_LOAD entries
> (coming directly from the guest page tables) exceeding the vmcore file's
> size.
>
> Patches #2 to #4 are independent of the "paging" option (or, more
> precisely, affect them equally); they affect (b). Currently input
> parameter (b), that is, the guest's physical memory map as provided by
> qemu, is implicitly represented by "ram_list.blocks". As a result, steps
> and outputs dependent on (b) will refer to qemu-internal offsets.
>
> Unfortunately, this breaks when the guest-visible physical addresses
> diverge from the qemu-internal, RAMBlock based representation. This can
> happen eg. for guests > 3.5 GB, due to the 32-bit PCI hole; see patch #4
> for a diagram.
>
> Patch #2 introduces input parameter (b) explicitly, as a reasonably
> minimal map of guest-physical address ranges. (Minimality is not a hard
> requirement here, it just decreases the number of PT_LOAD entries
> written to the vmcore header.) Patch #3 populates this map. Patch #4
> rebases the dump-guest-memory command to it, so that steps (e) and (f)
> work with guest-phys addresses.
>
> As a result, the "crash" utility can parse vmcores dumped for big x86_64
> guests (paging=false).
>
> Please refer to Red Hat Bugzilla 981582
> <https://bugzilla.redhat.com/show_bug.cgi?id=981582>.
>
> Disclaimer: as you can tell from my progress in the RHBZ, I'm new to the
> memory API. The way I'm using it might be retarded.
Is this for 1.6?
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Laszlo Ersek @ 2013-07-29 21:53 UTC (permalink / raw)
To: Luiz Capitulino
Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel
On 07/29/13 23:08, Luiz Capitulino wrote:
> On Mon, 29 Jul 2013 16:37:12 +0200
> Laszlo Ersek <lersek@redhat.com> wrote:
>
>> (Apologies for the long To: list, I'm including everyone who
>> participated in
>> <https://lists.gnu.org/archive/html/qemu-devel/2012-09/msg02607.html>).
>>
>> Conceptually, the dump-guest-memory command works as follows:
>> (a) pause the guest,
>> (b) get a snapshot of the guest's physical memory map, as provided by
>> qemu,
>> (c) retrieve the guest's virtual mappings, as seen by the guest (this is
>> where paging=true vs. paging=false makes a difference),
>> (d) filter (c) as requested by the QMP caller,
>> (e) write ELF headers, keying off (b) -- the guest's physmap -- and (d)
>> -- the filtered guest mappings.
>> (f) dump RAM contents, keying off the same (b) and (d),
>> (g) unpause the guest (if necessary).
>>
>> Patch #1 affects step (e); specifically, how (d) is matched against (b),
>> when "paging" is "true", and the guest kernel maps more guest-physical
>> RAM than it actually has.
>>
>> This can be done by non-malicious, clean-state guests (eg. a pristine
>> RHEL-6.4 guest), and may cause libbfd errors due to PT_LOAD entries
>> (coming directly from the guest page tables) exceeding the vmcore file's
>> size.
>>
>> Patches #2 to #4 are independent of the "paging" option (or, more
>> precisely, affect them equally); they affect (b). Currently input
>> parameter (b), that is, the guest's physical memory map as provided by
>> qemu, is implicitly represented by "ram_list.blocks". As a result, steps
>> and outputs dependent on (b) will refer to qemu-internal offsets.
>>
>> Unfortunately, this breaks when the guest-visible physical addresses
>> diverge from the qemu-internal, RAMBlock based representation. This can
>> happen eg. for guests > 3.5 GB, due to the 32-bit PCI hole; see patch #4
>> for a diagram.
>>
>> Patch #2 introduces input parameter (b) explicitly, as a reasonably
>> minimal map of guest-physical address ranges. (Minimality is not a hard
>> requirement here, it just decreases the number of PT_LOAD entries
>> written to the vmcore header.) Patch #3 populates this map. Patch #4
>> rebases the dump-guest-memory command to it, so that steps (e) and (f)
>> work with guest-phys addresses.
>>
>> As a result, the "crash" utility can parse vmcores dumped for big x86_64
>> guests (paging=false).
>>
>> Please refer to Red Hat Bugzilla 981582
>> <https://bugzilla.redhat.com/show_bug.cgi?id=981582>.
>>
>> Disclaimer: as you can tell from my progress in the RHBZ, I'm new to the
>> memory API. The way I'm using it might be retarded.
>
> Is this for 1.6?
It's for whichever release reviewers and maintainers accept it! :)
On a more serious note, if someone makes an exception out of this, I
won't object, but I'm not pushing for it. My posting close to the hard
freeze was a coincidence.
Thanks,
Laszlo
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Laszlo Ersek @ 2013-07-29 21:59 UTC (permalink / raw)
To: Luiz Capitulino
Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel
On 07/29/13 23:53, Laszlo Ersek wrote:
> On 07/29/13 23:08, Luiz Capitulino wrote:
>> Is this for 1.6?
>
> It's for whichever release reviewers and maintainers accept it! :)
>
> On a more serious note, if someone makes an exception out of this, I
> won't object, but I'm not pushing for it. My posting close to the hard
> freeze was a coincidence.
Hmm. I've just caught up on <http://wiki.qemu.org/Planning/1.6>.
Apparently the hard freeze blocks features only, and this is a bugfix.
So yeah, why not.
Thanks,
Laszlo
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Luiz Capitulino @ 2013-07-30 18:51 UTC (permalink / raw)
To: Laszlo Ersek
Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel,
stefanha, pbonzini
On Mon, 29 Jul 2013 16:37:12 +0200
Laszlo Ersek <lersek@redhat.com> wrote:
> (Apologies for the long To: list, I'm including everyone who
> participated in
> <https://lists.gnu.org/archive/html/qemu-devel/2012-09/msg02607.html>).
>
> Conceptually, the dump-guest-memory command works as follows:
> (a) pause the guest,
> (b) get a snapshot of the guest's physical memory map, as provided by
> qemu,
> (c) retrieve the guest's virtual mappings, as seen by the guest (this is
> where paging=true vs. paging=false makes a difference),
> (d) filter (c) as requested by the QMP caller,
> (e) write ELF headers, keying off (b) -- the guest's physmap -- and (d)
> -- the filtered guest mappings.
> (f) dump RAM contents, keying off the same (b) and (d),
> (g) unpause the guest (if necessary).
>
> Patch #1 affects step (e); specifically, how (d) is matched against (b),
> when "paging" is "true", and the guest kernel maps more guest-physical
> RAM than it actually has.
>
> This can be done by non-malicious, clean-state guests (eg. a pristine
> RHEL-6.4 guest), and may cause libbfd errors due to PT_LOAD entries
> (coming directly from the guest page tables) exceeding the vmcore file's
> size.
>
> Patches #2 to #4 are independent of the "paging" option (or, more
> precisely, affect them equally); they affect (b). Currently input
> parameter (b), that is, the guest's physical memory map as provided by
> qemu, is implicitly represented by "ram_list.blocks". As a result, steps
> and outputs dependent on (b) will refer to qemu-internal offsets.
>
> Unfortunately, this breaks when the guest-visible physical addresses
> diverge from the qemu-internal, RAMBlock based representation. This can
> happen eg. for guests > 3.5 GB, due to the 32-bit PCI hole; see patch #4
> for a diagram.
>
> Patch #2 introduces input parameter (b) explicitly, as a reasonably
> minimal map of guest-physical address ranges. (Minimality is not a hard
> requirement here, it just decreases the number of PT_LOAD entries
> written to the vmcore header.) Patch #3 populates this map. Patch #4
> rebases the dump-guest-memory command to it, so that steps (e) and (f)
> work with guest-phys addresses.
>
> As a result, the "crash" utility can parse vmcores dumped for big x86_64
> guests (paging=false).
>
> Please refer to Red Hat Bugzilla 981582
> <https://bugzilla.redhat.com/show_bug.cgi?id=981582>.
>
> Disclaimer: as you can tell from my progress in the RHBZ, I'm new to the
> memory API. The way I'm using it might be retarded.
Series looks sane to me, but the important details go beyond my background
in this area, so I'd like an additional Reviewed-by before applying this
to the qmp-for-1.6 tree.
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Luiz Capitulino @ 2013-08-01 13:41 UTC (permalink / raw)
To: Laszlo Ersek; +Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel
On Mon, 29 Jul 2013 16:37:12 +0200
Laszlo Ersek <lersek@redhat.com> wrote:
> (Apologies for the long To: list, I'm including everyone who
> participated in
> <https://lists.gnu.org/archive/html/qemu-devel/2012-09/msg02607.html>).
>
> Conceptually, the dump-guest-memory command works as follows:
> (a) pause the guest,
> (b) get a snapshot of the guest's physical memory map, as provided by
> qemu,
> (c) retrieve the guest's virtual mappings, as seen by the guest (this is
> where paging=true vs. paging=false makes a difference),
> (d) filter (c) as requested by the QMP caller,
> (e) write ELF headers, keying off (b) -- the guest's physmap -- and (d)
> -- the filtered guest mappings.
> (f) dump RAM contents, keying off the same (b) and (d),
> (g) unpause the guest (if necessary).
>
> Patch #1 affects step (e); specifically, how (d) is matched against (b),
> when "paging" is "true", and the guest kernel maps more guest-physical
> RAM than it actually has.
>
> This can be done by non-malicious, clean-state guests (eg. a pristine
> RHEL-6.4 guest), and may cause libbfd errors due to PT_LOAD entries
> (coming directly from the guest page tables) exceeding the vmcore file's
> size.
>
> Patches #2 to #4 are independent of the "paging" option (or, more
> precisely, affect them equally); they affect (b). Currently input
> parameter (b), that is, the guest's physical memory map as provided by
> qemu, is implicitly represented by "ram_list.blocks". As a result, steps
> and outputs dependent on (b) will refer to qemu-internal offsets.
>
> Unfortunately, this breaks when the guest-visible physical addresses
> diverge from the qemu-internal, RAMBlock based representation. This can
> happen eg. for guests > 3.5 GB, due to the 32-bit PCI hole; see patch #4
> for a diagram.
>
> Patch #2 introduces input parameter (b) explicitly, as a reasonably
> minimal map of guest-physical address ranges. (Minimality is not a hard
> requirement here, it just decreases the number of PT_LOAD entries
> written to the vmcore header.) Patch #3 populates this map. Patch #4
> rebases the dump-guest-memory command to it, so that steps (e) and (f)
> work with guest-phys addresses.
>
> As a result, the "crash" utility can parse vmcores dumped for big x86_64
> guests (paging=false).
Applied to the qmp branch, thanks.
>
> Please refer to Red Hat Bugzilla 981582
> <https://bugzilla.redhat.com/show_bug.cgi?id=981582>.
>
> Disclaimer: as you can tell from my progress in the RHBZ, I'm new to the
> memory API. The way I'm using it might be retarded.
>
> Laszlo Ersek (4):
> dump: clamp guest-provided mapping lengths to ramblock sizes
> dump: introduce GuestPhysBlockList
> dump: populate guest_phys_blocks
> dump: rebase from host-private RAMBlock offsets to guest-physical
> addresses
>
> include/sysemu/dump.h | 4 +-
> include/sysemu/memory_mapping.h | 30 ++++++-
> dump.c | 171 +++++++++++++++++++++-----------------
> memory_mapping.c | 174 +++++++++++++++++++++++++++++++++++++--
> stubs/dump.c | 3 +-
> target-i386/arch_dump.c | 10 ++-
> 6 files changed, 300 insertions(+), 92 deletions(-)
>
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Luiz Capitulino @ 2013-08-01 14:31 UTC (permalink / raw)
To: Laszlo Ersek; +Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel
On Thu, 1 Aug 2013 09:41:07 -0400
Luiz Capitulino <lcapitulino@redhat.com> wrote:
> Applied to the qmp branch, thanks.
Hmm, it breaks the build. Dropping it from the queue for now:
/home/lcapitulino/work/src/upstream/qmp-unstable/target-s390x/arch_dump.c:179:5: error: conflicting types for ‘cpu_get_dump_info’
int cpu_get_dump_info(ArchDumpInfo *info)
^
In file included from /home/lcapitulino/work/src/upstream/qmp-unstable/target-s390x/arch_dump.c:17:0:
/home/lcapitulino/work/src/upstream/qmp-unstable/include/sysemu/dump.h:24:5: note: previous declaration of ‘cpu_get_dump_info’ was here
int cpu_get_dump_info(ArchDumpInfo *info,
^
make[1]: *** [target-s390x/arch_dump.o] Error 1
make: *** [subdir-s390x-softmmu] Error 2
make: *** Waiting for unfinished jobs....
* Re: [Qemu-devel] [PATCH 0/4] dump-guest-memory: correct the vmcores
From: Laszlo Ersek @ 2013-08-05 7:44 UTC (permalink / raw)
To: Luiz Capitulino
Cc: Anthony Liguori, Jan Kiszka, Markus Armbruster, qemu-devel
On 08/01/13 16:31, Luiz Capitulino wrote:
> On Thu, 1 Aug 2013 09:41:07 -0400
> Luiz Capitulino <lcapitulino@redhat.com> wrote:
>
>> Applied to the qmp branch, thanks.
>
> Hmm, it brakes the build. Dropping it from the queue for now:
>
> /home/lcapitulino/work/src/upstream/qmp-unstable/target-s390x/arch_dump.c:179:5: error: conflicting types for ‘cpu_get_dump_info’
> int cpu_get_dump_info(ArchDumpInfo *info)
> ^
> In file included from /home/lcapitulino/work/src/upstream/qmp-unstable/target-s390x/arch_dump.c:17:0:
> /home/lcapitulino/work/src/upstream/qmp-unstable/include/sysemu/dump.h:24:5: note: previous declaration of ‘cpu_get_dump_info’ was here
> int cpu_get_dump_info(ArchDumpInfo *info,
> ^
> make[1]: *** [target-s390x/arch_dump.o] Error 1
> make: *** [subdir-s390x-softmmu] Error 2
> make: *** Waiting for unfinished jobs....
>
My series was based on
Author: Aurelien Jarno <aurelien@aurel32.net> 2013-07-29 09:03:23
Committer: Aurelien Jarno <aurelien@aurel32.net> 2013-07-29 09:03:23
Merge branch 'trivial-patches' of git://git.corpit.ru/qemu
and it compiled then just fine.
"target-s390x/arch_dump.c" was added in
Author: Ekaterina Tumanova <tumanova@linux.vnet.ibm.com> 2013-07-10 15:26:46
Committer: Christian Borntraeger <borntraeger@de.ibm.com> 2013-07-30 16:12:25
s390: Implement dump-guest-memory support for target s390x
See the commit date: 2013-07-30.
I'll refresh the series.
Laszlo