From 09d67ba76cb5027f69e9635c07b899546d121b56 Mon Sep 17 00:00:00 2001
From: Laszlo Ersek
Date: Thu, 21 Apr 2016 23:11:22 +0200
Subject: [PATCH 2/3] util/mmap-alloc: factor out size_with_guard_pages()

The qemu_ram_mmap() and qemu_ram_munmap() functions currently hard-code
the number of guard pages as 1. Expressed in bytes:

  final_size = size + getpagesize();

Factor out this formula to a static helper function, so that we can
customize it in the next patch. For now there is no change in observable
behavior.

Signed-off-by: Laszlo Ersek
---
 util/mmap-alloc.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 63bb1e215c6e..41e36f74d7be 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -39,13 +39,19 @@ size_t qemu_fd_getpagesize(int fd)
     return getpagesize();
 }
 
+static size_t size_with_guard_pages(size_t size, size_t align)
+{
+    return size + getpagesize();
+}
+
 void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
 {
     /*
      * Note: this always allocates at least one extra page of virtual address
      * space, even if size is already aligned.
      */
-    size_t total = size + align;
+    size_t final_size = size_with_guard_pages(size, align);
+    size_t total = final_size + (align - getpagesize());
 #if defined(__powerpc64__) && defined(__linux__)
     /* On ppc64 mappings in the same segment (aka slice) must share the same
      * page size. Since we will be re-allocating part of this segment
@@ -91,11 +97,14 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     }
 
     /*
-     * Leave a single PROT_NONE page allocated after the RAM block, to serve as
-     * a guard page guarding against potential buffer overflows.
+     * "final_size" includes a customized number of PROT_NONE pages allocated
+     * after the RAM block, to serve as guard pages guarding against potential
+     * buffer overflows. Here we chip off any leftover PROT_NONE pages from the
+     * end: those that we had to allocate initially for guaranteeing the
+     * alignment, but no longer need now.
      */
-    if (total > size + getpagesize()) {
-        munmap(ptr + size + getpagesize(), total - size - getpagesize());
+    if (total > final_size) {
+        munmap(ptr + final_size, total - final_size);
     }
 
     return ptr;
@@ -105,6 +114,6 @@ void qemu_ram_munmap(void *ptr, size_t size, size_t align)
 {
     if (ptr) {
         /* Unmap both the RAM block and the guard page */
-        munmap(ptr, size + getpagesize());
+        munmap(ptr, size_with_guard_pages(size, align));
     }
 }
-- 
1.8.3.1
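
As background for review, below is a minimal standalone sketch of the
reserve/align/trim pattern that qemu_ram_mmap() implements, including the
trailing PROT_NONE guard page that size_with_guard_pages() accounts for.
It is illustrative only, not QEMU code: demo_alloc() and
demo_size_with_guard_pages() are hypothetical names, the fd-backed and
ppc64 paths are omitted, Linux-style MAP_ANONYMOUS semantics are assumed,
and "align" is assumed to be a multiple of the page size, at least one
page.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Mirrors the helper in the patch: one trailing guard page, for now. */
static size_t demo_size_with_guard_pages(size_t size)
{
    return size + getpagesize();
}

/*
 * Allocate "size" usable bytes at an address that is a multiple of
 * "align", followed by an inaccessible (PROT_NONE) guard page.
 */
static void *demo_alloc(size_t size, size_t align)
{
    size_t pagesize = getpagesize();
    size_t final_size = demo_size_with_guard_pages(size);
    /* Over-reserve so that an align-multiple address surely exists in
     * the reservation; this matches "total" in qemu_ram_mmap(). */
    size_t total = final_size + (align - pagesize);
    char *guardptr, *ptr;
    size_t offset;

    /* Step 1: reserve address space only; nothing is accessible yet. */
    guardptr = mmap(NULL, total, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (guardptr == MAP_FAILED) {
        return NULL;
    }

    /* Step 2: find the first align-multiple address in the reservation. */
    offset = ((uintptr_t)guardptr + align - 1) / align * align
             - (uintptr_t)guardptr;

    /* Step 3: make the usable region accessible; the pages right after
     * it stay PROT_NONE and act as the guard. */
    ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return NULL;
    }

    /* Step 4: trim the unused reservation in front of the block... */
    if (offset > 0) {
        munmap(guardptr, offset);
    }
    /* ...and past the guard page, as the second hunk of the patch does. */
    if (total - offset > final_size) {
        munmap(ptr + final_size, total - offset - final_size);
    }
    return ptr;
}

int main(void)
{
    size_t size = 4 * getpagesize();
    char *p = demo_alloc(size, 2 * 1024 * 1024); /* 2 MiB alignment */

    if (!p) {
        return EXIT_FAILURE;
    }
    memset(p, 0xaa, size);  /* the usable region is writable */
    /* p[size] = 0;            touching the guard page would SIGSEGV */
    munmap(p, demo_size_with_guard_pages(size));
    return EXIT_SUCCESS;
}

The point of the factoring is visible here: when the number of trailing
PROT_NONE pages is customized later, only the helper has to change; the
alignment and trimming arithmetic stays the same.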